| query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | negatives (sequencelengths 19-20) | metadata (dict) |
---|---|---|---|
Checks that the solution vector is within optimal problem's boundaries. | def _check(self, vector):
for i, elmt in enumerate(vector):
# checks lower bound
if (elmt < self.lower[i]):
vector[i] = self.lower[i]
# checks upper bound
elif (elmt > self.upper[i]):
vector[i] = self.upper[i]
return vector | [
"def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max",
"def _in_bounds(v, lb, ub):\n\n return np.logical_and(v >= lb, v <= ub).all()",
"def _parameter_check(self):\r\n\r\n # Setting bounds for the x variable\r\n bounds = ((None, None),)\r\n\r\n # Setting the negated value of goal function because scipy.optimize\r\n # doesn't have maximization function\r\n func = lambda x: -(self.V_sl(x) - x - self.c[1])\r\n\r\n # Initial guesses for value of x\r\n initial_guess = (self.theta - np.sqrt(self.sigma_square))\r\n\r\n # Minimization of the negated function to maximize goal function\r\n result = so.minimize(func, initial_guess, bounds=bounds)\r\n\r\n # Testing the condition\r\n output = -result.fun > 0\r\n\r\n return output",
"def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)",
"def _checkImprovedSolution(self, vertex):\n if self._bestPathVertex.isSolution():\n solWayPoint = sys.maxint\n else:\n solWayPoint = self._bestPathVertex.getNextWaypoint().getIndex()\n\n if vertex.isSolution():\n vxWayPoint = sys.maxint\n else:\n vxWayPoint = vertex.getNextWaypoint().getIndex()\n\n # If waypoints are the same (possibly both at final waypoint) then we compare priority which is a better indicator than admissible.\n if solWayPoint == vxWayPoint:\n return self._bestPathVertex.getTimeThroughHeuristic() > vertex.getTimeThroughHeuristic()\n\n return solWayPoint < vxWayPoint",
"def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False",
"def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True",
"def _check_is_solved(self):\n \n if any(self.outliers) == False:\n raise Exception('No outliers detected! Has solver been fit?')\n \n return self",
"def check_finite_bounds(lb, ub):\n if not np.isfinite(lb).all() or not np.isfinite(ub).all():\n raise ValueError(\n 'Selected optimizer cannot work with unconstrained '\n 'optimization problems.'\n )",
"def check_bounds(self, index):\n if index < self.lower_bound or index > self.upper_bound:\n return False\n return True",
"def maxi_within_bounds(self):\n maxi = self.maxi\n return (\n (maxi < self.ideal_upper_bound)\n & (maxi > self.ideal_lower_bound)\n )",
"def is_in_bounds(self, x=None):\n if x is None:\n x = self.parent.val\n # check whether bounds are satisfied for all variables\n return True if np.all(np.logical_and(x > self.bounds[0], x < self.bounds[1])) else False",
"def isValid(self, aSolution):",
"def checkingBounds(self):\n droi, proi, broi = self.getRoiValues()\n print proi\n print broi\n pxc = proi[0]\n pyc = proi[1]\n pxw = proi[2]\n pyw = proi[3]\n bxc = broi[0]\n byc = broi[1]\n bxw = broi[2]\n byw = broi[3]\n print pxc - pxw / 2\n\n if pxc - pxw / 2 < bxc - bxw / 2 or pxc + pxw / 2 > bxc + bxw / 2 or pyc - pyw / 2 < byc - byw / 2 or\\\n pyc + pyw / 2 > byc + byw / 2:\n print \"True\"\n return True\n else:\n print \"False\"\n return False",
"def is_feasible(self, x):\n return np.all(self.f(x) <= 0)",
"def inside_box_boundaries(x, lb, ub):\n return (lb <= x).all() and (x <= ub).all()",
"def boundary_condition(self, x):\n return self.exact_solution(x)",
"def isPrimalBoundBetter(self, testrun, problemid):\n pb = testrun.getProblemDataById(problemid, Key.PrimalBound)\n objsense = testrun.getProblemDataById(problemid, Key.ObjectiveSense)\n optval = testrun.getProblemDataById(problemid, Key.OptimalValue)\n\n if pb is None:\n return False\n\n reltol = self.gaptol * max(abs(pb), 1.0)\n\n if objsense == ObjsenseReader.minimize and optval - pb > reltol:\n return True\n elif objsense == ObjsenseReader.maximize and pb - optval > reltol:\n return True\n return False",
"def verify_interval_in_state_vector(statevector, start, finish):\r\n found = False\r\n for cell_idx in range(start, finish):\r\n\r\n cell_value = statevector[cell_idx]\r\n if cell_value != 0:\r\n found = True\r\n break\r\n return found"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
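The metadata column declares a single triplet objective over (query, document, negatives), so each row expands into one training triple per hard negative. Below is a minimal sketch, assuming nothing beyond the column layout shown above; `row_to_triplets` and `example_row` are illustrative names, and the example values are abbreviated from the first row.

```python
def row_to_triplets(row):
    """Yield one (query, positive document, negative) triple per hard negative."""
    for negative in row["negatives"]:
        yield (row["query"], row["document"], negative)


# Hypothetical usage with an abbreviated version of the first row above.
example_row = {
    "query": "Checks that the solution vector is within optimal problem's boundaries.",
    "document": "def _check(self, vector): ...",
    "negatives": [
        "def in_box_bounds(self, test_vec): ...",
        "def _in_bounds(v, lb, ub): ...",
    ],
}

triplets = list(row_to_triplets(example_row))
print(len(triplets))  # one triple per negative in the row
```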
initmethod = ['random', 'pca'] algos = ['seq','batch'] all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ] alfa_types = ['linear','inv','power'] | def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):
self.initmethod = initmethod
self.algtype = algtype
self.alfaini = alfaini
self.alfafinal = alfafinal
self.neigh = neighborhoodmethod | [
"def initFA(Y, terms, I, gene_ids=None, nHidden=3, nHiddenSparse = 0,pruneGenes=True, FPR=0.99, FNR=0.001, \\\n noise='gauss', minGenes=20, do_preTrain=True, nFix=None, priors=None, covariates=None, dropFactors=True, learnPi=False):\n\n \n #check for consistency of input parameters\n [num_cells,num_genes] = Y.shape\n num_terms = I.shape[1]\n\n assert I.shape[0]==num_genes, 'annotation needs to be matched to gene input dimension'\n\n assert noise in ['gauss','hurdle','poisson'], 'invalid noise model'\n assert 0<FNR<1, 'FNR is required to be between 0 and 1'\n assert 0<FNR<1, 'FPR is required to be between 0 and 1'\n if noise==\"hurdle\" and dropFactors==True:\n dropFactors = False\n print(\"dropFactors only supported for gauss noise model. Set to False.\")\n\n #make sure the annotation is boolean\n I = (I>.5) \n #. filter annotation by min number of required genes\n Iok = I.sum(axis=0)>minGenes\n terms = terms[Iok]\n I = I[:,Iok]\n num_terms = I.shape[1]\n\n\n #create initial pi matrix, which corresponds to the effective prior probability of an annotated link\n pi = SP.zeros([num_genes,num_terms],dtype='float') \n #default FNR\n pi[:] = FNR\n #active links\n pi[I] = FPR \n\n #prune genes?\n if pruneGenes==True:\n idx_genes = SP.sum(I,1)>0\n Y = Y[:,idx_genes]\n pi = pi[idx_genes,:]\n if not (gene_ids is None):\n gene_ids = SP.array(gene_ids)[idx_genes]\n else:\n idx_genes = SP.arange(Y.shape[1]) \n if Y.shape[1]>10000:\n print(\"For large datasets we recommend setting the pruneGenes option to True.\")\n\n\n #center data for Gaussian observation noise\n if noise=='gauss':\n Y-=SP.mean(Y,0) \n\n\n\n #include hidden variables\n if nHiddenSparse>0:\n piSparse = SP.ones((Y.shape[1],nHiddenSparse))*.01\n idxVar = SP.argsort(-Y.var(0))\n for iH in range(piSparse.shape[1]):\n idxOnH = SP.random.choice(idxVar[:100],20, replace=False)\n piSparse[idxOnH,iH] = 0.99\n pi = SP.hstack([piSparse,pi])\n thiddenSparse = SP.repeat('hiddenSparse',nHiddenSparse)\n termsHiddnSparse = ['%s%s' % t for t in zip(thiddenSparse, SP.arange(nHiddenSparse))]\n terms = SP.hstack([termsHiddnSparse,terms])\n num_terms += nHiddenSparse\n\n\n thidden = SP.repeat('hidden',nHidden)\n termsHidden = ['%s%s' % t for t in zip(thidden, SP.arange(nHidden))]\n terms = SP.hstack([termsHidden,terms]) \n\n pi = SP.hstack([SP.ones((Y.shape[1],nHidden))*.99,pi])\n num_terms += nHidden\n\n if not (covariates is None):\n if len(covariates.shape)==1:\n covariates = covariates[:,SP.newaxis]\n nKnown=covariates.shape[1]\n pi = SP.hstack([SP.ones((Y.shape[1],nKnown))*.99,pi])\n num_terms += nKnown\n tcovariates = SP.repeat('covariate',nKnown)\n termsCovariates = ['%s%s' % t for t in zip(tcovariates, SP.arange(nKnown))]\n terms = SP.hstack([termsCovariates,terms]) \n \n\n\n\n#mean term for non-Gaussian noise models\n if noise!='gauss':\n terms = SP.hstack([ 'bias',terms])\n pi = SP.hstack([SP.ones((Y.shape[1],1))*(1.-1e-10),pi]) \n num_terms += 1\n\n if do_preTrain==True: \n Ilabel = preTrain(Y, terms, pi, noise=noise, nFix=nFix, priors=priors, covariates=covariates)\n pi = pi[:,Ilabel]\n terms = terms[Ilabel] \n\n\n\n\n\n\n init={'init_data':CGauss(Y),'Pi':pi,'terms':terms, 'noise':noise, 'covariates':covariates, \"dropFactors\":dropFactors}\n if not gene_ids is None:\n gene_ids = SP.array(gene_ids)\n\n FA = slalom.CSparseFA(components=num_terms, idx_genes = idx_genes, gene_ids = gene_ids, priors=priors, learnPi=learnPi) \n FA.saveInit=False\n FA.init(**init) \n\n return FA",
"def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)",
"def preTrain(Y, terms, P_I, noise='gauss', nFix=None, priors=None, covariates=None):\n\n init_params = {}\n init_params['noise'] = noise\n init_params['iLatent'] = SP.where(terms=='hidden')[0] \n init_params['iLatentSparse'] = SP.array([])#SP.where(terms=='hiddenSparse')[0] \n if not (covariates is None):\n init_params['Known'] = covariates\n learnPi=False\n \n pi = P_I.copy()\n K = pi.shape[1]\n\n #data for sparseFA instance \n pi[pi>.8] =1-1e-100 # 0.99#1-1e-100#0.9999\n pi[pi<.2] =1e-100 #1e-8\n \n\n init={'init_data':CGauss(Y),'Pi':pi,'terms':terms, 'noise':noise, 'covariates':covariates}\n sigmaOff = 1E-3\n sparsity = 'VB'\n\n #prior on noise level \n if priors is None:\n priors = {'Eps': {'priors':[1E-3,1E-3]}}\n #how to initialize network?\n #initType = 'pcaRand'\n terms0=terms\n pi0=pi.copy()\n FA0 = slalom.CSparseFA(components=K,sigmaOff=sigmaOff,sigmaOn=SP.ones(pi.shape[1])*1.0,sparsity=sparsity,nIterations=50,\n permutation_move=False,priors=priors,initType='pcaRand', learnPi=learnPi)\n FA0.init(**init)\n if nFix==None:\n nFix = FA0.nKnown+FA0.nLatent\n \n#Fit PCA \n pca = PCA(n_components=1)#,svd_solver='full')\n pca.fit(FA0.Z.E1)\n X = pca.transform(FA0.Z.E1)\n\n\n#Sort by correlation to PC1 \n MPC = abs(vcorrcoef(FA0.S.E1[:,SP.argsort(FA0.W.Ilabel)].T,X.T))[nFix:]\n Ipi = SP.argsort(-MPC.ravel())\n IpiRev = SP.argsort(MPC.ravel())\n\n\n mRange = list(range(FA0.components))\n mRange[nFix:] = Ipi+nFix\n \n mRangeRev = list(range(FA0.components))\n mRangeRev[nFix:] = IpiRev+nFix\n\n#Run model for 50 iterations \n pi = pi0[:,mRange]\n terms = terms0[mRange] \n init={'init_data':CGauss(Y),'Pi':pi,'terms':terms, 'noise':noise}\n FA = slalom.CSparseFA(components=K,sigmaOff=sigmaOff,sigmaOn=SP.ones(pi.shape[1])*1.0,sparsity=sparsity,\n nIterations=50,permutation_move=False,priors=priors,initType='pcaRand', learnPi=learnPi)\n FA.shuffle=True\n FA.nScale = 30\n\n FA.init(**init) \n for j in range(50):\n FA.update() \n \n\n #Run reverse model for 50 iterations \n pi = pi0[:,mRangeRev]\n terms = terms0[mRangeRev]\n init={'init_data':CGauss(Y),'Pi':pi,'terms':terms, 'noise':noise}\n FArev = slalom.CSparseFA(components=K,sigmaOff=sigmaOff,sigmaOn=SP.ones(pi.shape[1])*1.0,sparsity=sparsity,\n nIterations=50,permutation_move=False,priors=priors,initType='pcaRand', learnPi=learnPi)\n FArev.shuffle=True\n FArev.nScale = 30\n FArev.init(**init) \n\n #FArev.iterate(forceIterations=True, nIterations=nIterations)\n for j in range(50):\n FArev.update() \n \n #import pdb\n IpiM = (-(0.5*(1./FArev.Alpha.E1[SP.argsort(mRangeRev)][nFix:])+.5*(1./FA.Alpha.E1[SP.argsort(mRange)][nFix:]))).argsort() \n \n\n# IpiM = (-(0.5*(1./FArev.Alpha.E1[SP.argsort(mRangeRev)][nFix:]*FArev.S.E1[:,SP.argsort(mRangeRev)][:,nFix:].std(0))+.5*(1./FA.Alpha.E1[SP.argsort(mRange)][nFix:]*FA.S.E1[:,SP.argsort(mRange)][:,nFix:].std(0)))).argsort() \n Ilabel = SP.hstack([SP.arange(nFix),IpiM+nFix])\n\n return Ilabel",
"def __init__(self, total_args):\n\t\tself.alpha = 0.0\n\t\tself.salida = 0.0\n\t\tself.bias = pseudoaleatorio(-1.0, 1.0)\n\t\tself.pesos = []\n\t\tfor i in range(total_args):\n\t\t\tself.pesos.append(pseudoaleatorio(-1.0, 1.0))",
"def run(self):\n\n # Create an initial uniform distribution if not given\n if self._distribution is None:\n inputMin = self._input.getMin()\n inputMin[0] = np.min(self._defectSizes)\n inputMax = self._input.getMax()\n inputMax[0] = np.max(self._defectSizes)\n marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]\n self._distribution = ot.ComposedDistribution(marginals)\n\n # Create the design of experiments of the candidate points where the\n # criterion is computed\n if self._distribution.hasIndependentCopula():\n # without copula use low discrepancy experiment as first doe\n doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), \n self._distribution, self._candidateSize).generate()\n else:\n # else simple Monte Carlo distribution on Uniform distribution\n doeCandidate = self._distribution.getSample(self._candidateSize)\n\n doeCandidate = np.array(doeCandidate)\n # build initial classifier model\n # build the kriging model without optimization\n\n if self._verbose:\n print('Building the classifier')\n\n n_ini = int(self._input.getSize())\n self._input = np.array(self._input)\n self._signals = np.hstack(self._signals)\n\n n_added_points = 0\n algo_iteration = 0\n \n ## Cas de la classif par svc\n if self._classifierType == \"svc\" :\n algo_temp = list(map(\n lambda C, kernel, degree, probability :\n svm.SVC(\n C=C,\n kernel=kernel,\n degree=degree,\n probability=probability,\n coef0=1,\n ),\n *self._ClassifierParameters))[0]\n\n ## Cas de la classif par fro\n if self._classifierType == \"rf\" :\n algo_temp = list(map(\n lambda n_estimators, max_depth, min_samples_split, random_state : \n ExtraTreesClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n random_state=random_state\n ),\n *self._ClassifierParameters))[0] \n \n algo_temp.fit(self._input,self._signals)\n \n list_classifiers = []\n f_iter = algo_temp.predict_proba\n list_classifiers.append(f_iter)\n self._classifierModel = f_iter\n \n plt.ion()\n # Start the improvment loop\n if self._verbose and self._nMorePoints > 0:\n print('Start the improvement loop')\n\n while n_added_points < self._nMorePoints : \n \n # calcul de ce qu il y a dans l' exp de la proba \n probs = f_iter(doeCandidate)[:,1] \n\n # recuperation des indices ou la p p_min < proba(x) < p_max \n ind_p1 = np.where(probs<self._pmax)[0]\n ind_p2 = np.where(probs>=self._pmin)[0]\n ind_p = np.intersect1d(ind_p2,ind_p1)\n ind = ind_p\n \n # s'il n'a pas d indices on elargit p_min = 0.45, p_max=0.55\n if len(ind)==0:\n ind_p1 = np.where(probs<0.1)[0]\n ind_p2 = np.where(probs>=0.8)[0]\n ind_p = np.intersect1d(ind_p2,ind_p1) \n ind = ind_p \n \n ind_rank = np.argsort(probs[ind])\n quant = [0,\n int(len(ind)/4.),\n int(len(ind)/2.),\n int(3.*len(ind)/4.),\n len(ind)-1]\n\n ind_bis = ind_rank[quant]\n x_new = doeCandidate[ind[ind_bis],:]\n z_new = np.hstack(self._physicalModel(x_new)) \n \n n_new_temp = len(self._input) + len(x_new)\n\n # si on depasse le nombre de points, on s arrete\n if n_new_temp > (n_ini + self._nMorePoints) :\n x_new = x_new[:self._nMorePoints + n_ini - len(self._input),:]\n z_new = z_new[:self._nMorePoints + n_ini - len(self._input)]\n \n self._input = np.vstack((self._input,x_new)) \n self._signals = np.hstack((self._signals,z_new))\n \n n_added_points = n_new_temp - n_ini\n algo_iteration = algo_iteration + 1\n \n if self._classifierType == \"svc\" :\n algo_temp = list(map(\n lambda C, kernel, degree, probability :\n svm.SVC(\n C=C,\n kernel=kernel,\n degree=degree,\n 
probability=probability,\n coef0=1),\n *self._ClassifierParameters))[0]\n \n \n if self._classifierType == \"rf\" :\n algo_temp = list(map(\n lambda n_estimators, max_depth, min_samples_split, random_state : \n ExtraTreesClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n random_state=random_state\n ),\n *self._ClassifierParameters))[0] \n \n # Apprentissage avec self._input,self._signals\n algo_temp.fit(self._input,self._signals)\n \n\n self._confMat = np.zeros((2,2)) \n for classifier in list_classifiers :\n conf_temp = 1.*confusion_matrix(self._signals, classifier(self._input)[:,1]>=0.5 )\n conf_temp = 1.*conf_temp/conf_temp.sum(axis=0)\n self._confMat = conf_temp + self._confMat \n \n self._confMat = 1.*self._confMat/len(list_classifiers)\n classif_algo_temp = algo_temp.predict_proba\n \n p11 = self._confMat[1,1]\n p10 = self._confMat[1,0] \n \n def agg_classifier(x_in):\n c = p11-p10\n p1_bayes = 1./c*(classif_algo_temp(x_in)[:,1] - p10)\n p1_bayes = np.vstack(np.min(np.array([np.max(np.array(\n [p1_bayes,np.zeros(len(p1_bayes))]), axis=0),\n np.ones(len(p1_bayes))]), axis=0)) \n return(np.array([1-p1_bayes,p1_bayes]).T)[0]\n \n f_iter = agg_classifier\n list_classifiers.append(f_iter)\n self._classifierModel = f_iter\n\n if self._verbose:\n updateProgress(n_added_points-1, self._nMorePoints, 'Adding points')\n\n if self._graph:\n self._PODPerDefect = self._computePOD(self._defectSizes, agg_classifier)\n # create the interpolate function of the POD model\n meanPOD = self._PODPerDefect.computeMean()\n interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')\n self._PODmodel = ot.PythonFunction(1, 1, interpModel)\n # The POD at confidence level is built in getPODCLModel() directly\n fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)\n plt.draw()\n plt.pause(0.001)\n plt.show()\n if self._graphDirectory is not None:\n fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveHitMissPOD_')+str(algo_iteration),\n bbox_inches='tight', transparent=True)\n\n self._input = ot.NumericalSample(self._input)\n self._signals = ot.NumericalSample(np.vstack(self._signals))\n # Compute the sample predicted for each defect sizes\n self._PODPerDefect = self._computePOD(self._defectSizes, self._classifierModel)\n # compute the POD for all defect sizes\n meanPOD = self._PODPerDefect.computeMean()\n # create the interpolate function of the POD model\n interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')\n self._PODmodel = ot.PythonFunction(1, 1, interpModel)\n \n # The POD at confidence level is built in getPODCLModel() directly\n\n # remove the interactive plotting\n plt.ioff()",
"def __init__(self, petri_net):\n self.petri_net = petri_net",
"def execute_algos_used_Generated_instances(arr_pl_M_T_vars_init,\r\n name_dir=None,\r\n date_hhmm=None,\r\n k_steps=None,\r\n NB_REPEAT_K_MAX=None,\r\n algos=None,\r\n learning_rates=None,\r\n pi_hp_plus=None,\r\n pi_hp_minus=None,\r\n a=1, b=1,\r\n gamma_version=1,\r\n used_instances=True,\r\n used_storage_det=True,\r\n manual_debug=False, \r\n criteria_bf=\"Perf_t\", \r\n debug=False):\r\n # directory to save execution algos\r\n name_dir = \"tests\" if name_dir is None else name_dir\r\n date_hhmm = datetime.now().strftime(\"%d%m_%H%M\") \\\r\n if date_hhmm is None \\\r\n else date_hhmm\r\n \r\n # steps of learning\r\n k_steps = 5 if k_steps is None else k_steps\r\n fct_aux.NB_REPEAT_K_MAX = 3 if NB_REPEAT_K_MAX is None else NB_REPEAT_K_MAX\r\n p_i_j_ks = [0.5, 0.5, 0.5]\r\n \r\n # list of algos\r\n ALGOS = [\"LRI1\", \"LRI2\", \"DETERMINIST\", \"RD-DETERMINIST\"]\\\r\n + fct_aux.ALGO_NAMES_BF + fct_aux.ALGO_NAMES_NASH\r\n algos = ALGOS if algos is None \\\r\n else algos\r\n # list of pi_hp_plus, pi_hp_minus\r\n pi_hp_plus = [0.2*pow(10,-3)] if pi_hp_plus is None else pi_hp_plus\r\n pi_hp_minus = [0.33] if pi_hp_minus is None else pi_hp_minus\r\n # learning rate \r\n learning_rates = [0.01] \\\r\n if learning_rates is None \\\r\n else learning_rates # list(np.arange(0.05, 0.15, step=0.05))\r\n \r\n \r\n \r\n zip_pi_hp = list(zip(pi_hp_plus, pi_hp_minus))\r\n \r\n cpt = 0\r\n algo_piHpPlusMinus_learningRate \\\r\n = it.product(algos, zip_pi_hp, learning_rates)\r\n \r\n for (algo_name, (pi_hp_plus_elt, pi_hp_minus_elt), \r\n learning_rate) in algo_piHpPlusMinus_learningRate:\r\n \r\n print(\"______ execution {}: {}, rate={}______\".format(cpt, \r\n algo_name, learning_rate))\r\n cpt += 1\r\n msg = \"pi_hp_plus_\"+str(pi_hp_plus_elt)\\\r\n +\"_pi_hp_minus_\"+str(pi_hp_minus_elt)\r\n path_to_save = os.path.join(name_dir, \"simu_\"+date_hhmm,\r\n msg, algo_name\r\n )\r\n if algo_name == ALGOS[0]:\r\n # 0: LRI1\r\n print(\"*** ALGO: {} *** \".format(algo_name))\r\n utility_function_version = 1\r\n path_to_save = os.path.join(name_dir, \"simu_\"+date_hhmm,\r\n msg, algo_name, str(learning_rate)\r\n )\r\n Path(path_to_save).mkdir(parents=True, exist_ok=True)\r\n arr_M_T_K_vars = autoLriGameModel\\\r\n .lri_balanced_player_game_all_pijk_upper_08(\r\n arr_pl_M_T_vars_init.copy(),\r\n pi_hp_plus=pi_hp_plus_elt, \r\n pi_hp_minus=pi_hp_minus_elt,\r\n a=a, b=b,\r\n gamma_version=gamma_version,\r\n k_steps=k_steps, \r\n learning_rate=learning_rate,\r\n p_i_j_ks=p_i_j_ks,\r\n utility_function_version=utility_function_version,\r\n path_to_save=path_to_save, \r\n manual_debug=manual_debug, dbg=debug)\r\n elif algo_name == ALGOS[1]:\r\n # 1: LRI2\r\n print(\"*** ALGO: {} *** \".format(algo_name))\r\n utility_function_version = 2\r\n path_to_save = os.path.join(name_dir, \"simu_\"+date_hhmm,\r\n msg, algo_name, str(learning_rate)\r\n )\r\n Path(path_to_save).mkdir(parents=True, exist_ok=True)\r\n arr_M_T_K_vars = autoLriGameModel\\\r\n .lri_balanced_player_game_all_pijk_upper_08(\r\n arr_pl_M_T_vars_init.copy(),\r\n pi_hp_plus=pi_hp_plus_elt, \r\n pi_hp_minus=pi_hp_minus_elt,\r\n a=a, b=b,\r\n gamma_version=gamma_version,\r\n k_steps=k_steps, \r\n learning_rate=learning_rate,\r\n p_i_j_ks=p_i_j_ks,\r\n utility_function_version=utility_function_version,\r\n path_to_save=path_to_save, \r\n manual_debug=manual_debug, dbg=debug)\r\n \r\n elif algo_name == ALGOS[2] or algo_name == ALGOS[3]:\r\n # 2: DETERMINIST, 3: RANDOM DETERMINIST\r\n print(\"*** ALGO: {} *** \".format(algo_name))\r\n random_determinist = 
False if algo_name == ALGOS[2] else True\r\n Path(path_to_save).mkdir(parents=True, exist_ok=True)\r\n arr_M_T_vars = autoDetGameModel.determinist_balanced_player_game(\r\n arr_pl_M_T_vars_init.copy(),\r\n pi_hp_plus=pi_hp_plus_elt, \r\n pi_hp_minus=pi_hp_minus_elt,\r\n a=a, b=b,\r\n gamma_version=gamma_version,\r\n random_determinist=random_determinist,\r\n used_storage=used_storage_det,\r\n path_to_save=path_to_save, \r\n manual_debug=manual_debug, dbg=debug)\r\n \r\n elif algo_name == fct_aux.ALGO_NAMES_BF[0] :\r\n # 0: BEST_BRUTE_FORCE (BF) , 1:BAD_BF, 2: MIDDLE_BF\r\n # execute tous les BF\r\n print(\"*** ALGO: {} *** \".format(algo_name))\r\n Path(path_to_save).mkdir(parents=True, exist_ok=True)\r\n arr_M_T_vars = autoBfGameModel\\\r\n .bf_balanced_player_game(\r\n arr_pl_M_T_vars_init.copy(),\r\n pi_hp_plus=pi_hp_plus_elt, \r\n pi_hp_minus=pi_hp_minus_elt,\r\n a=a, b=b,\r\n gamma_version=gamma_version,\r\n path_to_save=path_to_save, \r\n name_dir=name_dir, \r\n date_hhmm=date_hhmm,\r\n manual_debug=manual_debug, \r\n criteria_bf=criteria_bf, dbg=debug)\r\n \r\n \r\n elif algo_name == fct_aux.ALGO_NAMES_NASH[0] :\r\n # 0: \"BEST-NASH\", 1: \"BAD-NASH\", 2: \"MIDDLE-NASH\"\r\n print(\"*** ALGO: {} *** \".format(algo_name))\r\n Path(path_to_save).mkdir(parents=True, exist_ok=True)\r\n arr_M_T_vars = autoNashGameModel\\\r\n .nash_balanced_player_game(\r\n arr_pl_M_T_vars_init.copy(),\r\n pi_hp_plus=pi_hp_plus_elt, \r\n pi_hp_minus=pi_hp_minus_elt,\r\n a=a, b=b,\r\n gamma_version=gamma_version,\r\n path_to_save=path_to_save, \r\n name_dir=name_dir, \r\n date_hhmm=date_hhmm,\r\n manual_debug=manual_debug, \r\n dbg=debug) \r\n \r\n \r\n print(\"NB_EXECUTION cpt={}\".format(cpt))",
"def test_afnetwork():\n #import matplotlib\n #matplotlib.use('TkAgg')\n #import matplotlib.pyplot as plt\n #import matplotlib.patches as mpatches\n\n import numpy as np\n from crpm.setup_afmodel import setup_afmodel\n\n from crpm.dynamics import computecost\n from crpm.analyzebinaryclassifier import analyzebinaryclassifier\n #from crpm.lossfunctions import loss\n from crpm.analyzebinaryclassifier import plotroc\n from crpm.gradientdecent import gradientdecent\n from crpm.contrastivedivergence import contrastivedivergence\n #from crpm.ffn import FFN\n from crpm.ffn_bodyplan import stack_new_layer\n from crpm.ffn_bodyplan import copy_ffn\n from crpm.fwdprop import fwdprop\n #from crpm.backprop import backprop\n #from crpm.dynamics import computeforces\n #from crpm.dynamics import maxforce\n\n from crpm.gan import gan\n\n\n #init numpy seed\n np.random.seed(40017)\n\n #setup model\n prototype, train, target, valid, vtarget = setup_afmodel()\n\n #trim data\n #maxobv = 150\n #train = train[:,:maxobv]\n #valid = valid[:,:maxobv]\n #target = target[:maxobv]\n #vtarget = vtarget[:maxobv]\n\n #get prototype depth\n nlayer = len(prototype)\n\n #get data dimensions\n nfeat = train.shape[0]\n nobv = train.shape[1]\n\n #return untrained autoencoder\n _, autoencoder = contrastivedivergence(prototype, train, maxepoch=0)\n\n # ----- Discriminator -----\n\n #create discriminator\n discriminator = copy_ffn(autoencoder[0:len(prototype)])\n discriminator = stack_new_layer(discriminator, n=1, activation=\"logistic\")\n\n print(\"analyze untrained discriminator to iden subtype\")\n pred, icost = computecost(discriminator, valid, vtarget, \"bce\")\n roc, ireport = analyzebinaryclassifier(pred, vtarget)\n if ireport[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, icost = computecost(discriminator, valid, 1-vtarget, \"bce\")\n roc, ireport = analyzebinaryclassifier(pred, 1-vtarget)\n print(ireport)\n #plotroc(roc)\n\n #train discriminator\n pred, cost, _ = gradientdecent(discriminator, train, target, \"bce\",\n valid, vtarget,\n earlystop=True,\n finetune=7)\n\n print(\"analyze trained discriminator to iden subtype\")\n pred, cost = computecost(discriminator, valid, vtarget, \"bce\")\n roc, report = analyzebinaryclassifier(pred, vtarget)\n if report[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, cost = computecost(discriminator, valid, 1-vtarget, \"bce\")\n roc, report = analyzebinaryclassifier(pred, 1-vtarget)\n print(report)\n #plotroc(roc)\n\n #assert discriminator can be trained by binary cross entropy error\n #assert icost > cost\n\n #assert discriminator has potential to iden two classes\n #assert report[\"AreaUnderCurve\"] > ireport[\"AreaUnderCurve\"]\n #assert report[\"AreaUnderCurve\"] > .55\n\n # ----- GENERATOR -----\n\n #create generator from decoder\n generator = copy_ffn(autoencoder[len(prototype)-1:len(autoencoder)])\n\n #correct label idecies\n idx = 0\n for layer in generator:\n generator[idx][\"layer\"] = idx\n idx += 1\n\n #assert False\n #-- Main GAN training---\n #ganerr = gan(generator, discriminator, train,\n # maxepoch=100000, batchsize=1, finetune=6)\n ganerr = gan(generator, discriminator, train,\n maxepoch=100000, batchsize=1, finetune=6)\n\n #def moving_average(a, n=3) :\n # ret = np.cumsum(a, dtype=float)\n # ret[n:] = ret[n:] - ret[:-n]\n # return ret[n - 1:] / n\n\n #ganerr[:,2] = np.log(ganerr[:,2]) #plot density error on logscale\n #discerrbar = moving_average(ganerr[:, 0], n=20)\n #generrbar = moving_average(ganerr[:, 1], n=20)\n #autoerrbar = moving_average(ganerr[:, 2], 
n=20)\n\n #assert generator fools discriminator at least some of the time bce<65%.\n print(ganerr[-1,1])\n assert ganerr[-1,1] <.65\n\n #fig = plt.figure()\n #plt.plot(ganerr[:, 0], ganerr[:, 1])\n #plt.plot(discerrbar, generrbar)\n #plt.plot(discerrbar[0], generrbar[0], marker=\"D\", color=\"green\", markersize=10)\n #plt.plot(discerrbar[-1], generrbar[-1], marker=\"8\", color=\"red\", markersize=10)\n #plt.xlabel(\"discriminator error\")\n #plt.ylabel(\"generator error\")\n #plt.show()\n\n #fig = plt.figure()\n #plt.plot(ganerr[:, 0], ganerr[:, 2])\n #plt.plot(discerrbar, autoerrbar)\n #plt.plot(discerrbar[0], autoerrbar[0], marker=\"D\", color=\"green\", markersize=10)\n #plt.plot(discerrbar[-1], autoerrbar[-1], marker=\"8\", color=\"red\", markersize=10)\n #plt.xlabel(\"discriminator error\")\n #plt.ylabel(\"encoder error\")\n #plt.show()\n\n #generate fake data for every training sample\n nsample = train.shape[1]\n fake, _ = fwdprop(np.random.rand(generator[0][\"n\"], nsample), generator)\n #merge training and fake data\n gandata = np.hstack((train, fake))\n ganlabels = np.hstack((np.repeat(1, nsample),np.repeat(0, nsample)))\n\n print(\"analyze trained discriminator on fake vs training set\")\n pred, cost = computecost(discriminator, gandata, ganlabels, \"bce\")\n roc, report = analyzebinaryclassifier(pred, ganlabels)\n if report[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, cost = computecost(discriminator, gandata, ganlabels, \"bce\")\n roc, report = analyzebinaryclassifier(pred, 1-ganlabels)\n print(report)\n #plotroc(roc)\n\n #gen fake data for every validation sample\n nsample = valid.shape[1]\n fake, _ = fwdprop(np.random.rand(generator[0][\"n\"], nsample), generator)\n #merge validation and fake data\n gandata = np.hstack((valid, fake))\n ganlabels = np.hstack((np.repeat(1, nsample),np.repeat(0, nsample)))\n\n print(\"analyze trained discriminator on fake vs vaidation set\")\n pred, costv = computecost(discriminator, gandata, ganlabels, \"bce\")\n roc, reportv = analyzebinaryclassifier(pred, ganlabels)\n if reportv[\"AreaUnderCurve\"]<.5:\n #flip labels\n pred, costv = computecost(discriminator, gandata, 1-ganlabels, \"bce\")\n roc, reportv = analyzebinaryclassifier(pred, 1-ganlabels)\n print(reportv)\n #plotroc(roc)\n\n #assert discriminator has poor potential to iden fake data\n assert reportv[\"AreaUnderCurve\"] <.55\n\n #get fake data the discriminator thinks is real\n pred, _ = fwdprop(fake, discriminator)\n spoof = fake[:, pred[0, :] > report[\"OptimalThreshold\"]]\n\n #plot metabolite distributions\n #labels = []\n #def add_label(violin, label):\n # color = violin[\"bodies\"][0].get_facecolor().flatten()\n # labels.append((mpatches.Patch(color=color), label))\n\n #add_label(plt.violinplot(train.T), \"Training\")\n #add_label(plt.violinplot(valid.T), \"Validation\")\n #add_label(plt.violinplot(fake.T), \"Simulated\")\n #add_label(plt.violinplot(spoof.T), \"Spoofed\")\n\n #plt.legend(*zip(*labels))\n\n\n #viplt1, = plt.violinplot(train.T)\n #plt.violinplot(valid.T)\n #plt.violinplot(fake.T)\n #plt.legend(labels=[\"training\", \"validation\", \"simulated\"])\n #plt.show()",
"def probinit(self, aaa, n_obj):\n # Set algorithm...\n if aaa == 'nsga':\n algo = nsga_II(m=0.05)\n else:\n algo = jde(memory=True)\n #algo = mde_pbx()\n #algo = de_1220()\n\n # ...and initialize problem with instance atributes\n prob = mga_1dsm(seq = self.FBseq,\n multi_objective = n_obj,\n dsm_dv_barrier = self.MAX_DV)\n\n prob.set_vinf((self.C3)**0.5)\n prob.set_tof(self.TOF[0], self.TOF[1])\n prob.set_entry_barrier(self.entry_barrier)\n prob.set_launch_window(self.EPOCHSTART, self.EPOCHEND)\n return prob, algo",
"def main():\n parser = apollocaffe.base_parser()\n parser.add_argument('--datasize', required=True)\n parser.add_argument('--batchsize', required=True)\n parser.add_argument('--numIter', required=True)\n args = parser.parse_args()\n # config = json.load(open(args.config, 'r'))\n # if args.weights is not None:\n # config[\"solver\"][\"weights\"] = args.weights\n # config[\"solver\"][\"start_iter\"] = args.start_iter\n # apollocaffe.set_random_seed(config[\"solver\"][\"random_seed\"])\n apollocaffe.set_device(args.gpu)\n datasize = int(args.datasize)\n batchsize = int(args.batchsize)\n numIter = int(args.numIter)\n\n # apollocaffe.set_cpp_loglevel(args.loglevel)\n\n train(datasize, batchsize, numIter)",
"def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)",
"def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GAssist_ProgressTrack\"\r\n outPop = \"GH_GAssist_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n # Run Parameters - User specified.\r\n iterInput = '20.50.100' \r\n pop = 100\r\n wild = 0.5\r\n defaultClass = \"0\" #auto, 0, disabled \r\n init = \"cw\" #'none', 'smart', 'cw'\r\n MDL = 1\r\n windows = 2\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n e = GAssist_Environment(trainData,testData,bitLength, init) \r\n cons.setConstants(pop, wild, defaultClass, e, MDL, windows) \r\n sampleSize = e.getNrSamples()\r\n gassist = GAssist(e, outProg, outPop, bitLength, CVpartitions, graphPerformance) \r\n \r\n #Set some GAssist parameters.\r\n if trackCycles == 'Default':\r\n gassist.setTrackingIterations(sampleSize)\r\n else:\r\n gassist.setTrackingIterations(trackCycles) \r\n gassist.setNumberOfTrials(lastIter, iterList) \r\n gassist.setInitialization(init)\r\n #Run the GAssist Algorithm \r\n gassist.runGAssist()",
"def init_genetic_alg():\n source = (random.randint(0, board_size.get() - 1), random.randint(0, board_size.get() - 1))\n dest = (random.randint(0, board_size.get() - 1), random.randint(0, board_size.get() - 1))\n return GeneticAlg(pop_size.get(), board_size.get(), source, dest, obstacles_percent.get() / 100)",
"def i_awGA_templet(AIM_M, AIM_F, PUN_M, PUN_F, FieldDR, problem, maxormin, MAXGEN, MAXSIZE, NIND, SUBPOP, GGAP, selectStyle, recombinStyle, recopt, pm, distribute, drawing = 1):\n \n #==========================初始化配置===========================\n # 获取目标函数和罚函数\n aimfuc = getattr(AIM_M, AIM_F) # 获得目标函数\n if PUN_F is not None:\n punishing = getattr(PUN_M, PUN_F) # 获得罚函数\n exIdx = np.array([])\n #=========================开始遗传算法进化=======================\n if problem == 'R':\n Chrom = ga.crtrp(NIND, FieldDR) # 生成实数值种群\n elif problem == 'I':\n Chrom = ga.crtip(NIND, FieldDR) # 生成整数值种群\n ObjV = aimfuc(Chrom) # 计算种群目标函数值\n NDSet = np.zeros((0, Chrom.shape[1])) # 定义帕累托最优解记录器\n NDSetObjV = np.zeros((0, ObjV.shape[1])) # 定义帕累托最优解的目标函数值记录器\n ax = None\n start_time = time.time() # 开始计时\n # 开始进化!!\n for gen in range(MAXGEN):\n if NDSet.shape[0] > MAXSIZE:\n NDSet = NDSet[0: MAXSIZE, :]\n NDSetObjV = NDSetObjV[0: MAXSIZE, :]\n break\n [CombinObjV, weight] = ga.awGA(ObjV) # 适应性权重法求聚合目标函数值\n FitnV = ga.ranking(maxormin * CombinObjV) # 根据加权单目标计算适应度\n if PUN_F is not None:\n [FitnV, exIdx] = punishing(Chrom, FitnV) # 调用罚函数\n [FitnV, frontIdx] = ga.ndominfast(maxormin * ObjV, exIdx) # 求种群的非支配个体,并更新适应度\n # 更新帕累托最优集以及种群非支配个体的适应度\n [FitnV, NDSet, NDSetObjV, repnum] = ga.upNDSet(Chrom, maxormin * ObjV, FitnV, NDSet, maxormin * NDSetObjV, frontIdx, exIdx)\n if distribute == True: # 若要增强帕累托解集的分布性\n # 计算每个目标下个体的聚集距离(不需要严格计算欧氏距离,计算绝对值即可)\n for i in range(ObjV.shape[1]):\n idx = np.argsort(ObjV[:, i], 0)\n dis = np.abs(np.diff(ObjV[idx, i].T, 1).T) / (np.max(ObjV[idx, i]) - np.min(ObjV[idx, i]) + 1) # 差分计算距离\n dis = np.hstack([dis, dis[-1]])\n dis = dis + np.min(dis)\n FitnV[idx, 0] *= np.exp(dis) # 根据聚集距离修改适应度,以增加种群的多样性\n # 进行遗传操作!!\n SelCh=ga.selecting(selectStyle, Chrom, FitnV, GGAP, SUBPOP) # 选择\n SelCh=ga.recombin(recombinStyle, SelCh, recopt, SUBPOP) #交叉\n if problem == 'R':\n SelCh=ga.mutbga(SelCh,FieldDR, pm) # 变异\n if repnum > Chrom.shape[0] * 0.01: # 当最优个体重复率高达1%时,进行一次高斯变异\n SelCh=ga.mutgau(SelCh, FieldDR, pm) # 高斯变异\n elif problem == 'I':\n SelCh=ga.mutint(SelCh, FieldDR, pm)\n ObjVSel = aimfuc(SelCh) # 求育种个体的目标函数值\n [CombinObjV, weight] = ga.awGA(maxormin * ObjVSel) # 适应性权重法求聚合目标函数值\n FitnVSel = ga.ranking(maxormin * CombinObjV) # 根据聚合目标求育种个体适应度\n if PUN_F is not None:\n [FitnVSel, exIdx] = punishing(Chrom, FitnVSel) # 调用罚函数\n [Chrom,ObjV] = ga.reins(Chrom,SelCh,SUBPOP,1,0.9,FitnV,FitnVSel,ObjV,ObjVSel) #重插入\n if drawing == 2:\n ax = ga.frontplot(NDSetObjV, False, ax, gen + 1) # 绘制动态图\n end_time = time.time() # 结束计时\n #=========================绘图及输出结果=========================\n if drawing != 0:\n ga.frontplot(NDSetObjV,True)\n times = end_time - start_time\n print('用时:', times, '秒')\n print('帕累托前沿点个数:', NDSet.shape[0], '个')\n print('单位时间找到帕累托前沿点个数:', int(NDSet.shape[0] // times), '个')\n # 返回帕累托最优集以及执行时间\n return [ObjV, NDSet, NDSetObjV, end_time - start_time]",
"def __init__(self,fprefix=\"example\",outfroot=\"example\", linkage=True, logl = [], fo=True,oo=False,\r\n labels=['FounderNonAff','FounderAff','OffspringNonAff','OffspringAff']):\r\n missing = ['N','0','.']\r\n # eigenstrat codes\r\n emissval = '9'\r\n ehet = '1'\r\n ehom1 = '0'\r\n ehom2 = '2'\r\n swapdict = {ehom1:ehom2,ehom2:ehom1,ehet:ehet,emissval:emissval} # if initial ref allele was wrong, use these to swap\r\n mdict = dict(zip(missing,missing))\r\n f = file('%s.ped' % fprefix,'r')\r\n if linkage: # read map file\r\n map = readMap(fprefix)\r\n rslist = [x[1] for x in map] # get rs numbers\r\n outmap = file('%s.map' % outfroot,'w')\r\n maps = ['\\t'.join(x) for x in map]\r\n maps.append('')\r\n logl.append('rgPedEigConv.py %s: Writing map file\\n' % (timenow()))\r\n outmap.write('\\n'.join(maps))\r\n else:\r\n head = f.next().strip()\r\n rslist = head.split()\r\n nrs = len(rslist) # number of markers\r\n elen = 2*nrs + 6 # expected # elements on each line\r\n logl.append('rgPedEigConv.py %s: found %d for nrs\\n' % (timenow(),nrs))\r\n eig = {}\r\n eig['founders'] = [array.array('c',[]) for x in xrange(nrs)] # marker rows, subject cols\r\n eig['offspring'] = [array.array('c',[]) for x in xrange(nrs)] # marker rows, subject cols\r\n adicts = [{} for x in xrange(nrs)] # count of alleles in a dict for each marker\r\n refallele = [None for x in xrange(nrs)] # list of first observed alleles\r\n nsubj = 0\r\n indiv = {'founders':[],'offspring':[]}\r\n for lnum,l in enumerate(f):\r\n ll = l.strip().split()\r\n if (lnum+1) % 200 == 0:\r\n logl.append('rgPedEigConv.py %s: Processing line %d\\n' % (timenow(),lnum+1))\r\n if len(ll) < elen: # ? short ?\r\n logl.append('rgPedEigConv.py %s: Line %d is %d long, expected %d\\n' % (timenow(),lnum,len(ll),elen))\r\n else:\r\n nsubj += 1\r\n sid = '%s_%s' % (ll[0],ll[1])\r\n isFounder = isOff = False\r\n iclass = 'founders'\r\n status = labels[0] # founder unaff\r\n if ll[2] <> '0' and ll[3] <> '0': # has parent ids\r\n iclass = 'offspring'\r\n status = labels[2] # unaffected offspring\r\n if ll[5] == '2':\r\n status = labels[3] # affected offspring\r\n else:\r\n if ll[5] == '2':\r\n status = labels[1] #change from unaff to aff founder label\r\n gender = 'M'\r\n if ll[4] == '2':\r\n gender = 'F'\r\n indiv[iclass].append('%s %s %s' % (sid,gender,status)) # for the ind file\r\n for snp in xrange(nrs):\r\n g1,g2 = ll[2*snp + 6],ll[2*snp + 7] # pair of genos\r\n if mdict.get(g1,None) or mdict.get(g2,None): # one or both missing\r\n esnp = emissval # missing value\r\n else:\r\n if not refallele[snp]:\r\n refallele[snp] = g1 # first one we saw!\r\n for g in (g1,g2):\r\n if adicts[snp].get(g,None): # bump\r\n adicts[snp][g] += 1\r\n else:\r\n adicts[snp][g] = 1 # first time\r\n if g1 == g2: # hom\r\n if g1 == refallele[snp]:\r\n esnp = ehom2 # 2 copies of current reference allele\r\n else:\r\n esnp = ehom1 # no copies\r\n else:\r\n esnp = ehet # het - always has one copy of reference allele\r\n eig[iclass][snp].append(esnp) # append the eigenstrat geno code for this new subject\r\n logl.append('rgPedEigConv.py %s: Now checking major allele assignment and fixing as needed\\n' % timenow())\r\n for snp in xrange(nrs): # now check to see if reference = major allele\r\n major = majAllele(adicts[snp])\r\n if major <> refallele[snp]: # either None or we need to change all the codes\r\n if major <> None:\r\n for iclass in eig.keys():\r\n for i in range(len(eig[iclass][snp])):\r\n if eig[iclass][snp][i] <> emissval:\r\n eig[iclass][snp][i] = 
swapdict[eig[iclass][snp][i]]\r\n self.eig = eig\r\n self.indiv = indiv\r\n self.fo = fo\r\n self.oo = oo\r\n self.outfroot = outfroot\r\n self.nrs = nrs\r\n self.fprefix = fprefix\r\n self.logl = logl",
"def __init__(self, space, monomial=None):\n self.space = space\n self.psis = []\n self.red_boundaries = []\n self.irr_boundaries = 0\n self.kappas = []\n self.cherns = []\n self.lambdas = []\n if None != monomial:\n self.monomial = monomial\n for tclass, expon in monomial.decompose_monomial():\n self.recieve(tclass, expon)\n self.recieve = None #make sure we don't double populate it \n else:\n self.monomial = 1",
"def vanilla_gan():\n hparams = common_hparams.basic_params1()\n hparams.label_smoothing = 0.0\n hparams.hidden_size = 128\n hparams.batch_size = 64\n hparams.add_hparam(\"z_size\", 64)\n hparams.add_hparam(\"c_dim\", 1)\n hparams.add_hparam(\"height\", 28)\n hparams.add_hparam(\"width\", 28)\n hparams.add_hparam(\"discriminator_batchnorm\", int(True))\n return hparams",
"def vanilla_gan():\n hparams = common_hparams.basic_params1()\n\n hparams.batch_size = 32\n hparams.label_smoothing = 0.0\n hparams.add_hparam(\"hidden_dim\", 128)\n hparams.add_hparam(\"random_sample_size\", 100)\n hparams.add_hparam(\"height\", 28)\n hparams.add_hparam(\"width\", 28)\n hparams.add_hparam(\"epsilon\", 1e-4)\n return hparams",
"def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = {}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
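The query cell above lists the permitted values for each option that `set_algorithm` stores (`initmethod`, `algos`, `all_neigh`, `alfa_types`). A hedged sketch of how those lists could back a simple validation step before the attributes are set; the helper name and its structure are illustrative and not part of the dataset.

```python
# Allowed option values, copied from the query cell above (spellings kept as-is).
INITMETHODS = ['random', 'pca']
ALGOS = ['seq', 'batch']
NEIGHBORHOODS = ['gaussian', 'manhatan', 'bubble', 'cut_gaussian', 'epanechicov']
ALFA_TYPES = ['linear', 'inv', 'power']


def validate_algorithm_options(initmethod, algtype, neighborhoodmethod, alfatype):
    """Raise ValueError if any option falls outside the documented choices."""
    for name, value, allowed in [
        ('initmethod', initmethod, INITMETHODS),
        ('algtype', algtype, ALGOS),
        ('neighborhoodmethod', neighborhoodmethod, NEIGHBORHOODS),
        ('alfatype', alfatype, ALFA_TYPES),
    ]:
        if value not in allowed:
            raise ValueError(f"{name}={value!r} is not one of {allowed}")


# Matches the defaults of set_algorithm in the document cell above.
validate_algorithm_options('pca', 'batch', 'gaussian', 'inv')
```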
som and bmu_ind depending on the lattice "hexa" or "rect" we have different grid distance functions. bmu_ind is a number between 0 and number of nodes-1. depending on the map size bmu_coord will be calculated and then distance matrix in the map will be returned | def grid_dist(self,bmu_ind):
try:
lattice = getattr(self, 'lattice')
except:
lattice = 'hexa'
print 'lattice not found! Lattice as hexa was set'
if lattice == 'rect':
return rect_dist(self,bmu_ind)
elif lattice == 'hexa':
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
rows = 0.
cols = 0.
pass
#needs to be implemented
print 'to be implemented' , rows , cols
return np.zeros((rows,cols)) | [
"def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):\n\n for dim in range_n_dim: \n limit = int(np.sqrt(len(X)/20))\n if dim > limit: #verify that number of nodes are sensible for size of input data\n return print('Input size too small for map. Largest n should be ' + str(limit))\n else:\n pass\n \n if experiment_name is None:\n save = False\n else:\n if preprocessing is None:\n pass\n else:\n experiment_name = experiment_name+'_'+ preprocessing\n save = True\n\n #apply pre-binning\n if bin_X != False:\n Xbin = xBins(X, bin_X)\n else:\n Xbin = {'0-4000':X}\n\n for b, ids in Xbin.items():\n try:\n A = X.loc[ids,:]\n except:\n A = ids\n #apply preprocessing \n A = preprocessX(A, norm=preprocessing)\n\n centroids = pd.DataFrame()\n stats = pd.DataFrame() \n cluster_lbls = pd.DataFrame()\n\n for dim in range_n_dim: \n \n cluster_lbls_dim = {}\n stats_dim = pd.DataFrame() \n nrow = ncol = dim\n tic = time.time()\n \n #train clustering algorithm\n som = somoclu.Somoclu(nrow, ncol, compactsupport=False, maptype='planar')\n som.train(A)\n toc = time.time()\n \n if transform == None:\n n_clust = [0] \n elif transform == 'kmeans':\n if kwargs is None:\n n_clust = [10]\n else:\n for key, value in kwargs.items(): #create list with number of clusters for kmeans\n if key == 'n_clusters':\n n_clust = value\n else:\n return('Cannot process this transform algorithm')\n \n for n in n_clust:\n if n == 0:\n #create empty matrix the size of the SOM\n m = np.arange(0, nrow*ncol, 1).reshape(nrow, ncol) \n else:\n clusterer = KMeans(n_clusters=n, random_state=10)\n som.cluster(algorithm=clusterer)\n m = som.clusters\n #get cluster of SOM node and assign to input vecors based on bmus\n k = [m[som.bmus[i][1],som.bmus[i][0]] for i in range(0, len(som.bmus))] \n c = pd.DataFrame(A).assign(cluster=k).groupby('cluster').mean()\n \n #calculate scores\n cluster_stats = clusterStats({}, n, A, cluster_labels = k, preprocessing = preprocessing,\n transform = transform, tic = tic, toc = toc)\n cluster_centroids = np.array(c)\n \n eval_results, centroid_results = saveResults(experiment_name, cluster_stats,\n cluster_centroids, dim, b, save)\n \n stats_dim = stats_dim.append(eval_results)\n centroids = centroids.append(centroid_results)\n \n cluster_lbls_dim[n] = k\n \n #outside n_clust loop\n best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)\n cluster_lbls = pd.concat([cluster_lbls, best_clusters],axis=1)\n stats = pd.concat([stats, best_stats], axis=0)\n \n stats.reset_index(drop=True, inplace=True)\n if save is True:\n saveLabels(cluster_lbls, stats)\n \n return stats, centroids, cluster_lbls",
"def _get_nbh_distance_weight_block(\n self, nbh_func: float, bmus: List[Tuple[int, int]]\n ) -> np.ndarray:\n dist_weight_block = np.zeros((len(bmus), self.n_rows, self.n_columns))\n\n for i, bmu_pos in enumerate(bmus):\n dist_weight_block[i] = self._get_nbh_distance_weight_matrix(\n nbh_func, bmu_pos\n ).reshape((self.n_rows, self.n_columns))\n\n return dist_weight_block",
"def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self._n)))\n \n m = self._m\n \n n = self._n\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n \n distancias_matriz = []\n\n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = np.array(distancias_matriz)\n \n \n for vect in input_vects:\n\n # min_index is the index of the BMU\n \n lista_indices = [i for i in range(len(self._weightages))]\n \n min_index = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x]))\n\n # min_index_2 is the index of the 2nd BMU\n \n lista_indices.pop(min_index) # El indice es el mismo que el valor\n \n min_index_2 = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x])) \n \n r2 = np.sqrt(2)\n\n if np.sqrt(distancias_matriz[min_index][min_index_2]) > r2: \n# print('loc 1')\n# print(locaciones[min_index])\n# print('loc 2')\n# print(locaciones[min_index_2])\n contador_adyacentes += 1\n\n\n distance = np.linalg.norm(vect - self._weightages[min_index])\n \n distances.append(distance)\n \n to_return.append(self._locations[min_index]) \n \n # Quantization Error qe (the mean of all distances to the BMU)!\n self.distances = distances \n \n # Topographic error te\n self.proporcion = contador_adyacentes / len(input_vects)\n \n self.prom_dist = np.mean(self.distances)\n \n return to_return",
"def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V",
"def __segment__umap_ward(self, number_of_regions, densmap=False, dims=None, n_neighbors=10):\r\n self.elem_matrix, idx2ij = self.prepare_elem_matrix(dims)\r\n\r\n\r\n self.logger.info(\"UMAP reduction\")\r\n self.dimred_elem_matrix = umap.UMAP(\r\n densmap=densmap,\r\n n_neighbors=n_neighbors,\r\n min_dist=0.0,\r\n n_components=2,\r\n random_state=42,\r\n ).fit_transform(self.elem_matrix)\r\n\r\n self.logger.info(\"Ward reduction\"), \r\n\r\n print(self.dimred_elem_matrix.shape)\r\n\r\n pwdist = pdist(self.dimred_elem_matrix, metric=\"euclidean\")\r\n\r\n print(pwdist.shape)\r\n\r\n Z = spc.hierarchy.ward(pwdist)\r\n self.dimred_labels = spc.hierarchy.fcluster(Z, t=number_of_regions, criterion='maxclust')\r\n return self.dimred_labels",
"def distance_map(self, scaling='sum'):\n\n if scaling not in ['sum', 'mean']:\n raise ValueError(f'scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n 8)) # 2 spots more for hexagonal topology\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n if self.topology == 'hexagonal':\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n w_2 = self._weights[x, y]\n e = y % 2 == 0 # only used on hexagonal topology\n for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0] and\n y+j >= 0 and y+j < self._weights.shape[1]):\n w_1 = self._weights[x+i, y+j]\n um[x, y, k] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=2)\n if scaling == 'sum':\n um = nansum(um, axis=2)\n\n return um/um.max()",
"def find_bmu_idx(self, vec):\r\n\r\n minVal = np.iinfo(np.int).max\r\n idx = 0\r\n target = -1\r\n for node in self.nodeList:\r\n dist = node.get_distance_hamming(vec)\r\n if dist < minVal:\r\n minVal = dist\r\n bmu = node\r\n target=idx\r\n idx+=1\r\n return target",
"def find_bmu(self, vec):\r\n\r\n minVal = np.iinfo(np.int).max\r\n for node in self.nodeList:\r\n dist = node.get_distance_hamming(vec)\r\n if dist < minVal:\r\n minVal = dist\r\n bmu = node\r\n return bmu",
"def get_gaussian_maps(mu, shape_hw, inv_std, mode='rot'):\n mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]\n\n y = torch.linspace(-1.0, 1.0, shape_hw[0]).to(mu.device)\n\n x = torch.linspace(-1.0, 1.0, shape_hw[1]).to(mu.device)\n\n if mode in ['rot', 'flat']:\n mu_y, mu_x = torch.unsqueeze(mu_y, dim=-1), torch.unsqueeze(mu_x, dim=-1)\n\n y = y.view(1, 1, shape_hw[0], 1)\n x = x.view(1, 1, 1, shape_hw[1])\n\n g_y = (y - mu_y)**2\n g_x = (x - mu_x)**2\n dist = (g_y + g_x) * inv_std**2\n\n if mode == 'rot':\n g_yx = torch.exp(-dist)\n else:\n g_yx = torch.exp(-torch.pow(dist + 1e-5, 0.25))\n\n elif mode == 'ankush':\n y = y.view(1, 1, shape_hw[0])\n x = x.view(1, 1, shape_hw[1])\n\n g_y = torch.exp(-torch.sqrt(1e-4 + torch.abs((mu_y - y) * inv_std)))\n g_x = torch.exp(-torch.sqrt(1e-4 + torch.abs((mu_x - x) * inv_std)))\n\n g_y = torch.unsqueeze(g_y, dim=3)\n g_x = torch.unsqueeze(g_x, dim=2)\n g_yx = torch.matmul(g_y, g_x) # [B, NMAPS, H, W]\n\n else:\n raise ValueError('Unknown mode: ' + str(mode))\n\n return g_yx",
"def make_mol_kernel(drugs):\n\n dict_drug = drugs.dict_drug\n dict_ind2mol = drugs.dict_ind2mol\n\n # get the ECFP fingerprints\n nb_mol = drugs.nb\n X_fingerprint = np.zeros((nb_mol, 1024), dtype=np.int32)\n list_fingerprint = []\n # for i in list(dict_ind2mol.keys()):\n for i in range(nb_mol):\n dbid = dict_ind2mol[i]\n m = Chem.MolFromSmiles(dict_drug[dbid])\n list_fingerprint.append(AllChem.GetMorganFingerprint(m, 2))\n arr = np.zeros((1,))\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(m, \n 2, \n nBits=1024), \n arr)\n X_fingerprint[i, :] = arr\n\n # get the Tanimoto Similarity Matrix\n K = np.zeros((len(list_fingerprint), len(list_fingerprint)))\n for i in range(len(list_fingerprint)):\n for j in range(i, len(list_fingerprint)):\n K[i, j] = DataStructs.TanimotoSimilarity(list_fingerprint[i], \n list_fingerprint[j])\n K[j, i] = K[i, j]\n\n return X_fingerprint, K",
"def get_center_of_mass_allies(self,obs):",
"def get_bmu(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> Tuple[int, int]:\n a = self._get_node_distance_matrix(\n datapoint.astype(np.float64), som_array\n )\n\n return np.argwhere(a == np.min(a))[0]",
"def mi_from_dm(distance_matrix, ns, nh, spike_train_list=None):\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n \n if spike_train_list is not None:\n\n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n\n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 0\n \n if i not in members_of_glob:\n for j in nearest_neighbours[i]:\n if j not in members_of_glob:\n if spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1 # count neigbours out of glob\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*f_i # if one neighbour is in glob, all following neighb are as well\n break\n counts.append(c_i)\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += 1 + (nh - 1)*f_i #If in glob, take fraction of remaining neighbours except you\n counts.append(c_i)\n \n counts = np.array(counts)\n \n else:\n \n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 1\n for j in nearest_neighbours[i]:\n if (i != j and abs(i - j)%ns==0 ):\n c_i += 1 \n counts.append(c_i)\n counts = np.array(counts) \n \n I = sum(np.log2(counts*ns/float(nh))) / float(nr)\n\n return I",
"def get_center_of_mass_enemies(self,obs):",
"def get_gaussian_maps_2d(mu, sigma, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n\n y = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[0]), tf.float64)\n x = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[1]), tf.float64)\n\n [x,y] = tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = tf.reshape(xy, [1, nb_landmarks, shape_hw[0], shape_hw[1], 2])\n mu = tf.reshape(mu, [-1, nb_landmarks, 1, 1, 2])\n invsigma = tf.linalg.inv(sigma)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 1, 2, 2])\n pp = tf.tile(invsigma, [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n g_yx = tf.exp(-dist)\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n\n return g_yx",
"def make_static_maps(S,tes):\n Nv,Ne,Nt = S.shape\n # 1. Create B matrix\n B = np.reshape(np.abs(S), (Nv,Ne*Nt)).transpose()\n B_iszero = (B==0)\n B_zerosPerVoxel = np.reshape(sum(B_iszero),(Nv,1))\n GoodVoxels = sum(B_zerosPerVoxel.transpose()==0)\n B[B==0] = 0.000000001\n B = np.log(B)\n # 2. Create the A matrix\n a = np.array([np.ones(Ne),-tes])\n A = np.tile(a,(1,Nt))\n A = np.sort(A)[:,::-1].transpose()\n # 5. Solve the system\n X,res,rank,sing = np.linalg.lstsq(A,B)\n # 6. Extract results\n S0mean = np.exp(X[0,:]).transpose()\n T2Smean = 1.0/X[1,:].transpose()\n GoodVoxels[T2Smean>500] = 0\n GoodVoxels[T2Smean<-500] = 0\n T2Smean[T2Smean>500] = 500\n T2Smean[T2Smean<-500] = -500\n return S0mean, T2Smean, GoodVoxels",
"def _compute_umatrix(self):\n assert self._trained, 'You should train the map first!'\n neuron_distances = np.zeros((2 * self._map.neurons.shape[0] - 1,\n 2 * self._map.neurons.shape[1] - 1))\n neuron_distances -= 1\n for row in range(0, neuron_distances.shape[0]):\n for col in range(0, neuron_distances.shape[1]):\n neuron = row % 2 == 0 and col % 2 == 0\n other = row % 2 != 0 and col % 2 != 0\n if not neuron and not other:\n if row % 2 != 0:\n neuron1_row = (row - 1) / 2\n neuron2_row = (row + 1) / 2\n neuron1 = self._map.neurons[neuron1_row, col / 2, :]\n neuron2 = self._map.neurons[neuron2_row, col / 2, :]\n else:\n neuron1_col = (col - 1) / 2\n neuron2_col = (col + 1) / 2\n neuron1 = self._map.neurons[row / 2, neuron1_col, :]\n neuron2 = self._map.neurons[row / 2, neuron2_col, :]\n dist = self._map._metric(neuron1, neuron2)\n neuron_distances[row, col] = dist\n self.distances = neuron_distances",
"def calc_synLocations(post_branches, n_syns, dist):\n\n\t\t\t\tassert dist in ['uniform', 'random', 'one'], 'Which synapse distribution for %s population? (uniform/random/one) '%self.population_name\n\t\t\t\t\n\t\t\t\tn_branches = len(post_branches)\n\t\t\t\tbranch_locs = {}\n\t\t\t\t\n\t\t\t\tif dist == 'uniform':\n\t\t\t\t\traise Exception('uniform', '{} dist is under construction!'.format(dist))\n\t\t\t\t\t# density = n_syns / L\n\t\t\t\t\t# locs = sorted(np.arange(0, L, 1/density))\n\t\t\t\t\t# locs = [i/L for i in locs]\n\n\t\t\t\t\t# assert len(locs)==n_syns, ['Sanity check warning: unexpected locs length!', pdb.set_trace()]\n\n\t\t\t\telif dist == 'random':\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(n_syns):\n\n\t\t\t\t\t\t# Randomly choose branch\n\t\t\t\t\t\trand_branch_idx = np.random.randint(n_branches)\n\t\t\t\t\t\trand_branch \t = post_branches[rand_branch_idx]\n\t\t\t\t\t\trand_branch_name = rand_branch.name().split('].')[-1]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Randomly choose location\n\t\t\t\t\t\trand_loc = np.random.rand()\n\n\t\t\t\t\t\tif rand_branch_name in branch_locs.keys():\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'].append(rand_loc)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name] \t\t\t\t= {}\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'] \t\t= [rand_loc]\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['branch_obj'] = rand_branch\t\t\t\t\t\t\t\t\n\n\t\t\t\t\tfor key in branch_locs:\n\t\t\t\t\t\tbranch_locs[key]['locs'] = sorted(branch_locs[key]['locs'])\n\t\t\t\t\n\t\t\t\telif dist == 'one':\n\t\t\t\t\tsingle_branch_idx \t= np.random.randint(n_branches)\n\t\t\t\t\tsingle_branch \t \t= post_branches[single_branch_idx]\n\t\t\t\t\tsingle_branch_name \t= single_branch.name().split('].')[-1]\n\t\t\t\t\t\n\t\t\t\t\tbranch_locs[single_branch_name] = {'branch_obj': single_branch, 'locs': [0.5]*n_syns}\n\n\t\t\t\treturn branch_locs",
"def return_BMU_coord(self, sess, input_array):\n output = sess.run([self.distance_matrix,self.distance_argmin], feed_dict={self.input_placeholder: input_array})\n index = output[1] #flatten index\n row = index/self.tot_cols\n col = index - (row*self.tot_cols)\n return index, (row,col)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
helper function to get the next occurring monday as a date object | def _get_next_monday(self):
today = datetime.date.today()
weekday_int = today.weekday()
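# weekday() returns 0 for Monday, so a value of 0 means today is already Monday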
if weekday_int == 0:
return today
next_mon = today + timedelta(7 - weekday_int)
return next_mon | [
"def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)",
"def memorial_day():\n this_year = datetime.datetime.now().year\n # first monday in june minus one week\n this_year_memorial_day = (datetime.datetime(this_year, 6, 1) +\n relativedelta(day=1, weekday=0) -\n datetime.timedelta(weeks=1))\n\n if this_year_memorial_day.replace(hour=23, minute=59) > datetime.datetime.now():\n next_memorial_day = this_year_memorial_day.replace(tzinfo=LOCAL_TIMEZONE)\n else:\n next_memorial_day = (datetime.datetime(this_year + 1, 6, 1) +\n relativedelta(day=1, weekday=0) -\n datetime.timedelta(weeks=1)).replace(tzinfo=LOCAL_TIMEZONE)\n\n return next_memorial_day, next_memorial_day.replace(hour=23, minute=59)",
"def next_weekday(d, wd):\n return (d + 9 - wd) // 7 * 7 - 2 + wd",
"def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1",
"def get_first_monday(dates):\n return 7%dates[0].weekday()",
"def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current",
"def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)",
"def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')",
"def get_mothers_day_date(year):\n may = (datetime(year, 5, k) for k in range(1, 32))\n sundays = filter(lambda x: x.weekday() == 6, may)\n next(sundays)\n return next(sundays).date()",
"def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))",
"def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day",
"def get_next_day(self):\n self.date += timedelta(days=1)\n return self.date",
"def upcoming_dow_to_date(dow, reference_date=None):\n if not reference_date:\n reference_date = date.today()\n d = get_next_day(reference_date)\n while day_of_week(d).lower() != dow:\n d = get_next_day(d)\n return d",
"def _next_weekday(day: datetime.datetime, weekday: int) -> datetime.datetime:\n\n days_ahead = weekday - day.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return day + datetime.timedelta(days_ahead)",
"def get_month_first_monday():\n this_month = datetime.now().month\n this_year = datetime.now().year\n first_day_of_month = date(this_year, this_month, 1)\n first_monday = first_day_of_month + timedelta(\n days =-first_day_of_month.weekday(), weeks = 1)\n return first_monday",
"def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))",
"def get_mothers_day_date(year):\n return date(year, 5, 1) + rd.relativedelta(weekday=rd.SU(+2))",
"def get_next_closest_day(weekday):\n names = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6\n }\n\n today = get_current_india_time().date()\n day_shift = (names[weekday] - today.weekday()) % 7\n next_day = datetime.datetime.combine(\n today + datetime.timedelta(days=day_shift), datetime.time.min)\n\n if next_day.weekday() == today.weekday():\n next_day = next_day + datetime.timedelta(days=7)\n return next_day",
"def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function adding some known todo list items for the test user | def _add_todo_items(self):
todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)
todo_list.save()
items = [
'feed the cats',
'drive to work',
'read a book',
'eat some food',
]
todo_items = []
for item in items:
new_item = ToDoItem(
title=item,
to_do_list=todo_list,
priority=1
)
new_item.save()
todo_items.append(new_item)
return todo_items | [
"def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return",
"def add_todos(self, todo_name, due_date, status):\n\n user_todo = ToDo(todo_name, due_date, status)\n self.all_todos[todo_name] = user_todo",
"def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')",
"def help_todo(self):\n print_say(\"Create your personal TODO list!\", self)\n print(\"Supported Commands: todo <command>\")\n print(\n \"\\tadd [<index>] <todo - comment>, add comment <index> <comment>, add due <index> <time>\")\n print(\"\\tremove <index>\")\n print(\"\\tcomplete <index> [<completion>]\")\n print(\"\\tpriority <index> [<level>]\")\n print(\"\\tlist\")",
"def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')",
"def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"",
"def todo_added(name, description):",
"def if_able_to_create_new_note_and_add_to_note_list(self):\n\t\tself.assertEqual(Note_it.create_note(3), 'My Fourth')\n\t\t#self.assertEqual(Search_note(3), 'My Third')",
"def test_list_user(self):\n pass",
"def add_list_to(context):",
"def add(self, item):\n self.verify_item_string(item)\n self.todos.append(item)\n self.save_todo()",
"def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))",
"def new_item(self, ui_info):\n new_item = ToDoItem()\n self.model.items.append(new_item)",
"def test_user_list_items_endpoint_create_item(\n client, user, is_author, mock_user_list_index\n):\n author = UserFactory.create()\n userlist = UserListFactory.create(\n author=author, privacy_level=PrivacyLevel.public.value\n )\n course = CourseFactory.create()\n\n client.force_login(author if is_author else user)\n\n data = {\"content_type\": \"course\", \"object_id\": course.id}\n\n resp = client.post(\n reverse(\"userlistitems-list\", args=[userlist.id]), data=data, format=\"json\"\n )\n assert resp.status_code == (201 if is_author else 403)\n if resp.status_code == 201:\n assert resp.json().get(\"object_id\") == course.id\n mock_user_list_index.upsert_user_list.assert_called_once_with(userlist.id)",
"def create_task(self,task):\n if valid_task(task) != \"Valid\":\n return valid_task(task)\n todo_task = [x for x in todo_list if x[\"task\"] == task]\n if len(todo_task) != 0:\n return \"Task already exist. Input a different task\"\n id = len(todo_list) + 1\n todo_item ={\n \"id\": id,\n \"task\": task\n }\n # user_todo.append(todo_item)\n todo_list.append(todo_item)\n return \"Successfully Created Todo task\"",
"def test_list_notes(self):\n pass",
"def test_notes_list(self):\n pass",
"def test_v1alpha3_userlist(self):\n pass",
"def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function adding some known todo list items for the test user for the previous day | def _backfill_todo_items_for_previous_day(self):
previous_day_date = self.day.date - timedelta(days=1)
day, created = Day.get_or_create(date=previous_day_date)
todo_list = ToDoList(day=day, user=self.user.user.rolllistuser)
todo_list.save()
items = [
'cut the grass',
'water the plants',
'take out the trash',
]
todo_items = []
for item in items:
new_item = ToDoItem(
title=item,
to_do_list=todo_list,
priority=1
)
new_item.save()
todo_items.append(new_item)
return todo_items | [
"def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items",
"def todo_added(name, description):",
"def add_todos(self, todo_name, due_date, status):\n\n user_todo = ToDo(todo_name, due_date, status)\n self.all_todos[todo_name] = user_todo",
"def repopulate_todo_list(self):\n while self.ballots_completed:\n self.ballots_todo.append(self.ballots_completed.pop())",
"def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return",
"async def test_todo_feed_response_is_ordered_correctly(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res = await authorized_client.get(app.url_path_for(\"feed:get-todo-feed-for-user\"))\n assert res.status_code == status.HTTP_200_OK\n todo_feed = res.json()\n # the first 13 should be updated and the rest should not be updated\n for feed_item in todo_feed[:13]:\n assert feed_item[\"event_type\"] == \"is_update\"\n for feed_item in todo_feed[13:]:\n assert feed_item[\"event_type\"] == \"is_create\"",
"def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')",
"def extra_tasks_for_today(self):\n\n return []",
"def create_task(self,task):\n if valid_task(task) != \"Valid\":\n return valid_task(task)\n todo_task = [x for x in todo_list if x[\"task\"] == task]\n if len(todo_task) != 0:\n return \"Task already exist. Input a different task\"\n id = len(todo_list) + 1\n todo_item ={\n \"id\": id,\n \"task\": task\n }\n # user_todo.append(todo_item)\n todo_list.append(todo_item)\n return \"Successfully Created Todo task\"",
"def new_item(self, ui_info):\n new_item = ToDoItem()\n self.model.items.append(new_item)",
"def testAddsAcrossDays(self):\r\n self._tester.AddFollowers(self._cookie, self._vp_id,\r\n [{'identity': 'Email:user1@emailscrubbed.com'}])\r\n\r\n # Add 24 hours to timestamp.\r\n act_dict = self._tester.CreateActivityDict(self._cookie)\r\n act_dict['timestamp'] += constants.SECONDS_PER_DAY\r\n self._tester.AddFollowers(self._cookie, self._vp_id,\r\n [{'identity': 'Email:user1@emailscrubbed.com'},\r\n {'user_id': self._extra_users[0].user_id}],\r\n act_dict=act_dict)\r\n\r\n # Subtract 24 hours from timestamp.\r\n act_dict = self._tester.CreateActivityDict(self._cookie)\r\n act_dict['timestamp'] -= constants.SECONDS_PER_DAY\r\n self._tester.AddFollowers(self._cookie, self._vp_id,\r\n [{'identity': 'Email:user1@emailscrubbed.com'},\r\n {'user_id': self._extra_users[0].user_id},\r\n {'user_id': self._extra_users[1].user_id}],\r\n act_dict=act_dict)",
"def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')",
"def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are some new todos\n assert event.todoitem_set.all().count() == 9",
"def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"",
"def if_able_to_create_new_note_and_add_to_note_list(self):\n\t\tself.assertEqual(Note_it.create_note(3), 'My Fourth')\n\t\t#self.assertEqual(Search_note(3), 'My Third')",
"def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)",
"def add_new_todo(self, newtodo):\n for l in range(0, len(self.contents)):\n if self.contents[l].strip() == ',INBOX':\n num_tabs = indent_count(self.contents[l]) + 1\n indented_todo = '\\t' * num_tabs + newtodo\n self.contents[l:l + 1] = [',INBOX', indented_todo]\n self.sync()\n return True\n return False",
"def split_todo(self, original, addition):\n \"\"\" Given an original todo, insert a new one underneath it \"\"\"\n self.contents[original.linenum:original.linenum+1] = [str(original), str(addition)]\n self.sync()",
"def create_todo(todo_text, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return TodoItem.objects.create(todo_text=todo_text, pub_date=time)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function adding some known schedule items for the test user | def _add_schedule_items(self):
schedules = [
{
'start_time': '9:30 AM',
'end_time': '10:00 AM',
'title': 'Daily Scrum',
'location': 'Hogwarts',
'day': self.day,
'user': self.user.user.rolllistuser,
},
{
'start_time': '10:30 AM',
'end_time': '11:00 AM',
'title': 'Engineering Interview',
'location': 'Narnia',
'day': self.day,
'user': self.user.user.rolllistuser,
},
{
'start_time': '12:00 PM',
'end_time': '12:30 PM',
'title': 'Lunch',
'location': 'Kitchen',
'day': self.day,
'user': self.user.user.rolllistuser,
},
{
'start_time': '2:00 PM',
'end_time': '2:30 PM',
'title': 'Workout',
'location': 'Gym',
'day': self.day,
'user': self.user.user.rolllistuser,
},
]
recurring_item_data = {
'start_time': '3:00 PM',
'end_time': '3:30 PM',
'title': 'Recurring thing',
'location': 'asdf',
'day': self.day,
'user': self.user.user.rolllistuser,
}
schedule_items = []
schedule_dict = {i['start_time']: i for i in schedules}
for schedule in schedules:
save_data = schedule
save_data['start_time'] = get_relevant_time_id(schedule['start_time'])
save_data['end_time'] = get_relevant_time_id(schedule['end_time'])
new_schedule_item = ScheduleItem(**save_data)
new_schedule_item.save()
schedule_items.append(new_schedule_item)
save_data = recurring_item_data
save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])
save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])
new_schedule_item = ScheduleItem(**save_data)
new_schedule_item.save()
new_schedule_item.make_recurring([0])
schedule_items.append(new_schedule_item)
return schedule_items, schedule_dict | [
"def test_add_recurring_schedule(self):\n pass",
"def test_list_schedules(self):\n pass",
"def _create_schedules(self):\n\n ''''''",
"def test_create_schedule(self):\r\n pass",
"def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n self.add_post(ready, API.url_schl, self.schedules)\r\n if payload['link']:\r\n for link in payload['link']:\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=link)\r\n # Post request\r\n self.add_post(link, API.url_link, self.links)",
"def add_schedule(doc_user, date, schedule, logger):\n #my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar == None:\n logger.info('{}: calendar start'.format(doc_user[\"user_id\"]))\n my_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_calendar.insert_one(my_calendar)\n\n if not schedule:\n return False\n\n if len(schedule) > 5:\n logger.info('{}: day schedules are already full'.format(\n doc_user[\"user_id\"]))\n return False\n\n ret = 0\n for s in schedule:\n my_calendar[\"schedules\"] += [{\"date\": date,\n \"events_list\": [s]}]\n logger.info('{}: {} added into schedule'.format(\n date, s))\n ret += 1\n\n if ret >= 1:\n col_calendar.find_one_and_replace({\"User\": doc_user[\"_id\"]}, my_calendar)\n\n return True",
"def test_update_housekeeping_schedule(self):\n pass",
"def test_schedule_batch(self):\n pass",
"def test_schedule(request, resource_name, role_name, estimated_hrs):\n Fluent(request).task('task-2').schedule().add_resource(resource_name, role_name).set_role_hrs(estimated_hrs).verify()",
"def setup_schedules(self, adapter):\n pass",
"def test_adding_schedule_success(self):\n\n # prepare\n date = [\"2013-04-20\", \"2013-04-21\", \"2013-04-22\"]\n title = [\"MA for 2013-04\", \"Football\", \"Shopping\"]\n detail = [\"Time schedule is from 10:00 to 14:00\",\n \"Plays football at Shin-Yokohama park.\",\n \"Shopping with friends at Yokohama Station\"]\n\n expected_date = [\"2013-04-20\", \"2013-04-21\",\n \"2013-04-22\", \"2019-10-31\"]\n expected_title = [\"MA for 2013-04\", \"Football\",\n \"Shopping\", \"adding_title_for_2019_10_31\"]\n expected_detail = [\"Time schedule is from 10:00 to 14:00\",\n \"Plays football at Shin-Yokohama park.\",\n \"Shopping with friends at Yokohama Station\",\n \"adding_detail_for_2019_10_31\"]\n\n edit_date = \"2019-10-31\"\n edit_title = \"adding_title_for_2019_10_31\"\n edit_detail = \"adding_detail_for_2019_10_31\"\n\n # execute\n actual_date, actual_title, actual_detail = CalenderProcess.add(\n date, title, detail, edit_date, edit_title, edit_detail)\n\n # assert\n self.assertTrue(expected_date, actual_date)\n self.assertTrue(expected_title, actual_title)\n self.assertTrue(expected_detail, actual_detail)",
"def test_send_with_schedule(self):\n pass",
"def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_schl, self.schedules)\r\n if 'link' in payload.keys() and payload['link'] != [{}]:\r\n b2 = self.link(self.schedules[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False",
"def _add_games_to_schedule(self, schedule, game_type, year):\n for item in schedule:\n game = Game(item, game_type, year)\n self._games.append(game)",
"def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def addSchedule(self, schedule):\n\t\tassert isinstance(schedule, Schedule)\n\t\tself.schedules.append(schedule)\n\t\tself.sorted = False",
"def test_update_report_schedule(self):\n pass",
"def test_get_schedule(self):\n self.assertEqual(subject_info.get_schedule(\"tdt4145\")['course']['summarized'][0]['acronym'], \"FOR\")\n self.assertEqual(subject_info.get_schedule(\"tdt4100\")['course']['summarized'][0]['acronym'], \"LAB\")\n self.assertFalse(subject_info.get_schedule(\"tdt123\"))\n self.assertFalse(subject_info.get_schedule(123))\n self.assertFalse(subject_info.get_schedule(\"tdt4120\"))",
"def test_remove_recurring_schedule(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a list of victory conditions based on the size of the board | def create_victory_conditions(size): #Written by Cody West. Not used in current program, could be used to make boards of different sizes
victory_conditions = []
for i in range(size):
horizontal_victory = []
for n in range(size):
horizontal_victory.append(size*i+n)
victory_conditions.append(horizontal_victory)
for i in range(size):
vertical_victory = []
for n in range(size):
vertical_victory.append(size*n+i)
victory_conditions.append(vertical_victory)
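# main diagonal win: cells i*(size+1), i.e. top-left to bottom-right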
diagonal_victory_1 = []
for i in range(size):
diagonal_victory_1.append(size*i+i)
victory_conditions.append(diagonal_victory_1)
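# anti-diagonal win: cells (i+1)*(size-1), i.e. top-right to bottom-left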
diagonal_victory_2 = []
for i in range(size):
diagonal_victory_2.append((i+1)*size-(i+1))
victory_conditions.append(diagonal_victory_2)
return(victory_conditions) | [
"def build_a_board(self, size):\n return [self.EMPTY for i in range(size*size)]",
"def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))",
"def board(constraints):\n rows = len(constraints[0])\n columns = len(constraints[1])\n board = []\n for i in range(rows):\n board.append([Empty for k in range(columns)])\n return board",
"def scratch_board():\n return [[False] * BOARD_SIZE for _ in range(BOARD_SIZE)]",
"def possible_moves(size):\n\n return [2 ** (n - 1) for n in range(size, 0, -1)]",
"def create_chessboard(size=8):\r\n for row in range(size):\r\n if row % 2 == 0:\r\n print(''.join([WHITE if tile % 2 == 0 else BLACK for tile in range(size)]))\r\n else:\r\n print(''.join([BLACK if tile % 2 == 0 else WHITE for tile in range(size)]))",
"def make_board():\n return [[0 for i in range(8)] for i in range(8)]",
"def generateQueenAttacks(boardsize, pos):\n assert isinstance(pos, Position) and validatePosition(boardsize, pos)\n attackList = []\n startPos = Position(pos.x, pos.y)\n \n def addAttackList(pos):\n for attacked in attackList:\n if pos.compare(attacked):\n return\n attackList.append(Position(pos.x, pos.y))\n\n #positive x\n while pos.x < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #positive y\n while pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative x\n while pos.x >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative y\n while pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x +y left bottom\n while pos.x >= 0 and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x -y left top\n while pos.x >= 0 and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x +y right bottom\n while pos.x < boardsize and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x -y right top\n while pos.x < boardsize and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n\n return attackList",
"def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]",
"def create_chessboard(size=8):\n for i in range(size):\n pattern = WHITE + BLACK\n if i % 2:\n pattern = pattern[::-1]\n print(pattern * int(size / 2))",
"def _create_board_symbol_list(self):\n rows = []\n for y in range(8,0,-1):\n row = [y]\n for x in range(1,9):\n if self.board.get((x, y)):\n row.append(self.board[(x,y)].symbol)\n else:\n row.append(\" \")\n rows.append(row)\n return rows",
"def random_board_state(board_width, board_height):\n board = []\n\n for h in range(0, board_height):\n width_list = []\n for w in range(0, board_width):\n random_number = random.random() # generates a random value for each row.\n if random_number >= 0.5:\n width_list.append(1)\n else:\n width_list.append(0)\n board.append(width_list)\n\n return board",
"def generate_all_squares_on_board():\n return [[1, i, j] for i in range(8) for j in range(8)]",
"def init_board(size):\n if size < 1:\n raise ValueError(\"The board size must be 4 or more.\")\n\n return [0] * size",
"def get_complete_3D_action_list():\n # Action is a tuple tile_type,nbr_to_move, row_to_move_to\n # 5 * 5 * 6 = 150 possibilities\n actions = list()\n for tt in range(0,5):\n for i in range(1,6): # the final value represents 5 or more\n for row in range(0,6):\n actions.append((tt,i,row))\n return actions",
"def possibleBoards(self):\n\t\t#With a little inspiration from: https://github.com/ryanwilsonperkin/rushhour\n\t\t\n\t\tvehicles = self.vehicles\n\t\t\n\t\t#Create an empty list for the possible board instances\n\t\tpossibleBoards = []\n\t\tfor vehicle in self.vehicles:\n\t\t\n\t\t\t#If the orientation of the given vehicle is horizontal:\n\t\t\tif vehicle.orientation == \"HORIZONTAL\":\n\t\t\n\t\t\t\t#1.Check if left space from vehicle is empty\n\t\t\t\tleftX_of_vehicle = vehicle.coordinates[0][0]\n\t\t\t\ty_of_vehicle = vehicle.coordinates[0][1]\n\t\t\t\tif leftX_of_vehicle > 0:\n\t\t\t\t\tfor x in reversed(range(leftX_of_vehicle)):\n\t\t\n\t\t\t\t\t\t#If the space is empty, shift the vehicle to the left\n\t\t\t\t\t\tif self.board[y_of_vehicle][x] == '.':\n\t\t\t\t\t\t\tshift = x - leftX_of_vehicle\n\t\t\n\t\t\t\t\t\t\t#Create a new coordinate of the shifted vehicle\n\t\t\t\t\t\t\tnewCoordinates = [ (x[0]+shift,y_of_vehicle) for x in vehicle.coordinates]\n\t\t\n\t\t\t\t\t\t\t#With new coordinate of shifted vehicle, create a new board instance\n\t\t\t\t\t\t\tnewVehicle = Vehicle(vehicle.id, newCoordinates, vehicle.orientation)\n\t\t\t\t\t\t\tnewVehicles = vehicles.copy()\n\t\t\t\t\t\t\tnewVehicles.remove(vehicle)\n\t\t\t\t\t\t\tnewVehicles.append(newVehicle)\n\t\t\t\t\t\t\tpossibleBoards.append( Board(newVehicles, self, self.layer+1 ) )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\n\t\t\t\t#2.Check if right space from vehicle is empty\n\t\t\t\trightX_of_vehicle = vehicle.coordinates[-1][0]\n\t\t\t\tif rightX_of_vehicle < self.width:\n\t\t\t\t\tfor x in range(rightX_of_vehicle+1, self.width+1):\n\t\t\n\t\t\t\t\t\t#If the space is empty, shift the vehicle to the right\n\t\t\t\t\t\tif self.board[y_of_vehicle][x] == '.':\n\t\t\t\t\t\t\tshift = x - rightX_of_vehicle\n\t\t\n\t\t\t\t\t\t\t#Create a new coordinate of the shifted vehicle\n\t\t\t\t\t\t\tnewCoordinates = [ (x[0]+shift,y_of_vehicle) for x in vehicle.coordinates]\n\t\t\n\t\t\t\t\t\t\t#With new coordinate of shifted vehicle, create a new board instance\n\t\t\t\t\t\t\tnewVehicle = Vehicle(vehicle.id, newCoordinates, vehicle.orientation)\n\t\t\t\t\t\t\tnewVehicles = vehicles.copy()\n\t\t\t\t\t\t\tnewVehicles.remove(vehicle)\n\t\t\t\t\t\t\tnewVehicles.append(newVehicle)\n\t\t\t\t\t\t\tpossibleBoards.append( Board(newVehicles, self, self.layer+1 ) )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t#Else if the orientation of the given vehicle is vertical:\n\t\t\telse:\n\t\t\n\t\t\t\t#1.Check if space above of the vehicle is empty\n\t\t\t\tupperY_of_vehicle = vehicle.coordinates[0][1]\n\t\t\t\tx_of_vehicle = vehicle.coordinates[0][0]\n\t\t\t\tif upperY_of_vehicle > 0:\n\t\t\t\t\tfor y in reversed(range(upperY_of_vehicle)):\n\t\t\n\t\t\t\t\t\t#If the space is empty, shift the vehicle upwards\n\t\t\t\t\t\tif self.board[y][x_of_vehicle] == '.':\n\t\t\t\t\t\t\tshift = y -upperY_of_vehicle\n\t\t\n\t\t\t\t\t\t\t#Create a new coordinate of the shifted vehicle\n\t\t\t\t\t\t\tnewCoordinates = [ (x_of_vehicle,y[1]+shift) for y in vehicle.coordinates]\n\t\t\n\t\t\t\t\t\t\t#With new coordinate of shifted vehicle, create a new board instance\n\t\t\t\t\t\t\tnewVehicle = Vehicle(vehicle.id, newCoordinates, vehicle.orientation)\n\t\t\t\t\t\t\tnewVehicles = vehicles.copy()\n\t\t\t\t\t\t\tnewVehicles.remove(vehicle)\n\t\t\t\t\t\t\tnewVehicles.append(newVehicle)\n\t\t\t\t\t\t\tpossibleBoards.append( Board(newVehicles, self, self.layer+1 ) )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\n\t\t\t\t#2.Check if space below the vehicle is 
empty\n\t\t\t\tlowerY_of_vehicle = vehicle.coordinates[-1][1]\n\t\t\t\tif lowerY_of_vehicle < self.height:\n\t\t\t\t\tfor y in range(lowerY_of_vehicle+1,self.height+1):\n\t\t\n\t\t\t\t\t\t#If the space is empty, shift the vehicle down\n\t\t\t\t\t\tif self.board[y][x_of_vehicle] == '.':\n\t\t\t\t\t\t\tshift = y - lowerY_of_vehicle\n\t\t\n\t\t\t\t\t\t\t#Create a new coordinate of the shifted vehicle\n\t\t\t\t\t\t\tnewCoordinates = [ (x_of_vehicle,y[1]+shift) for y in vehicle.coordinates]\n\t\t\n\t\t\t\t\t\t\t#With new coordinate of shifted vehicle, create a new board instance\n\t\t\t\t\t\t\tnewVehicle = Vehicle(vehicle.id, newCoordinates, vehicle.orientation)\n\t\t\t\t\t\t\tnewVehicles = vehicles.copy()\n\t\t\t\t\t\t\tnewVehicles.remove(vehicle)\n\t\t\t\t\t\t\tnewVehicles.append(newVehicle)\n\t\t\t\t\t\t\tpossibleBoards.append( Board(newVehicles, self, self.layer+1 ) )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\n\t\treturn possibleBoards",
"def board_possibility_counter(board):\n\tcounts_board = []\n\tfor row in range(10):\n\t\tcounts_row = []\n\t\tfor col in range(10):\n\t\t\tcount = 0\n\t\t\tif(board[row][col] == '?'):\n\t\t\t\tships = unsunk_ships(board)\n\t\t\t\tleft_unknown = count_unknown_spaces(board, row, col, 'left')\n\t\t\t\tright_unknown = count_unknown_spaces(board, row, col, 'right')\n\t\t\t\tup_unknown = count_unknown_spaces(board, row, col, 'up')\n\t\t\t\tdown_unknown = count_unknown_spaces(board, row, col, 'down')\n\t\t\t\tcount += line_possibility_counter(left_unknown, right_unknown, 1, ships)\n\t\t\t\tcount += line_possibility_counter(up_unknown, down_unknown, 1, ships)\n\t\t\tcounts_row.append(count)\n\t\tcounts_board.append(counts_row)\n\treturn counts_board",
"def create_chessboard(size=8):\n r1 = (WHITE + BLACK) * int((size / 2)) + \"\\n\"\n r2 = (BLACK + WHITE) * int((size / 2)) + \"\\n\"\n print((r1 + r2) * int((size / 2)))",
"def buildP(self):\n self.p0 = [ [ True for i in range(256) ] for j in range(256) ]\n for i in range(256):\n for j in range(256):\n if(not ((j-i) % 256) in cavetable):\n p0[i][j] = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates RSCU values for each codon | def calculate_rscu(handle: str, genetic_code_num: int, min_len_threshold: int = 200, gene_analysis: bool = False,
save_file: bool = False, file_name: str = 'RSCU_report', folder_path: str = 'Report') -> \
dict[str, float | dict[str, float]]:
records = parse(handle, 'fasta')
references = filter_reference(records, min_len_threshold)
if gene_analysis:
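# per-gene mode: compute a separate RSCU profile for each filtered sequence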
rscu_dict = dict()
for i, seq in enumerate(references):
rscu_dict.update({f'gene_{i + 1}': RSCU([seq], genetic_code_num)})
if save_file:
name = file_name + '.xlsx'
make_dir(folder_path)
file_path = join(folder_path, name)
if is_file_writeable(file_path):
df = pd.DataFrame.from_records(
[
(gene, codon, rscu_val)
for gene, rscu_vals in rscu_dict.items()
for codon, rscu_val in rscu_vals.items()
],
columns=['Gene', 'Codon', 'RSCU_vals']
)
df.to_excel(file_path, float_format='%.4f', columns=df.columns)
print(f'The RSCU score file can be found at: {abspath(file_path)}')
else:
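# aggregate mode: compute a single RSCU profile over all filtered sequences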
reference = filter_reference(records, min_len_threshold)
rscu_dict = RSCU(reference, genetic_code_num)
if save_file:
name = file_name + '.xlsx'
make_dir(folder_path)
file_path = join(folder_path, name)
if is_file_writeable(file_path):
df = pd.DataFrame.from_records(
[
(codon, rscu_val)
for codon, rscu_val in rscu_dict.items()
],
columns=['Codon', 'RSCU_vals']
)
df.to_excel(file_path, float_format='%.4f', columns=df.columns)
print(f'The RSCU score file can be found at: {abspath(file_path)}')
return rscu_dict | [
"def compute_score(list_scu_ids):\n\tsum_scu = 0\n\tfor scu_id in list_scu_ids:\n\t\tsum_scu += scu_dict[scu_id]\n\treturn sum_scu",
"def calculate_Ricci_tensor(self, riemann, simplify):\n\n ricci = Ricci(index_dict=riemann.index_dict) \n ricci.convert_to_shorthand()\n\n dim = len(ricci.index_dict)\n for i in range(dim):\n for j in range(dim):\n m = ricci.index_dict[i] # mu \n n = ricci.index_dict[j] # nu \n mn = m+n \n for l in range(dim):\n l_str = ricci.index_dict[l] # lambda\n lmln = l_str+m+l_str+n \n ricci.elements[mn] += riemann.elements[lmln] \n\n if simplify is True:\n ricci.elements[mn] = sp.simplify(ricci.elements[mn])\n return ricci",
"def calc_tcre(self):\n #Set up the 1%/yr concentration ramp\n t = np.arange(0,600.0)\n\n conc_opc = self.c0 * np.exp( np.log(1.01) * (t))\n rf_opc = self.a * np.log(conc_opc/self.c0)\n \n self.TCRE = np.zeros_like(self.TCR)\n \n for i in range(0,len(self.TCR)):\n temps_opc, emms_opc = fair_scm_emsback(rf_opc,other_rf= np.zeros_like(rf_opc),TCR=float(self.TCR[i]),\n ECS=float(self.ECS[i]),a=self.a,rT=float(self.s_temp[i]),\n r0=float(self.iirf_100_preind[i]),rC = float(self.s_cu[i]),\n C_0=self.c0,d1=self.d[1],d2=self.d[0])\n cems_opc = np.cumsum(emms_opc)\n \n self.TCRE[i] = temps_opc[np.argmin(np.abs(cems_opc-1000.0))]\n \n return",
"def _calculate_crear(self):\n crea_cutoffs = [90., 110., 130., 150., 170., 210., 250., 999.]\n crea_risk_ints = np.array([0, 1, 2, 3, 4, 5, 6, 8])\n _crea = self.X['Creatinine'].to_numpy()\n _crea_mask = np.array([(_crea < cut) for cut in crea_cutoffs])\n\n _crea_risk = np.take(crea_risk_ints, _crea_mask.argmax(axis=0))\n self.X['_crea_risk'] = _crea_risk\n return _crea_risk",
"def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total",
"def get_crime_rate(crime):#=d1Data.get_US_crime()\n crimeRates_list = []\n for i in range(0,len(crime)):\n crimeRates = list(crime[i])\n crimeRates[2:] = list(round(100000*crimeRates[j]/crimeRates[1],1) for j in range(2,len(crime[0])))\n crimeRates_list.append(crimeRates)\n return(crimeRates_list)",
"def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs",
"def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}",
"def calc_perc_reducts():\n rcp26_ozone_f = 'ozone_rcp26_N48_1999_2110v2.nc'\n cube = iris.load(data_dir+rcp26_ozone_f)[0]\n \n iris.coord_categorisation.add_year(cube,'t',name='year')\n iris.coord_categorisation.add_month(cube,'t',name='month')\n cube = cube.extract(iris.Constraint(year = lambda y: y >=2009))\n\n cube_rates = np.ones((cube.shape))\n #Loop over the months and calculate the changes from the previous year\n #Calculate the year on year proportional changes in the global mean\n for i in range(12,cube.shape[0]):\n cube_rates[i] = cube[i].data / cube[(i-12)].data\n\n\n return cube_rates",
"def calculate_MRR(ranks):\n receiporal_ranks = 0\n for i in range(len(ranks)):\n receiporal_ranks = receiporal_ranks + 1/(ranks)\n mrr = (1/len(ranks))*receiporal_ranks\n return mrr",
"def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc",
"def findRFCentre(self):\n self.resetDataArray()\n for c1 in np.arange(self.corrector1Min, self.corrector1Max+self.correctorStepSize, self.correctorStepSize):\n self.machine.setCorr(self.corrector1, c1)\n self.machine.setCorr(self.corrector2, 0)\n self.machine.setLinac1Amplitude(self.linacLowerAmp)\n time.sleep(self.sleepTime)\n dataLow = self.getData()\n self.machine.setLinac1Amplitude(self.linacUpperAmp)\n time.sleep(self.sleepTime)\n dataHigh = self.getData()\n self.setDataArray(c1, 0, dataLow - dataHigh)\n c1Min = self.findC1Min()\n print 'c1Min = ', c1Min\n self.resetDataArray()\n for c2 in np.arange(self.corrector2Min, self.corrector2Max+self.correctorStepSize, self.correctorStepSize):\n self.machine.setCorr(self.corrector1, c1Min)\n self.machine.setCorr(self.corrector2, c2)\n self.machine.setLinac1Amplitude(self.linacLowerAmp)\n time.sleep(self.sleepTime)\n dataLow = self.getData()\n self.machine.setLinac1Amplitude(self.linacUpperAmp)\n time.sleep(self.sleepTime)\n dataHigh = self.getData()\n self.setDataArray(c1Min, c2, dataLow - dataHigh)\n c2Min = self.findC2Min()\n print 'c2Min = ', c2Min\n self.machine.setCorr(self.corrector1, c1Min)\n self.machine.setCorr(self.corrector2, c2Min)",
"def rescore(c):\n\treturn np.where(scores == c)[0][0]",
"def rcas(self, verbose=True, simu=True, syst=True):\n # Initialisation\n self.rcasvalues = {'rcas': N.nan, 'rcas.err': N.nan, 'rcas.stat': N.nan,\n 'rcas.syst': N.nan, 'rcas.mean': N.nan,\n 'rcas_lbd': [N.nan, N.nan, N.nan, N.nan]}\n if self.init_only:\n return\n\n min_1 = 3620\n max_1 = 3716\n min_2 = 3887\n max_2 = 4012\n\n try:\n rcas_value = (self._integration(self.x, self.y, imin=min_2, imax=max_2,\n verbose=verbose)) / \\\n (self._integration(self.x, self.y,\n imin=min_1,\n imax=max_1,\n verbose=verbose))\n except TypeError:\n if verbose:\n print >> sys.stderr, 'ERROR in compute of rcas'\n rcas_value = float(N.nan)\n\n if simu:\n if not N.isfinite(rcas_value):\n return [float(N.nan), float(N.nan)]\n\n rcas_simu = []\n for simu in self.simulations:\n try:\n rcas_simu.append(simu.rcas(simu=False, syst=False,\n verbose=False))\n except TypeError:\n continue\n rcas_sigma = self.std2(N.array(rcas_simu)[N.isfinite(rcas_simu)],\n rcas_value)\n rcas_mean = N.mean(N.array(rcas_simu)[N.isfinite(rcas_simu)])\n\n if N.isfinite(rcas_value):\n self.rcasvalues = {'rcas': float(rcas_value),\n 'rcas.err': float(rcas_sigma),\n 'rcas.stat': float(rcas_sigma),\n 'rcas.mean': float(rcas_mean),\n 'rcas_lbd': [float(min_1),\n float(max_1),\n float(min_2),\n float(max_2)]}\n\n if syst:\n rcas_syst = []\n for system in self.syst:\n try:\n rcas_syst.append(system.rcas(simu=False,\n syst=False, verbose=False))\n except TypeError:\n continue\n rcas_sigma_syst = self.std2(\n N.array(rcas_syst)[N.isfinite(rcas_syst)], rcas_value)\n\n if N.isfinite(rcas_sigma_syst):\n rcas_sigma = float(N.sqrt(rcas_sigma**2 + rcas_sigma_syst**2))\n else:\n rcas_sigma *= 2\n self.rcasvalues['rcas.syst'] = float(rcas_sigma_syst)\n self.rcasvalues['rcas.err'] = float(rcas_sigma)\n\n return [float(rcas_value), float(rcas_sigma)]\n\n if simu is False and syst is False:\n\n if N.isfinite(rcas_value):\n self.rcasvalues = {'rcas': float(rcas_value),\n 'rcas_lbd': [min_1, max_1, min_2, max_2]}\n return rcas_value",
"def calc_cop():\n df = pp.load_csv_file('COP_in.csv', 'metrics_data') \n df = pp.clean_dataframe(df, 5)\n\n df_cop = df['LP01LM01_QQ'] / df['SJ01_SM01']\n df_cop = df_cop.replace(to_replace=np.nan, value = 0, inplace=False)\n \n return df_cop",
"def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0",
"def calculate_icremental_value(self, data):\n for i in data:\n self.dates.append(int(i[1]))\n self.dates.append(int(i[2]))\n\n for i in range(len(data) - 1):\n if data[i][0] == data[i + 1][0]:\n if data[i][1] == data[i][2]:\n self.res.append((data[i][0], data[i][3]))\n else:\n iterations = int(data[i][2]) - int(data[i][1])\n if iterations == 1:\n self.res.append((data[i][0], float(data[i][3]) + float(data[i - 1][3])))\n elif iterations > 1:\n self.icremental_value_iterations(data, i, iterations)\n else:\n if data[i][1] == data[i][2]:\n self.res.append((data[i][0], data[i][3]))\n else:\n self.res.append((data[i][0], float(data[i][3]) + float(data[i - 1][3])))\n\n if data[-1][1] == data[-1][2]:\n self.res.append((data[-1][0], data[-1][3]))\n elif int(data[-2][1]) + 1 == int(data[-1][2]):\n self.res.append((data[-1][0], float(data[-1][3]) + float(data[-2][3])))\n else:\n inc_val = 0\n for i in range(int(data[-1][2]) - int(data[-1][1]) + 1):\n inc_val += float(data[-1 - i][3])\n self.res.append((data[-1][0], inc_val))",
"def compute_crime_rate(model):\n crime_rate = (crime_number / model.pop_count) * 1000\n return crime_rate",
"def qcd_cc( s, m, r, u ):\n\n l2_min = r\n l3_min = u\n l1_min = (m+1)/2\n l1_max = l2_max = l3_max = (3*s+m+2*r+2*u)/2\n\n S = 0\n for l1 in range(l1_min, l2_max+1):\n for l2 in range(l2_min, l2_max+1):\n for l3 in range(l3_min, l3_max+1):\n n1 = 2*l1 + l2 + l3 - 2*s - m - r - u\n n2_t2 = -2*(l1+l2+l3) + 3*s + m + 2*r + 2*u\n n3 = l2-r\n n4 = l3-u\n if n2_t2%2 != 0:\n continue\n n2 = n2_t2/2\n if n1 < 0 or n2 < 0 or n3 < 0 or n4 < 0:\n continue\n\n denom = factorial(n1)*factorial(n2)*factorial(n3)*factorial(n4)*factorial(3)**n1*factorial(4)**n2*factorial(m)*factorial(r)**2*factorial(u)**2\n\n nom = double_factorial(2*l1-1)*factorial(l2)*factorial(l3)\n S+= Fraction(nom, denom)\n\n return S"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return a set of nodes that are within max_dist of self | def neighbors(self, max_dist=3):
# TODO: this may have problems because the set doesn't
# compare object id but uses user defined comparison methods
# TODO: outgoing edges are no longer saved
found = set()
found.add(self)
queue = [(self, 0)]
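# breadth-first search from self, following edges in both directions up to max_dist hops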
while queue:
node, d = queue.pop(0)
if d < max_dist:
for edge in node.outgoing:
if edge.head not in found:
found.add(edge.head)
queue.append((edge.head, d+1))
for edge in node.incoming:
for tailnode in edge.tail:
if tailnode not in found:
found.add(tailnode)
queue.append((tailnode, d+1))
return found | [
"def farthest_nodes(self):\n\n diameter = self.diameter() # also initializes self._distance_matrix\n ret = set()\n for u, neigh in self._distance_matrix.items():\n for v, dist in neigh.items():\n if dist == diameter:\n ret.add((u, v))\n assert ret\n return ret",
"def find_relevant_nodes(self, x, y, max_length):\n \n m = self.adjacency_matrix\n mT = self.transposed_adjacency_matrix\n \n # Get the dimension (number of nodes)\n dim = m.shape[0] \n \n n_r = create_one_hot_vector(x, dim)\n n_g = create_one_hot_vector(y, dim)\n \n return find_nodes(m, mT, n_r, n_g, max_length)",
"def find_close_nodes(self, target): \r\n K=8\r\n nodes = [] \r\n if len(self.buckets) == 0: return nodes \r\n index = self.bucket_index(target) \r\n nodes = self.buckets[index].nodes \r\n min = index - 1 \r\n max = index + 1 \r\n while len(nodes) < K and (min >= 0 or max < len(self.buckets)): \r\n if min >= 0: \r\n nodes.extend(self.buckets[min].nodes) \r\n if max < len(self.buckets): \r\n nodes.extend(self.buckets[max].nodes) \r\n min -= 1 \r\n max += 1 \r\n \r\n num = intify(target) \r\n nodes.sort(lambda a, b, num=num: cmp(num^intify(a.nid), num^intify(b.nid))) \r\n return nodes[:K] #K是个常量, K=8 \r",
"def getMaximumDistances(self):\n pass",
"def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]",
"def neighbours_in_range(self, min_distance, max_distance):\n pairwise_distance_matrix = self.pairwise_distances()\n items_in_range = min_distance <= pairwise_distance_matrix < max_distance\n neighbours_in_range_per_particle = np.sum(items_in_range, 0)\n\n return neighbours_in_range_per_particle",
"def find(self, value, max_distance):\n\t\t# type: (Any, int) -> List[Tuple[int, Any]]\n\n\t\tnode = self.root\n\t\tret = [] # type: List[Tuple[int, Any]]\n\n\t\tif node is None:\n\t\t\treturn ret\n\n\t\tcandidates = [node] # is a deque better here?\n\n\t\twhile candidates:\n\t\t\tcandidate = candidates.pop()\n\t\t\tdistance = self.distance_func(value, candidate.value)\n\n\t\t\tif distance <= max_distance:\n\t\t\t\tret.append((distance, candidate.value))\n\n\t\t\t# instead of looking for candidates by searching,\n\t\t\t# one could also directly access the necessary keys in the dict\n\t\t\tfor d, bknode in candidate.leaves.items():\n\t\t\t\tlower = distance - max_distance\n\t\t\t\tupper = distance + max_distance\n\t\t\t\tif lower <= d <= upper:\n\t\t\t\t\tcandidates.append(bknode)\n\n\t\treturn ret",
"def max_nodes(self):\n return self._max_nodes",
"def greedy_max_cut(graph):\n cut = Cut(set(), set())\n for vertex in graph.nodes:\n l_neighbors = sum((adj in cut.left) for adj in graph.neighbors(vertex))\n r_neighbors = sum((adj in cut.right) for adj in graph.neighbors(vertex))\n if l_neighbors < r_neighbors:\n cut.left.add(vertex)\n else:\n cut.right.add(vertex)\n return cut",
"def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]",
"def get_min_max_electrode_distances(self):\n distances = pdist(self.get_electrode_positions())\n return distances.min(), distances.max()",
"def max_cut(self, num_trials = 1):\n best_A, best_B = set(), set()\n best_across = 0\n for _ in range(num_trials):\n size_A = round(random.random() * self.graph.V)\n vertices = list(range(self.graph.V))\n random.shuffle(vertices)\n A = set(vertices[:size_A])\n B = set(vertices[size_A:])\n A = {0, 1}\n B = {2, 3}\n modified = True\n while modified:\n modified = False\n for s in range(self.graph.V):\n if s in A:\n own_set = A\n other_set = B\n else:\n own_set = B\n other_set = A\n within = 0\n across = 0\n for v in self.graph.adj(s):\n if v in own_set:\n within += 1\n else:\n across += 1\n if within > across:\n own_set.remove(s)\n other_set.add(s)\n modified = True\n across = 0 \n for s in A:\n for v in self.graph.adj(s):\n if v in B:\n across += 1\n if across >= best_across:\n best_A, best_B = A, B\n best_across = across\n\n return best_A, best_B, best_across",
"def find_max_clique(graph) -> set:\n return clique.max_clique(graph)",
"def eligible_edges(self):\n return self.edges",
"def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours",
"def max_cliques(self):\n possible = frozenset(self.vertices())\n acc = frozenset()\n excluded = frozenset()\n cliques = []\n degeneracy_ordered_vertices = self.degeneracy_ordering()\n for v in degeneracy_ordered_vertices:\n neighbors_of_v = self.neighbors(v)\n self._bron_kerbosch(\n acc.union({v}),\n possible.intersection(neighbors_of_v),\n excluded.intersection(neighbors_of_v),\n cliques)\n possible = possible.difference({v})\n excluded = excluded.union({v})\n return cliques",
"def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes",
"def get_interest_nodes(self):\n # go through each node in the network to find the min and max degrees\n max_value = 0\n min_value = len(self.nodes)\n for name in self.nodes:\n\n # check for new max\n if self.nodes[name].get_degree() >= max_value:\n\n max_value = self.nodes[name].get_degree()\n\n self.max_node = name\n\n # check for new min\n elif self.nodes[name].get_degree() <= min_value:\n\n min_value = self.nodes[name].get_degree()\n\n self.min_node = name\n\n return self.max_node, self.min_node",
"def prune_branches(self):\n if len(self.nodes) < 20:\n return self.nodes\n\n neighbour_min_angle = np.pi*2 / 3\n for node1 in self.nodes:\n node0 = node1.neighbour1\n node2 = node1.neighbour2\n n0vector = [node1.x - node0.x, node1.y - node0.y]\n n2vector = [node2.x - node1.x, node2.y - node1.y]\n angle = np.arctan2(\n np.linalg.norm(\n np.cross(n0vector, n2vector)\n ),\n np.dot(n0vector, n2vector)\n )\n if angle > neighbour_min_angle:\n # prune it out!\n node0.neighbour2 = node2\n node2.neighbour1 = node0\n node1.pruned = True\n\n new_nodes = []\n for node in self.nodes:\n if not hasattr(node, \"pruned\"):\n new_nodes.append(node)\n\n return new_nodes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
show the neighborhood of this node in a picture | def show_neighborhood(self, max_dist=3, detailed=True):
dotstr = ''
for node in self.neighbors(max_dist):
if node is self:
dotstr += node.dot(color='dodgerblue', detailed=detailed)
else:
dotstr += node.dot(detailed=detailed)
dotstr = 'digraph hypergraph {\nrankdir=BT\n%s}\n' % dotstr
f = open('/tmp/dotty', 'w')
f.write(dotstr)
f.close()
os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')
os.system('eog /tmp/dotty.gif') | [
"def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()",
"def neighborhood(self, cell):\n pass",
"def visualize_neighbours(self, neighbours, pad=3):\n sorted_neighbours = sorted(neighbours, key=lambda x: x[0][0])\n imgs = np.array([self.create_pair_img(x)\n for x in sorted_neighbours])\n\n N, dy, dx, dz = imgs.shape\n img_pairs = np.vstack(sorted(np.vsplit(imgs, N),\n key=self._perceptual_sorter))\n save_as_padded_rectangle('pngs/legal-neighbours.png', img_pairs)",
"def __neighborhood(self):\n self.__nodes['neighbors'] = \\\n self.__network['propagation'] \\\n .get_neighbors(self.__nodes['coordinates'])\n # print self.__nodes['neighbors']\n for node in range(0, self.__nodes_number):\n failures = [neighbor for neighbor in\n self.__nodes['neighbors'][node]\n if self.__nodes['failure'][neighbor]]\n self.__nodes['neighbors'][node] = \\\n list(set(self.__nodes['neighbors'][node]) - set(failures))\n # print self.__nodes['neighbors']",
"def display_neighbour_table(self):\n for key in self._ngh_ids:\n print key\n for node_id in self._ngh_ids[key]:\n print \" \", node_id",
"def plot_neighborhood(pts : UFloatTensor, # (N, x, dims)\n rep_pts : UFloatTensor, # (N, P, dims)\n pts_regional : UFloatTensor # (N, P, dims)\n ) -> None:\n if rep_pts.is_cuda:\n rep_pts = rep_pts.cpu()\n pts_regional = pts_regional.cpu()\n n = np.randint(0, rep_pts.shape[0])\n t = np.randint(0, rep_pts.shape[1])\n test_point = rep_pts[n,t,:].data.numpy()\n neighborhood = pts_regional[n,t,:,:].data.numpy()\n plt.scatter(pts[n][:,0], pts[n][:,1])\n plt.scatter(test_point[0], test_point[1], s = 100, c = 'green')\n plt.scatter(neighborhood[:,0], neighborhood[:,1], s = 100, c = 'red')\n plt.show()\n plt.cla()",
"def draw_neighbor (self, screen, sx1, sx2, sy1, sy2) -> None:\n posX = (self.margin + self.width) * (sy2) + self.margin\n posY = (self.margin + self.height) * (sx2) + self.margin\n draw.rect(screen, WALL, (posX, posY, 13, 13))\n display.flip()\n\n posX = (self.margin + self.width) * (sy2 + (sy1 - sy2)/2) + self.margin\n posY = (self.margin + self.height) * (sx2 + (sx1 - sx2)/2) + self.margin\n draw.rect(screen, WALL, (posX, posY, 13, 13))\n display.flip()",
"def pixelNeighborhood(point, image, sigma):\n width = int(8*sigma)//2\n x,y = point\n neighborhood = image[x-width:x+width+1, y-width:y+width+1]\n return neighborhood",
"def carcassonne_road():\n return 'https://i.stack.imgur.com/YQHH2.png'",
"def show_board(self, winning_path=[]):\n plt.clf() # Clear the firgure\n\n sizes = [300 for _ in range(len(self.G.nodes))]\n weights = [1 for _ in range(len(self.G.edges))]\n edge_color = ['black' for _ in range(len(self.G.edges))]\n\n if len(winning_path):\n for node in winning_path:\n sizes[list(self.G.nodes).index(hash(node))] = 600\n \n for i in range(0, len(winning_path)-1):\n try:\n index = list(self.G.edges).index((hash(winning_path[i]), hash(winning_path[i+1])))\n except:\n index = list(self.G.edges).index((hash(winning_path[i+1]), hash(winning_path[i])))\n weights[index] = 5\n edge_color[index] = 'lightgreen'\n\n nx.draw(self.G, edge_color=edge_color, with_labels=False, pos=self.positions, node_size=sizes, node_color=self.node_color(), width=weights) # Draw the graph with different colors based on their empty status\n if self.pause: # If there should be a live update of the figure\n plt.pause(self.update_freq) # Pause for update_freq seconds\n else: # If not ...\n plt.show(block=True) # ... Stop the figure from closing",
"def check_neighbor(row, col):\r\n global img\r\n global cur_label\r\n global equal_labels\r\n \r\n if row == 0 and col == 0:\r\n \"\"\"\r\n As it is the first time we traverse the img and we are at the initial position,\r\n in the neighbor area there must be no labeled pixel. So we can directly give this\r\n pixel a new label.\r\n \"\"\"\r\n cur_label += 1\r\n img[row][col] = cur_label\r\n \r\n elif row == 0 and col != 0:\r\n \"\"\"\r\n As it is the first time we reach the last column at the first row,\r\n in the neighbor area only the left pixel may be labeled. So we only have to \r\n check the left pixel.\r\n \"\"\"\r\n left = img[row][col-1]\r\n if left != 0:\r\n img[row][col] = left\r\n else:\r\n cur_label += 1\r\n img[row][col] = cur_label\r\n \r\n elif row != 0 and col == 0:\r\n \"\"\"\r\n If we are at the first column but not first row, in the neighbor area\r\n only the above pixel may be labeled. So we only have to check the above\r\n pixel.\r\n \"\"\"\r\n up = img[row-1][col]\r\n if up != 0:\r\n img[row][col] = up\r\n else:\r\n cur_label += 1\r\n img[row][col] = cur_label\r\n \r\n elif row != 0 and col != 0:\r\n \"\"\"\r\n If we are not at left or up margin of the img, check the left and above\r\n pixel.\r\n \"\"\"\r\n left = img[row][col-1]\r\n up = img[row-1][col]\r\n if left == 0 and up == 0:\r\n cur_label += 1\r\n img[row][col] = cur_label\r\n elif left != 0 and up == 0:\r\n img[row][col] = left\r\n elif left == 0 and up != 0:\r\n img[row][col] = up\r\n elif left != 0 and up != 0:\r\n if left == up:\r\n img[row][col] = left\r\n else:\r\n true_label = min([left, up])\r\n # equal labels are stored into equal_labels in pair\r\n equal_labels.append([left,up])\r\n img[row][col] = true_label",
"def _find_neighbourhood(self):\n cluster_id = 0\n notIterated = list(np.arange(0,len(self.center)))\n self.lables = np.zeros(len(self.center),dtype=int)\n while len(notIterated) > 0:\n stack = [notIterated[0]]\n notIterated.remove(notIterated[0])\n while len(stack) > 0:\n NeuronNeuronDist = np.linalg.norm(self.center[stack[-1]] - self.center[notIterated],axis = 1)\n inPercField = NeuronNeuronDist - ((self.sigma[notIterated] + self.sigma[stack[-1]]) * self.p)\n connected = np.where(inPercField <= 0)\n self.lables[stack[-1]] = cluster_id\n stack.pop()\n for k in np.flip(connected[0]):\n stack.append(notIterated[k])\n notIterated.remove(notIterated[k])\n cluster_id += 1",
"def generateNeighbor(self, aSolution, neighborhoodSize):",
"def neighbourhood(self, node1, node2, t):\n raise NotImplementedError",
"def show(img):\n img.show()",
"def get_neighborhood(image, row, col):\n\n neighborhood = np.array([[image[row-1,col-1], image[row-1,col], image[row-1, col+1]],\n [image[row,col-1], image[row,col], image[row,col+1]],\n [image[row+1,col-1], image[row+1,col], image[row+1, col+1]]])\n\n return neighborhood",
"def addNeighbor(self, neighbor):",
"def show(self, frame, pose):\n\n # plot depth image with annotations\n imgcopy = frame.copy()\n # display hack to hide nd depth\n msk = numpy.logical_and(32001 > imgcopy, imgcopy > 0)\n msk2 = numpy.logical_or(imgcopy == 0, imgcopy == 32001)\n min = imgcopy[msk].min()\n max = imgcopy[msk].max()\n imgcopy = (imgcopy - min) / (max - min) * 255.\n imgcopy[msk2] = 255.\n imgcopy = imgcopy.astype('uint8')\n imgcopy = cv2.cvtColor(imgcopy, cv2.COLOR_GRAY2BGR)\n\n jtI = self.importer.joints3DToImg(pose)\n for i in range(jtI.shape[0]):\n cv2.circle(imgcopy, (jtI[i, 0], jtI[i, 1]), 3, (255, 0, 0), -1)\n\n import matplotlib\n if pose.shape[0] == 16:\n jointConnections = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [0, 10],\n [10, 11], [11, 12], [0, 13], [13, 14], [14, 15]]\n jointConnectionColors = [matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 1]]]))[0, 0]]\n elif pose.shape[0] == 14:\n jointConnections = [[13, 1], [1, 0], [13, 3], [3, 2], [13, 5], [5, 4], [13, 7], [7, 6], [13, 10],\n [10, 9], [9, 8], [13, 11], [13, 12]]\n jointConnectionColors = [matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 1]]]))[0, 0],\n matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.16, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.16, 1, 1]]]))[0, 0]]\n else:\n raise ValueError(\"Invalid number of joints\")\n\n for i in range(len(jointConnections)):\n cv2.line(imgcopy, (jtI[jointConnections[i][0], 0], jtI[jointConnections[i][0], 1]),\n (jtI[jointConnections[i][1], 0], jtI[jointConnections[i][1], 1]), 255.*jointConnectionColors[i], 2)\n\n return imgcopy",
"def print_neighbours(self, word=''):\n\n if word in self.index.keys():\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n print(self.words[i])\n print()\n else:\n print('Error - Not a valid word')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
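Illustrative aside on the show_neighborhood entry above: it builds a DOT string and shells out to Graphviz through temp files with os.system. A minimal sketch of the same rendering step using subprocess; neighbors() and dot() are taken from that entry, while render_neighborhood and the subprocess/tempfile plumbing are assumptions of this sketch, not part of the record.

import subprocess
import tempfile

def render_neighborhood(node, max_dist=3, detailed=True):
    # Build the DOT source the same way show_neighborhood does, highlighting the center node.
    body = ''.join(
        n.dot(color='dodgerblue', detailed=detailed) if n is node else n.dot(detailed=detailed)
        for n in node.neighbors(max_dist)
    )
    dotstr = 'digraph hypergraph {\nrankdir=BT\n%s}\n' % body
    # Pipe the DOT source to Graphviz and keep the rendered GIF in a temp file.
    gif = subprocess.run(['dot', '-Tgif'], input=dotstr.encode(),
                         stdout=subprocess.PIPE, check=True).stdout
    with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as out:
        out.write(gif)
        return out.name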
subpaths is a list of paths on tail nodes. Return a new path generated by concatenating this edge. This is used in k-best paths generation. | def make_path(self, subpaths):
assert len(self.tail) == len(subpaths), '%s' % self
path = Path(self, subpaths)
weight = self.hg.one
for p in subpaths:
if p is not None:
weight = self.hg.prod(weight, p.weight)
weight = self.hg.prod(weight, self.hg.w(self))
path.weight = weight
return path | [
"def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )",
"def toSubpathPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def get_subgraph_from_paths(self, paths):\n nodes, edges = graph_elements_from_paths(paths)\n subgraph = self.graph.subgraph(nodes).edge_subgraph(edges)\n return subgraph",
"def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale\n y_base = point[1] * scale + self.border * scale\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )",
"def extend(self, normsubpaths):\n for anormsubpath in normsubpaths:\n # use append to properly handle regular path items as well as normsubpaths\n self.append(anormsubpath)",
"def fuse_subpaths(path_node):\n path_d = path_node.get(\"d\", None)\n path = simplepath.parsePath(path_d)\n\n if len(path) == 0:\n return\n\n i = 0\n initial_point = [ path[i][1][-2], path[i][1][-1] ]\n return_stack = []\n while i < len(path):\n # Remove any terminators: they are redundant\n if path[i][0] == \"Z\":\n path.remove([\"Z\", []])\n continue\n\n # Skip all elements that do not begin a new path\n if i == 0 or path[i][0] != \"M\":\n i += 1\n continue\n\n # This element begins a new path - it should be a moveto\n assert(path[i][0] == 'M')\n\n # Swap it for a lineto\n path[i][0] = 'L'\n\n # If the old subpath has not been closed yet, close it\n if path[i-1][1][-2] != initial_point[0] or path[i-1][1][-2] != initial_point[1]:\n path.insert(i, ['L', initial_point])\n i += 1\n\n # Set the initial point of this subpath\n initial_point = [ path[i-1][1][-2], path[i-1][1][-1] ]\n\n # Append this point to the return stack\n return_stack.append(initial_point)\n #end while\n\n # Now pop the entire return stack\n while return_stack != []:\n el = ['L', return_stack.pop()]\n path.insert(i, el)\n i += 1\n\n\n path_d = simplepath.formatPath(path)\n path_node.set(\"d\", path_d)",
"def dtw_subsequence_path(subseq, longseq):\n subseq = to_time_series(subseq)\n longseq = to_time_series(longseq)\n acc_cost_mat = subsequence_cost_matrix(subseq=subseq,\n longseq=longseq)\n global_optimal_match = numpy.argmin(acc_cost_mat[-1, :])\n path = subsequence_path(acc_cost_mat, global_optimal_match)\n return path, numpy.sqrt(acc_cost_mat[-1, :][global_optimal_match])",
"def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]",
"def find_all_subpaths(all_paths):\r\n # Calculate length of the maximum path\r\n max_length = max(len(s) for s in all_paths)\r\n\r\n subpaths = set()\r\n for path in all_paths:\r\n for k in range(0, max_length + 1):\r\n for ii in range(0, len(path) - k + 1):\r\n subpaths.add(tuple(path[ii:ii + k]))\r\n subpaths = filter(None, subpaths)\r\n return list(subpaths)",
"def get_paths_of_length_k(subpaths, k):\r\n subpaths_of_length_k = [i for i in subpaths if len(\r\n i) == k] # all k-length subpaths\r\n subpaths = [i for i in subpaths if len(i) != k] # remove k-length subpaths\r\n return subpaths_of_length_k, subpaths",
"def dtw_subsequence_path(subseq, longseq):\n subseq = to_time_series(subseq)\n longseq = to_time_series(longseq)\n return cydtw_subsequence_path(subseq=subseq, longseq=longseq)",
"def GetPath(self, subcollection):\n # If default collection requested and we are not using custom paths.\n if not subcollection and not self.flat_paths:\n return self.path\n return self.flat_paths[subcollection]",
"def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))",
"def add_tail(edge, *identifiers):",
"def get_subgraph(self, head, tail, is_nonempty_self_loop):\n # node set\n nodes = self.find_all_nodes(head, tail)\n subgraph = self.buchi_graph.subgraph(nodes).copy()\n subgraph.graph['init'] = head\n subgraph.graph['accept'] = tail\n\n # remove all outgoing edges of the accepting state from subgraph for the prefix part if head != tail\n if head != tail:\n remove_edge = list(subgraph.edges(tail))\n subgraph.remove_edges_from(remove_edge)\n\n # unpruned subgraph used to extract the run\n unpruned_subgraph = subgraph.copy()\n\n # prune the subgraph\n self.prune_subgraph_automaton(subgraph)\n\n # get all paths in the pruned subgraph\n paths = []\n if head != tail:\n paths = list(nx.all_simple_paths(subgraph, source=head, target=tail))\n else:\n for s in subgraph.succ[head]:\n paths = paths + [[head] + p for p in list(nx.all_simple_paths(subgraph, source=s, target=tail))]\n\n # if self loop around the accepting state, then create an element with edge label ,\n # solving prefix and suffix together\n if is_nonempty_self_loop:\n subgraph.add_node('artificial', label='1',\n neg_label=[], formula=to_dnf('1'))\n subgraph.add_edge(tail, 'artificial', label=subgraph.nodes[tail]['label'],\n neg_label=subgraph.nodes[tail]['neg_label'], formula=subgraph.nodes[tail]['formula'])\n for path in paths:\n path.append('artificial')\n\n return subgraph, unpruned_subgraph, paths",
"def extend_path_over_signatures(s, edges):\n path = [s]\n e1 = edges[0]\n for e2 in edges[1:]:\n next_point = compute_extension_point(s, e1, e2.asLineAxis3D())\n if not next_point or next_point not in e2:\n print \"failed to compute next point at\", s, e1, e2\n break\n s = Segment(B, next_point)\n B = next_point\n path.append(s)\n return path",
"def find_all_paths(self, start_vertex, end_vertex, path=[]):\n graph = self.__graph_dict \n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n if start_vertex not in graph:\n return []\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_paths(vertex, \n end_vertex, \n path)\n for p in extended_paths: \n paths.append(p)\n return paths",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def allPaths(graph, start, end, maxTotalDist, maxDistOutdoors, path = []):\n\n path = path + [start]\n\n if start == end:\n totLength, outLength = pathLength(graph, path)\n if (totLength <= maxTotalDist) and (outLength <= maxDistOutdoors):\n return [path]\n if not (graph.hasNode(start)):\n return []\n paths = []\n for node in graph.childrenOf(start):\n if node[0] not in path:\n #print \"current path \" + str(path)\n extended_paths = allPaths(graph, node[0], end, maxTotalDist, maxDistOutdoors, path)\n for p in extended_paths:\n paths.append(p)\n return paths"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
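Illustrative aside on the make_path entry above: the new path's weight is composed from the edge weight and the tail subpath weights through the hypergraph's semiring hooks (hg.one, hg.prod). A small sketch of that composition; ProbSemiring and combine_weights are hypothetical stand-ins for hg and are not part of the record.

class ProbSemiring:
    # Identity and product of the semiring used to score paths.
    one = 1.0

    @staticmethod
    def prod(a, b):
        return a * b

def combine_weights(hg, edge_weight, subpath_weights):
    # weight(path) = edge weight combined with every tail subpath weight.
    w = hg.one
    for sw in subpath_weights:
        w = hg.prod(w, sw)
    return hg.prod(w, edge_weight)

# combine_weights(ProbSemiring, 0.5, [0.4, 0.9]) == 0.18 (up to float rounding)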
Top-down topo sort. Nodes that don't reach the target node are thrown away | def topo_sort(self):
# TODO: detect cycles
self.find_reachable_nodes()
# save list of nodes in topo order
self.nodes = []
# assign each node an id field incrementally
cur_id = 0
# count visited outgoing edges for each node
unvisited = {}
for nid, node in list(self.found.items()):
unvisited[nid] = node.nout
queue = [self.root]
#print >>sys.stderr, '+++'
while queue:
# take off nodes whose all outgoing edges are visited from
# queue head
node = queue.pop(0)
self.nodes.append(node)
node.hg = self
node.id = cur_id
cur_id += 1
for edge in node.incoming:
edge.hg = self
for tailnode in edge.tail:
#print >>sys.stderr, tailnode
unvisited[id(tailnode)] -= 1
if unvisited[id(tailnode)] == 0:
queue.append(tailnode)
self.sanity_check()
self.tasks_done.add('topo_sort') | [
"def _prune_top_down(self):\r\n self.logger.info('Pruning tree top-down...')\r\n # starting at the second-to-last level of the tree\r\n for order in range(self.k-1, 0, -1):\r\n # getting nodes from nmap is faster than tree traversal\r\n cur_order = [n for n in self.nmap.values() if n.order == order]\r\n log_step = ceil(self.prune_log_interval * len(cur_order))\r\n self.logger.info('Pruning {:,d} nodes from order {}...'\r\n .format(len(cur_order), order + 1))\r\n for i, node in enumerate(cur_order):\r\n # add an update to the log every so often\r\n if not i % log_step:\r\n self.logger.info('Pruning node {:,d}/{:,d} ({}%) in order {}...'\r\n .format(i, len(cur_order), int(i*100/len(cur_order)), order + 1))\r\n # the two criteria for preserving a higher-order node are if\r\n # 1) it has a higher-order descendent (to preserve flow)\r\n # or\r\n # 2) it has a dependency, as indicate by its relative entropy\r\n if node.marked or (self._has_dependency(node)):\r\n # prune the lord_rule's children by hord weights\r\n # NOT USED - seems to result in a sparse and overfit network\r\n # self._prune_lord_children(node)\r\n # self._mark_ancestors(node)\r\n node.parent.marked = True\r\n else:\r\n self._delete_children(node)\r\n self.logger.info('Pruning successfully completed on order {}.'.format(order + 1)) \r\n self.logger.info('Pruning successfully completed all orders!') \r\n \r\n # self.logger.info('EXPERIMENTAL: checking for sequences to merge...')\r\n # starting at the second-to-last level of the tree\r\n # for order in range(2, self.k):\r\n # getting nodes from nmap is faster than tree traversal\r\n # cur_order = [n for n in self.nmap.values() if n.order == order]\r\n # log_step = ceil(self.prune_log_interval * len(cur_order))\r\n # self.logger.info('Testing {:,d} nodes from order {}...'\r\n # .format(len(cur_order), order + 1))\r\n # for i, node in enumerate(cur_order):\r\n # add an update to the log every so often\r\n # if not i % log_step:\r\n # self.logger.info('Testing node {:,d}/{:,d} ({}%) in order {}...'\r\n # .format(i, len(cur_order), int(i*100/len(cur_order)), order + 1))\r\n \r\n # self.logger.info(f'{node}')\r\n # if not node.checked_for_merge:\r\n # nodes_to_check = self._get_merge_candidates(node)\r\n # print(nodes_to_check)\r\n # for n in nodes_to_check:\r\n # pass\r\n #n.checked_for_merge = True\r\n \r\n # self.logger.info('Merging successfully completed on order {}.'.format(order + 1)) \r\n # self.logger.info('Merging successfully completed all orders!') \r",
"def top_sort(self):\n unvisited = set(self.nodes)\n marked = set([])\n ret_val = []\n\n def visit(node):\n \"\"\"Visit a node; return True if all is well, False if a cycle is\n detected\"\"\"\n if node in marked:\n return False\n if node not in unvisited:\n return True\n marked.add(node)\n for nbr in self.edges[node]:\n if not visit(nbr):\n return False\n ret_val.append(node)\n marked.remove(node)\n unvisited.remove(node)\n return True\n\n while len(unvisited) != 0:\n node = unvisited.pop()\n unvisited.add(node)\n if not visit(node):\n return None\n return ret_val[::-1]",
"def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False",
"def _topological_sort(self):\n\n # self.order is a list with right computing order\n self.order = []\n for graph in self._dependencies:\n if not graph._used:\n self._depth_first_search(graph)\n\n for graph in self._dependencies:\n graph._used = False",
"def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True",
"def test_limited_topological_sort(self):\n g = self.get_default_graph()\n g.vertices_topological_sort(vertex_set=[1])\n g.vertices_topological_sort(vertex_set=[1,3])\n g.vertices_topological_sort(vertex_set=[1,2])",
"def _topological_sort(self):\n\n visited = defaultdict(bool)\n stack = []\n\n for pod in self.pods:\n if not visited[pod]:\n self._topological_sort_pod(pod, visited, stack)\n\n return stack[::-1]",
"def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)",
"def sort_offTargets(self):\n\n if self.offTargets_sorted:\n return\n\n self.offTargets = self.offTarget_hash.values()\n self.offTargets = sorted(self.offTargets, key=attrgetter('chrom', 'start'))\n self.offTargets_sorted = True",
"def topo_sort_kahn_reversed(g):\n g_sorted = []\n count = 0\n while g:\n acyclic = False\n for node, edges in g.items():\n count += 1\n print \"============================\"\n #print \"node:{} edges:{} g:{}\".format(node, edges, g)\n for edge in edges:\n print \"({}): test --- for node:{}... if {} in {}\".format(count, node, edge, g)\n if edge in g:\n print \"{} found ... breaking\".format(edge)\n break\n else: # no edges found, remove node\n print \"({}): deleting node:{} from g\".format(count, node)\n acyclic = True\n del g[node]\n g_sorted.append((node, edges))\n\n if not acyclic:\n print \"error - cycle found\"\n break\n\n print g_sorted",
"def test_toplogical_sort_huang():\n bbn = BbnUtil.get_huang_graph()\n sampler = LogicSampler(bbn)\n\n assert_almost_equal([0, 1, 2, 3, 4, 5, 6, 7], sampler.nodes)",
"def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, visited, sorted_node) \n \n visited.clear()\n return sorted_node",
"def restore_node_order(source, target):\n n = source.count()\n if n != target.count():\n raise ValueError('Two trees have different sizes.')\n\n # find matching nodes\n matches = match_taxa(source, target)\n if n != len(matches):\n raise ValueError('Two trees have different topologies.')\n\n def taxastr(node):\n return ','.join(sorted(node.taxa))\n\n taxa2src = {taxastr(x[0]): x[0] for x in matches\n if not x[0].is_tip()}\n\n # re-order child nodes under each internal node\n res = target.copy()\n for node in res.non_tips(include_self=True):\n src_node = taxa2src[taxastr(node)]\n taxa2child = {}\n for child in node.children:\n taxa2child[taxastr(child)] = child\n node.children = []\n for child in src_node.children:\n node.append(taxa2child[taxastr(child)])\n\n return res",
"def topological_sort(self):\n sorted_list = []\n def visit(node, temp_marks):\n if node.visited:\n return\n if node in temp_marks:\n raise CyclicGraphException(\"Cannot perfrom topological sort on a cyclic graph\")\n temp_marks.add(node)\n for neighbor in self.graph[node]:\n visit(neighbor.end, temp_marks)\n temp_marks.remove(node)\n node.visited = True\n sorted_list.append(node)\n\n self.reset_visited()\n temp_marks = set() #need to keep track of temporary marks so that we don't go in circles\n for node in self.graph:\n # run depth first search\n visit(node, temp_marks)\n self.reset_visited()\n return sorted_list[::-1]",
"def find_close_nodes(self, target): \r\n K=8\r\n nodes = [] \r\n if len(self.buckets) == 0: return nodes \r\n index = self.bucket_index(target) \r\n nodes = self.buckets[index].nodes \r\n min = index - 1 \r\n max = index + 1 \r\n while len(nodes) < K and (min >= 0 or max < len(self.buckets)): \r\n if min >= 0: \r\n nodes.extend(self.buckets[min].nodes) \r\n if max < len(self.buckets): \r\n nodes.extend(self.buckets[max].nodes) \r\n min -= 1 \r\n max += 1 \r\n \r\n num = intify(target) \r\n nodes.sort(lambda a, b, num=num: cmp(num^intify(a.nid), num^intify(b.nid))) \r\n return nodes[:K] #K是个常量, K=8 \r",
"def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order",
"def topoSort(self):\n self._gates = self._topoSorter.sort(self._gates, self._outCons)",
"def sort_edges(self): \n self.edges.sort_values([self.to_day, self.to_time], inplace=True, ascending=not self.reverse)\n self.edges.reset_index(drop=True, inplace=True)\n \n # extract the indices of the transport and walk edges for faster accessing\n self.transport_ids = self.edges.trip_id != \"0000\"\n self.walk_ids = np.where(~self.transport_ids)[0]\n self.transport_ids = np.where(self.transport_ids)[0]",
"def topo_sort_tasks(root_task):\n\n stack = []\n to_visit_fifo = collections.deque([root_task])\n\n while len(to_visit_fifo) > 0:\n next_node = to_visit_fifo.popleft()\n stack.append(next_node)\n to_visit_fifo.extend(next_node.deps() if not worker._is_external(next_node) else [])\n \n return stack"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
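Illustrative aside on the topo_sort entry above: it is Kahn's algorithm run from the root downward, keeping a per-node count of not-yet-visited outgoing edges and emitting a tail node only once that count reaches zero. The same counting idea on a plain dict-of-lists DAG; topo_from_root and its input format are assumptions of this sketch, not part of the record.

from collections import deque

def topo_from_root(root, children):
    # children maps a node to the nodes it points at (playing the role of tail nodes).
    pending = {}  # node -> number of still-unvisited links pointing at it
    for node, kids in children.items():
        for kid in kids:
            pending[kid] = pending.get(kid, 0) + 1
    order, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        order.append(node)
        for kid in children.get(node, []):
            pending[kid] -= 1
            if pending[kid] == 0:  # every node pointing at kid has been emitted
                queue.append(kid)
    return order

# topo_from_root('r', {'r': ['a', 'b'], 'a': ['c'], 'b': ['c'], 'c': []})
# -> ['r', 'a', 'b', 'c']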
Read a file and return a toposorted hypergraph. | def deserialize(self, filename):
f = open(filename)
edges_tails = []
nodes = []
# first pass adds incoming edges to nodes
for line in f:
if '->' in line: # edge
edge = self.edge_class()
tail_ids, head_id = edge.deserialize(line)
nodes[head_id].add_incoming(edge)
edges_tails.append((edge, tail_ids))
else: # node
node = self.node_class()
node.deserialize(line)
assert node.id == len(nodes), 'nodes shall appear in order'
nodes.append(node)
# second pass adds tail nodes to edges
for edge, tail_ids in edges_tails:
for nid in tail_ids:
edge.add_tail(nodes[nid])
f.close()
# make a toposorted hypergraph
hg = Hypergraph(nodes[0])
hg.nodes = nodes
for node in hg:
node.hg = hg
for edge in hg.edges():
edge.hg = hg
hg.tasks_done.add('topo_sort')
return hg | [
"def read_graph(filename):\n return nx.read_edgelist(filename, delimiter='\\t')",
"def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g",
"def read_file(file):\n\t# File where the graph data is writen\n\tf = open(file, 'r')\n\n\t# Number of nodes\n\tN = int(f.readline())\n\t# Number of groups\n\tq = int(f.readline())\n\t# Adjacency list\n\tadj_list = eval(f.readline())\n\t# Group assignment for each node\n\tgroup = np.array(eval(f.readline()), dtype = np.int8)\n\t# Proportion of nodes on each group\n\tn = np.array(eval(f.readline()))\n\t# Edge probability matrix times N\n\tc = np.array(eval(f.readline()))*N\n\n\tf.close()\n\t\n\t# Returns all data\n\treturn N, q, adj_list, group, n, c",
"def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')",
"def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)",
"def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')",
"def read(self, file):\n try:\n graph = pygraphviz.AGraph(file)\n except IOError:\n raise FileNotExistsError()\n \n self.adyacency.clear()\n self.conections.clear()\n\n self.add_nodes(graph.nodes())\n\n for edge in graph.edges():\n weight = int(edge.attr[\"label\"])\n self.add_edge(edge[0], edge[1], weight)",
"def read_graph_from_file(filename):\n\n # TODO: Use 'open' to open the file\n filename = \"../\"+filename\n path = Path(__file__).parent / filename\n with path.open() as f:\n lines = f.readlines()\n f.close()\n # TODO: Use the first line (G or D) to determine whether graph is directed\n # and create a graph object\n if lines[0][:len(lines[0])-1] is \"D\":\n graph = Graph(is_directed=True)\n elif lines[0][:len(lines[0])-1] is \"G\":\n graph = Graph(is_directed=False)\n else:\n raise ValueError(\"Invalid Graph Type\")\n # TODO: Use the second line to add the vertices to the graph\n vertices = lines[1][:len(lines[1])-1].split(\",\")\n for elm in vertices:\n graph.add_vertex(elm)\n # TODO: Use the 3rd+ line to add the edges to the graph\n for i in range(2,len(lines)):\n elm = lines[i][:len(lines[i])]\n graph.add_edge(elm[1], elm[3])\n\n return graph",
"def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n print('Read Graph')\n return G",
"def read_graph_file(file_name):\n def _line_to_edge(index, line):\n \"\"\" Parse each line \"\"\"\n items = line.upper().strip().split(',')\n if len(items) != 3: \n raise Exception('Invalid line %i: %s' % (index, line))\n return Edge(nodes=items[:2], cost=int(items[2]))\n # -----------------------------------------------------------\n graph_data = open(file_name, 'r')\n graph = [_line_to_edge(c, line) for c, line in enumerate(graph_data)]\n graph_data.close()\n return graph",
"def load_graph(file):\n file = os.path.join(network_dir, file)\n G = nx.Graph()\n\n with open(file, 'r') as f:\n while True:\n datas = f.readlines(10)\n if not datas:\n break\n for data in datas:\n if len(data.split()) > 2:\n u, v, w = data.split()\n else:\n u, v = data.split()\n w = \"{0:.2f}\".format(random.uniform(0., 1.))\n G.add_edge(eval(u), eval(v), weight=eval(w))\n for _, node_prop in G.nodes(data=True):\n node_prop['state'] = 0\n print \"loading graph completed successfully!!\"\n print nx.info(G)\n return G",
"def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)",
"def load_graph(self, filename):\n fmt = guess_format(filename)\n logging.info(\"Reading %s as %s...\" % (filename, fmt))\n graph = Graph().parse(filename, format=fmt)\n logging.debug(\"... got %d triples\" % (len(graph)))\n return(graph)",
"def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)",
"def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!",
"def read_edges(filename):\n g = nx.read_edgelist(filename, nodetype=str,create_using=nx.DiGraph())\n return g",
"def build_graph(file_name):\n graph = MyGraph()\n with open(file_name, 'r') as fin:\n line = fin.readline().replace('\\n', '')\n while line != \"\":\n vals = line.split(':')\n graph.add_node(vals[0], pos=(int(vals[1]),int(vals[2])))\n line = fin.readline().replace('\\n', '')\n dest = fin.readline().replace('\\n','').split('\\t')\n line = fin.readline().replace('\\n', '')\n edges = []\n while line != '':\n node_info = line.split('\\t')\n src = node_info[0]\n for node in range(1,len(node_info)):\n if node_info[node] != '':\n if (dest[node],src) not in edges:\n edges.append((src,dest[node], node_info[node]))\n line = fin.readline().replace('\\n','')\n for edge in edges:\n graph.add_edge(edge[0], edge[1], weight=int(edge[2]))\n\n return graph",
"def loadGraphFromTextFile(fileName):\n\n # ******************************************************************************************************************\n # Estimate the maximum number of edges based on the number of lines in the file.\n # ******************************************************************************************************************\n blockSize = 65536\n nEdge = 0\n with open(fileName, \"rb\") as fHandle:\n block = fHandle.read(blockSize)\n while block:\n nEdge += block.count(b'\\n')\n block = fHandle.read(blockSize)\n\n # ******************************************************************************************************************\n # Read the edges from the file.\n # ******************************************************************************************************************\n edges = np.zeros((nEdge, 2), dtype=np.uint64)\n nEdge = 0\n nVertex = 0\n for line in open(fileName, 'r'):\n if len(line)==0 or line.startswith('#'):\n continue\n\n tokens = line.split('\\t')\n assert len(tokens)==2, \"Found %d tokens in line %s but expected %d tokens.\" % (len(tokens), line, 2)\n iFrom = int(tokens[0])\n iTo = int(tokens[1])\n edges[nEdge,0] = iFrom\n edges[nEdge,1] = iTo\n nEdge += 1\n nVertex = max(nVertex, iFrom)\n nVertex = max(nVertex, iTo)\n\n nVertex += 1 # Count 0th vertex index\n edges = edges[0:nEdge,:]\n\n # Re-index\n idToIndex = dict()\n iVertex = 0\n for iEdge in range(nEdge):\n iFrom = edges[iEdge,0]\n iTo = edges[iEdge,1]\n if iFrom in idToIndex:\n iFrom = idToIndex[iFrom]\n else:\n idToIndex[iFrom] = iVertex\n iFrom = iVertex\n iVertex += 1\n if iTo in idToIndex:\n iTo = idToIndex[iTo]\n else:\n idToIndex[iTo] = iVertex\n iTo = iVertex\n iVertex += 1\n edges[iEdge,0] = iFrom\n edges[iEdge,1] = iTo\n\n\n # ******************************************************************************************************************\n # Remove all double-linked edges.\n # ******************************************************************************************************************\n edgeExists = set() # set of tuples\n nSelf = 0\n nDouble = 0\n iiEdge = 0\n for iEdge in range(0, nEdge):\n iFrom = edges[iEdge,0]\n iTo = edges[iEdge,1]\n if iFrom==iTo:\n nSelf += 1\n continue\n if iFrom > iTo:\n exists = (iFrom, iTo) in edgeExists\n if not exists:\n edgeExists.add((iFrom,iTo))\n edges[iiEdge,0] = iFrom\n edges[iiEdge,1] = iTo\n iiEdge += 1\n else:\n nDouble += 1\n else:\n exists = (iTo, iFrom) in edgeExists\n if not exists:\n edgeExists.add((iTo,iFrom))\n edges[iiEdge,0] = iTo\n edges[iiEdge,1] = iFrom\n iiEdge += 1\n else:\n nDouble += 1\n\n nEdge = iiEdge\n edges = edges[0:nEdge,:]\n\n # ******************************************************************************************************************\n # Build up the adjacency list.\n # ******************************************************************************************************************\n index = np.argsort(edges[:,0], axis=0)\n startEdge = np.zeros(nVertex+1, dtype=np.uint64)\n fromVertex = np.zeros(nEdge, dtype=np.uint64)\n toVertex = np.zeros(nEdge, dtype=np.uint64)\n iFromLast = 0\n iVertex = 1\n for iEdge in range(nEdge):\n iFrom = edges[index[iEdge],0]\n iTo = edges[index[iEdge],1]\n toVertex[iEdge] = iTo\n fromVertex[iEdge] = iFrom\n if iFrom > iFromLast:\n for iJump in range(int(iFrom-iFromLast)):\n startEdge[iVertex] = iEdge\n iVertex += 1\n\n iFromLast = iFrom\n\n while iVertex <= nVertex:\n startEdge[iVertex] = nEdge\n iVertex += 1\n\n # 
******************************************************************************************************************\n # Sort the neighbor lists.\n # ******************************************************************************************************************\n iOffset = 0\n for iVertex in range(nVertex):\n nNeighbor = int(startEdge[iVertex+1] - startEdge[iVertex])\n toVertex[iOffset:iOffset+nNeighbor] = np.sort(toVertex[iOffset:iOffset+nNeighbor])\n iOffset += nNeighbor\n\n return startEdge, fromVertex, toVertex",
"def build_graph(filepath):\n graph = defaultdict(list)\n with open(filepath, 'r') as file:\n for edge in file:\n head, tail = edge.split()\n graph[head].append(tail)\n return graph"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
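Illustrative aside on the deserialize entry above: two passes are needed because an edge line can name tail nodes whose ids are larger than its head's, so those nodes may not exist yet when the edge is first read, while the head (earlier in topological order) already does. A sketch of that wiring on an assumed minimal line format (a bare id for a node, "tail ids -> head id" for an edge); read_hypergraph_lines and this line format are assumptions of the sketch, and the real Node/Edge classes parse their own richer lines.

def read_hypergraph_lines(lines):
    nodes, pending = [], []
    for line in lines:
        if '->' in line:  # edge line: hook it to its head immediately
            # (assumes each edge line comes after its head node's line)
            tails, head = line.split('->')
            tail_ids = [int(t) for t in tails.split()]
            edge = {'head': int(head), 'tail': []}
            nodes[edge['head']]['incoming'].append(edge)
            pending.append((edge, tail_ids))
        else:  # node line; ids appear in order starting from 0
            nid = int(line)
            assert nid == len(nodes)
            nodes.append({'id': nid, 'incoming': []})
    # second pass: every node now exists, so tails can be resolved safely
    for edge, tail_ids in pending:
        edge['tail'] = [nodes[i] for i in tail_ids]
    return nodes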
Standard backtracking approach to find the optimal opmesh assignment, starting with the optimal number of stages (best_n_stages). The return is a list [((layer_start, next_layer_start), submesh_shape_idx, sharding_config_idx)] where (layer_start, next_layer_start) is a half-open [) slice of the ops and submesh_shape_idx is the submesh those ops should be mapped to (sharding_config_idx is currently always 1 but will eventually be used to pick the optimal tensor sharding configuration). | def get_optimal_submesh_assignments(
best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes
):
current_s = best_n_stages
current_layer = 0
current_devices = n_devices
optimal_layer_submesh_assignments = []
while current_s > 0 and current_layer < n_ops and current_devices > 0:
next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[
current_s, current_layer, current_devices
]
assert next_start_layer != -1 and current_devices != -1
optimal_layer_submesh_assignments.append(
((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx)
)
current_s -= 1
current_layer = next_start_layer
current_devices -= submesh_sizes[submesh_shape_idx]
assert current_s == 0 and current_layer == n_ops and current_devices == 0
return optimal_layer_submesh_assignments | [
"def split_to_stages(self) -> Dict[int, \"Graph\"]:\n stages = dict()\n\n tmp = Graph(None, None, None, None, None).load_state(self.state())\n\n groups = defaultdict(list)\n for n in tmp.nodes:\n if n.type != NodeTypes.IN:\n groups[n.stage_id].append(n)\n\n for stage_id, group in groups.items():\n stage_nodes = dict()\n stage_inputs = dict()\n stage_output_ids = []\n stage_input_kws = dict()\n\n for n in sorted(group, key=lambda w: w.id):\n stage_nodes[n.id] = n\n # check if stage output\n if (n.id in self.output_ids) or any(o.stage_id != stage_id for o in n.out_edges):\n stage_output_ids.append(n.id)\n\n # discard outgoing edges to external stages\n n.out_edges = [o for o in n.out_edges if o.stage_id == stage_id]\n\n # add stage inputs\n to_replace = dict()\n for u in n.in_edges:\n if (u.stage_id != stage_id) or (u.type is NodeTypes.IN):\n if u.id in stage_inputs:\n stage_input = stage_inputs[u.id]\n else:\n # create a new input node for this stage\n stage_input = Node.from_other(u)\n stage_input.type = NodeTypes.IN\n stage_input.args = []\n stage_input.kwargs = dict()\n stage_input.stage_id = stage_id\n stage_input.out_edges = [o for o in u.out_edges if o.stage_id == stage_id]\n stage_inputs[u.id] = stage_input\n stage_nodes[u.id] = stage_input\n to_replace[u] = stage_input\n\n if u.id in self.input_kw_ids:\n stage_input_kws[u.id] = self.input_kw_ids[u.id]\n\n # replace inputs\n for old, new in to_replace.items():\n n.replace_input(old, new)\n new.add_out_edge(n)\n stages[stage_id] = Graph(stage_nodes, stage_input_kws, stage_output_ids, self.depth, self.basic_blocks)\n\n return stages",
"def dfs_maximizing(state) :\n best_path = None\n best_score = 0\n evals = 0\n queue = [[state]]\n while queue:\n path = queue.pop(0)\n \n #check if path has ended, update evals and best score and path\n if path[-1].is_game_over(): \n score = path[-1].get_endgame_score()\n evals += 1\n if score > best_score:\n best_path = path\n best_score = score\n elif score == best_score:\n if best_path == None or len(path) < len(best_path): \n best_path = path\n best_score = score\n \n #add next depth to queue\n else:\n next_states = path[-1].generate_next_states() \n new_queue = []\n for next_state in next_states:\n new_path = path.copy()\n new_path.append(next_state)\n new_queue.append(new_path)\n queue = new_queue + queue\n \n return (best_path, best_score, evals)",
"def projects_to_seg(input, img_wh, vertex_sampling):\n projects_with_depth, mask_vals = input\n projects = projects_with_depth[:, :, :2]\n\n if vertex_sampling is None:\n part_indices_path = \"./keras_smpl/part_vertices.pkl\"\n else:\n part_indices_path = \"./keras_smpl/\" + str(vertex_sampling) + \"_sampled_part_vertices.pkl\"\n\n with open(part_indices_path, 'rb') as f:\n part_indices = pickle.load(f)\n\n i = tf.range(0, img_wh)\n j = tf.range(0, img_wh)\n\n t1, t2 = tf.meshgrid(i, j)\n grid = tf.cast(tf.stack([t1, t2], axis=2), dtype='float32') # img_wh x img_wh x 2\n reshaped_grid = tf.reshape(grid, [-1, 2]) # img_wh^2 x 2\n\n segs = []\n for part in range(len(part_indices)):\n indices = part_indices[part]\n if vertex_sampling is not None:\n indices = [index//vertex_sampling for index in indices]\n num_indices = len(indices)\n indices = tf.constant(indices, dtype='int32')\n\n part_projects = tf.gather(projects, indices, axis=1) # N x num_indices x 2\n part_projects = tf.tile(tf.expand_dims(part_projects, axis=1),\n [1, img_wh*img_wh, 1, 1]) # N x img_wh^2 x num_indices x 2\n\n part_mask_vals = tf.gather(mask_vals, indices, axis=1) # N x num_indices\n part_mask_vals = tf.tile(tf.expand_dims(part_mask_vals, axis=1),\n [1, img_wh*img_wh, 1]) # N x img_wh^2 x num_indices\n\n expanded_grid = tf.tile(tf.expand_dims(reshaped_grid, axis=1),\n [1, num_indices, 1]) # img_wh^2 x num_indices x 2\n\n diff = tf.subtract(part_projects, expanded_grid) # N x img_wh^2 x num_indices x 2\n norm = tf.norm(diff, axis=3, name='big_norm1') # N x img_wh^2 x num_indices\n norm = tf.multiply(norm, part_mask_vals)\n exp = tf.exp(tf.negative(norm)) # N x img_wh^2 x num_indices\n scores = tf.reduce_max(exp, axis=2) # N x img_wh^2\n seg = tf.reshape(scores, [-1, img_wh, img_wh]) # N x img_wh x img_wh\n segs.append(seg)\n\n stacked_segs = tf.stack(segs, axis=3) # N x img_wh x img_wh x 31\n silhouettes = tf.subtract(1.0,\n tf.clip_by_value(tf.reduce_sum(stacked_segs, axis=3),\n clip_value_min=0,\n clip_value_max=1)) # N x img_wh x img_wh\n # segs.insert(0, silhouettes)\n output_segs = tf.concat([tf.expand_dims(silhouettes, axis=3), stacked_segs],\n axis=3) # N x img_wh x img_wh x 32\n output_segs = tf.reverse(output_segs, axis=[1]) # Flip image vertically\n return output_segs",
"def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index",
"def test_build_extract_submesh_3(self):\n\n nodes = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.0], \\\n [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]\n\n\n triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0, 9, 1], \\\n [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 9, 0], [3, 11, 4], \\\n [6, 11, 3], [7, 11, 6], [4, 11, 7]]\n\n\n boundary = {(13, 1): 'bottom', (7, 1): 'left', (3, 1): 'right', (14, 1): 'right', \\\n (11, 1): 'bottom', (10, 1): 'top', (5, 1): 'left', (4, 1): 'top'}\n\n triangles_per_proc = [5, 6, 5]\n\n\n quantities = {'stage': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'elevation': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'ymomentum': num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), 'friction': num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), 'xmomentum': num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])}\n\n\n\n true_submesh = {'full_boundary': [{(3, 1): 'right', (4, 1): 'top'},\\\n {(5, 1): 'left', (10, 1): 'top', (7, 1): 'left'}, \\\n {(13, 1): 'bottom', (14, 1): 'right', (11, 1): 'bottom'}],\n 'ghost_nodes': [num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 6. , 1. , 0. ],\n [ 10. , 0.25, 0.75],\n [ 11. , 0.75, 0.25]]), num.array([[ 3. , 0.5 , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 11. , 0.75, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 1. , 0. , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 8. , 1. , 1. ],\n [ 12. , 0.75, 0.75]])],\n 'full_nodes': [num.array([[ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 9. , 0.25, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 9. , 0.25, 0.25],\n [ 10. , 0.25, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 6. , 1. , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 9. 
, 0.25, 0.25],\n [ 11. , 0.75, 0.25]])],\n 'ghost_triangles': [num.array([[ 5, 0, 9, 1],\n [ 6, 1, 9, 4],\n [ 8, 4, 10, 1],\n [ 9, 5, 10, 4],\n [10, 2, 10, 5],\n [11, 3, 9, 0],\n [12, 3, 11, 4],\n [13, 6, 11, 3],\n [14, 7, 11, 6],\n [15, 4, 11, 7]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 4, 5, 12, 8],\n [11, 3, 9, 0],\n [12, 3, 11, 4]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 3, 8, 12, 7],\n [ 5, 0, 9, 1],\n [ 6, 1, 9, 4]])],\n 'ghost_boundary': [{(13, 1): 'ghost', (8, 0): 'ghost', (14, 1): 'ghost', \\\n (11, 1): 'ghost', (10, 1): 'ghost', (5, 1): 'ghost', (10, 2): 'ghost'}, \\\n {(12, 2): 'ghost', (12, 0): 'ghost', (2, 1): 'ghost', (11, 1): 'ghost',\\\n (2, 2): 'ghost', (4, 1): 'ghost', (4, 0): 'ghost'}, {(3, 2): 'ghost', \\\n (6, 1): 'ghost', (3, 1): 'ghost', (5, 1): 'ghost', (1, 0): 'ghost', (1, 1): 'ghost'}],\n 'full_triangles': [[[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8]], \\\n [[0, 9, 1], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5]], \\\n [[3, 9, 0], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7]]],\n 'full_commun': [{0: [1, 2], 1: [1, 2], 2: [1, 2], 3: [2], 4: [1]}, \\\n {5: [0, 2], 6: [0, 2], 7: [], 8: [0], 9: [0], 10: [0]}, \\\n {11: [0, 1], 12: [0, 1], 13: [0], 14: [0], 15: [0]}],\n 'ghost_commun': [num.array([[ 5, 1],\n [ 6, 1],\n [ 8, 1],\n [ 9, 1],\n [10, 1],\n [11, 2],\n [12, 2],\n [13, 2],\n [14, 2],\n [15, 2]]), num.array([[ 0, 0],\n [ 1, 0],\n [ 2, 0],\n [ 4, 0],\n [11, 2],\n [12, 2]]), num.array([[0, 0],\n [1, 0],\n [2, 0],\n [3, 0],\n [5, 1],\n [6, 1]])], 'ghost_quan': {'stage': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'elevation': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'ymomentum': [num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 0. 
, 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}, 'full_quan': {'stage': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'elevation': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'ymomentum': [num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. 
],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}}\n\n\n # Subdivide into non-overlapping partitions\n submesh = build_submesh(nodes, triangles, boundary, quantities,\n triangles_per_proc, parameters = None)\n\n\n for i in range(3):\n assert num.allclose(true_submesh['full_triangles'][i],submesh['full_triangles'][i])\n assert num.allclose(true_submesh['full_nodes'][i],submesh['full_nodes'][i])\n assert num.allclose(true_submesh['ghost_triangles'][i],submesh['ghost_triangles'][i])\n assert num.allclose(true_submesh['ghost_nodes'][i],submesh['ghost_nodes'][i])\n assert num.allclose(true_submesh['ghost_commun'][i],submesh['ghost_commun'][i])\n\n assert true_submesh['full_boundary'] == submesh['full_boundary']\n assert true_submesh['full_commun'] == submesh['full_commun']\n\n for key, value in true_submesh['ghost_quan'].items():\n for i in range(3):\n assert num.allclose(true_submesh['ghost_quan'][key][i],submesh['ghost_quan'][key][i])\n assert num.allclose(true_submesh['full_quan'][key][i],submesh['full_quan'][key][i])\n\n\n # Now test the extract_submesh for the 3 processors\n\n submesh_cell_0 = extract_submesh(submesh,triangles_per_proc,p=0)\n submesh_cell_1 = extract_submesh(submesh,triangles_per_proc,p=1)\n submesh_cell_2 = extract_submesh(submesh,triangles_per_proc,p=2)\n\n\n from pprint import pprint\n\n #pprint(submesh_cell_1)",
"def get_optimal_patches(self):\n self.optimal_patch_centers = list()\n # Backtrace through cost to determine optimal samples\n for i in range(self.cost_matrix.shape[0] - 1, -1, -1):\n idx = self.nodes_min_energy_index(i)\n node = self.min_energy_index[i][idx]\n self.optimal_patch_centers.append(node)\n self.optimal_patch_centers.reverse()\n self.optimal_patch_centers = [\n int(patch) for patch in self.optimal_patch_centers if np.isfinite(patch)\n ]\n optimal_patch_centers = list()\n for patch_center in self.optimal_patch_centers:\n if (\n self.source_patches[self.patch_centers[patch_center]].size\n != self.patch_size * self.patch_size * 3\n ):\n node = patch_center - 1 if patch_center > 1 else patch_center + 1\n optimal_patch_centers.append(node)\n if optimal_patch_centers:\n self.optimal_patch_centers = optimal_patch_centers",
"def linear_backtrack(self):\n \n# tracking_state = self.create_starting_tracking_state()\n tracking_state = self.initial_tracking_state\n\n subgraph_is_still_changing = True\n subgraph_is_extendible = True\n new_seed_could_be_found = True\n \n while new_seed_could_be_found:\n new_seed_could_be_found, tracking_state = self.add_new_seed_if_possible(tracking_state)\n if new_seed_could_be_found:\n subgraph_is_still_changing = True\n while subgraph_is_still_changing:\n old_id_map = tracking_state.id_map.copy()\n tracking_state.not_blacklisted_vector[:] = True\n subgraph_is_extendible = self.is_extendable(tracking_state, is_global = True)\n while subgraph_is_extendible:\n vertex = self.pick_next_vertex(tracking_state, is_global = True )\n \n possible_images = self.get_mappable_vertices(tracking_state, vertex)\n\n self.max_found_subgraph_size = 0\n self.largest_mappings = []\n \n for image in possible_images:\n new_tracking_state,_ = self.create_localised_tracking_state( tracking_state, vertex, image )\n self.backtrack( new_tracking_state )\n \n reduced_localised_tracking_state,_ = self.create_localised_tracking_state(tracking_state, vertex)\n self.backtrack( reduced_localised_tracking_state )\n \n tracking_state = self.extend_tracking_state_and_largest_mapping_if_feasible( tracking_state, vertex )\n \n subgraph_is_extendible = self.is_extendable(tracking_state, is_global = True)\n \n subgraph_is_still_changing = not old_id_map == tracking_state.id_map\n \n self.largest_mappings = [tracking_state.id_map]",
"def test_build_submesh_3(self):\n\n nodes = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], [1.0, 0.0], \\\n [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]\n\n\n triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0, 9, 1], \\\n [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 9, 0], [3, 11, 4], \\\n [6, 11, 3], [7, 11, 6], [4, 11, 7]]\n\n\n boundary = {(13, 1): 'bottom', (7, 1): 'left', (3, 1): 'right', (14, 1): 'right', \\\n (11, 1): 'bottom', (10, 1): 'top', (5, 1): 'left', (4, 1): 'top'}\n\n triangles_per_proc = [5, 6, 5]\n\n\n quantities = {'stage': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'elevation': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'ymomentum': num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), 'friction': num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), 'xmomentum': num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])}\n\n\n\n true_submesh = {'full_boundary': [{(3, 1): 'right', (4, 1): 'top'},\\\n {(5, 1): 'left', (10, 1): 'top', (7, 1): 'left'}, \\\n {(13, 1): 'bottom', (14, 1): 'right', (11, 1): 'bottom'}],\n 'ghost_nodes': [num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 6. , 1. , 0. ],\n [ 10. , 0.25, 0.75],\n [ 11. , 0.75, 0.25]]), num.array([[ 3. , 0.5 , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 11. , 0.75, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 1. , 0. , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 8. , 1. , 1. ],\n [ 12. , 0.75, 0.75]])],\n 'full_nodes': [num.array([[ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 9. , 0.25, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 9. , 0.25, 0.25],\n [ 10. , 0.25, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 6. , 1. , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 9. , 0.25, 0.25],\n [ 11. 
, 0.75, 0.25]])],\n 'ghost_triangles': [num.array([[ 5, 0, 9, 1],\n [ 6, 1, 9, 4],\n [ 8, 4, 10, 1],\n [ 9, 5, 10, 4],\n [10, 2, 10, 5],\n [11, 3, 9, 0],\n [12, 3, 11, 4],\n [13, 6, 11, 3],\n [14, 7, 11, 6],\n [15, 4, 11, 7]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 4, 5, 12, 8],\n [11, 3, 9, 0],\n [12, 3, 11, 4]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 3, 8, 12, 7],\n [ 5, 0, 9, 1],\n [ 6, 1, 9, 4]])],\n 'ghost_boundary': [{(13, 1): 'ghost', (8, 0): 'ghost', (14, 1): 'ghost', \\\n (11, 1): 'ghost', (10, 1): 'ghost', (5, 1): 'ghost', (10, 2): 'ghost'}, \\\n {(12, 2): 'ghost', (12, 0): 'ghost', (2, 1): 'ghost', (11, 1): 'ghost',\\\n (2, 2): 'ghost', (4, 1): 'ghost', (4, 0): 'ghost'}, {(3, 2): 'ghost', \\\n (6, 1): 'ghost', (3, 1): 'ghost', (5, 1): 'ghost', (1, 0): 'ghost', (1, 1): 'ghost'}],\n 'full_triangles': [[[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8]], \\\n [[0, 9, 1], [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5]], \\\n [[3, 9, 0], [3, 11, 4], [6, 11, 3], [7, 11, 6], [4, 11, 7]]],\n 'full_commun': [{0: [1, 2], 1: [1, 2], 2: [1, 2], 3: [2], 4: [1]}, \\\n {5: [0, 2], 6: [0, 2], 7: [], 8: [0], 9: [0], 10: [0]}, \\\n {11: [0, 1], 12: [0, 1], 13: [0], 14: [0], 15: [0]}],\n 'ghost_commun': [num.array([[ 5, 1],\n [ 6, 1],\n [ 8, 1],\n [ 9, 1],\n [10, 1],\n [11, 2],\n [12, 2],\n [13, 2],\n [14, 2],\n [15, 2]]), num.array([[ 0, 0],\n [ 1, 0],\n [ 2, 0],\n [ 4, 0],\n [11, 2],\n [12, 2]]), num.array([[0, 0],\n [1, 0],\n [2, 0],\n [3, 0],\n [5, 1],\n [6, 1]])], 'ghost_quan': {'stage': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'elevation': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'ymomentum': [num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 0. 
, 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}, 'full_quan': {'stage': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'elevation': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'ymomentum': [num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. 
],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}}\n\n\n from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh\n\n mesh = Mesh(nodes, triangles, boundary)\n boundary_polygon = mesh.get_boundary_polygon()\n\n\n # Subdivide into non-overlapping partitions\n\n submesh = submesh_full(mesh, triangles_per_proc)\n\n #print submesh\n\n\n for i in range(3):\n assert num.allclose(true_submesh['full_triangles'][i],submesh['full_triangles'][i])\n assert num.allclose(true_submesh['full_nodes'][i],submesh['full_nodes'][i])\n assert true_submesh['full_boundary'] == submesh['full_boundary']\n\n # Add any extra ghost boundary layer information\n\n submesh = submesh_ghost(submesh, mesh, triangles_per_proc)\n\n for i in range(3):\n assert num.allclose(true_submesh['ghost_triangles'][i],submesh['ghost_triangles'][i])\n assert num.allclose(true_submesh['ghost_nodes'][i],submesh['ghost_nodes'][i])\n assert num.allclose(true_submesh['ghost_commun'][i],submesh['ghost_commun'][i])\n\n assert true_submesh['full_commun'] == submesh['full_commun']\n\n\n # Order the quantities information to be the same as the triangle\n # information\n\n\n submesh = submesh_quantities(submesh, quantities, \\\n triangles_per_proc)\n\n\n\n for key, value in true_submesh['ghost_quan'].items():\n for i in range(3):\n assert num.allclose(true_submesh['ghost_quan'][key][i],submesh['ghost_quan'][key][i])\n assert num.allclose(true_submesh['full_quan'][key][i],submesh['full_quan'][key][i])\n\n\n submesh[\"boundary_polygon\"] = boundary_polygon",
"def minimax_endgame_search(state, maximize=True) :\n global depth;\n depth=0\n path=[]\n paths=[]\n _path, _score = get_minimax_score(state, maximize, path, paths,INF,always_zero)\n\n return [_path, _score, len(paths)]",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def gen_einsum_strategies(\n equation: str,\n mesh: DeviceMesh,\n *,\n linearity: bool = False,\n) -> OpStrategy:\n # parse einop equation and extract dims\n input_dims, output_dim = EinsumDims.parse_equation(equation)\n edims = EinsumDims.parse_dims(input_dims, output_dim)\n\n all_mesh_dim_strategies = []\n\n # generate strategies for each mesh dim\n for mesh_dim in range(mesh.ndim):\n mesh_dim_strategies = []\n\n # placement list stores placements of [output, input1, input2, ...]\n # first we always have replicate all for inputs and output\n placement_list: List[Placement] = [Replicate()] * (len(input_dims) + 1)\n mesh_dim_strategies.append(placement_list)\n\n if mesh.size(mesh_dim) <= 1:\n # only replicate strategy for mesh dim with size 1\n # TODO: see if this is valid for the submesh case\n continue\n\n # split batch dim\n for batch_dim in edims.batch_dims:\n output_batch_dim = output_dim.index(batch_dim)\n placement_list = [Shard(output_batch_dim)]\n for input_dim in input_dims:\n input_batch_dim = input_dim.index(batch_dim)\n placement_list.append(Shard(input_batch_dim))\n\n mesh_dim_strategies.append(placement_list)\n\n # split contracting dim\n for contracting_dim in edims.contracting_dims:\n placement_list = [_Partial()]\n for input_dim in input_dims:\n input_contracting_dim = input_dim.index(contracting_dim)\n placement_list.append(Shard(input_contracting_dim))\n\n mesh_dim_strategies.append(placement_list)\n\n # split lhs free dim\n for lhs_dim in edims.lhs_out_only_dims:\n lhs_free_dim = output_dim.index(lhs_dim)\n # this means split the lhs input and output\n # i.e. S(0), R -> S(0)\n lhs_placement_list: List[Placement] = [\n Shard(lhs_free_dim),\n Shard(lhs_free_dim),\n Replicate(),\n ]\n mesh_dim_strategies.append(lhs_placement_list)\n\n # split rhs free dim\n for rhs_dim in edims.rhs_out_only_dims:\n rhs_free_dim = output_dim.index(rhs_dim)\n rhs_placement_list: List[Placement] = [\n Shard(rhs_free_dim),\n Replicate(),\n Shard(rhs_free_dim),\n ]\n mesh_dim_strategies.append(rhs_placement_list)\n\n # linearity strategy\n if linearity:\n linearity_placement_list: List[Placement] = [_Partial()]\n for input_dim in input_dims:\n linearity_placement_list.append(_Partial())\n mesh_dim_strategies.append(linearity_placement_list)\n\n all_mesh_dim_strategies.append(mesh_dim_strategies)\n\n # generate strategies for entire mesh\n strategy_combs = itertools.product(*all_mesh_dim_strategies)\n\n # TODO: filter out invalid strategies, at this point we generate\n # all possible strategies without considering the whether the tensor\n # dim could be sharded or not, we would need to filter out invalid\n # strategies base on the actual tensor shape\n # (i.e. for Shard, tensor dim size must > mesh size)\n all_strategies = []\n for startegy_comb in strategy_combs:\n spec_list = []\n for specs in zip(*startegy_comb):\n spec_list.append(DTensorSpec(mesh, list(specs)))\n strat = PlacementStrategy(output_spec=spec_list[0], input_specs=spec_list[1:])\n all_strategies.append(strat)\n\n return OpStrategy(all_strategies)",
"def search(self, pid, start, layers):\n plan = []\n workload = [0 for _ in range(len(self.workers))]\n\n # each layer is a separate search for the worker to process the layer\n for i in range(len(layers)):\n layer = layers[i]\n target_color = layer[\"color\"]\n target_thickness = layer[\"thickness\"]\n processing_costs = {k: math.ceil(target_thickness / self.processing_rate[k][target_color]) for k in self.processing_rate}\n\n # Searches to find the cost of processing every node at each worker.\n # Cost consists of: Cost of the path \n # + Existing workload cost \n # + processing cost by the worker\n # \n # Basically Dijkstra's.\n visited = set()\n path = {}\n path_costs = {}\n pq = [(0, start)]\n curr_costs = {}\n\n # Assumes single connected component \n while len(visited) != len(self.workers):\n cost, curr = heapq.heappop(pq)\n if curr in visited: continue\n visited.add(curr)\n curr_costs[curr] = cost + processing_costs[self.worker_flavor[curr]] + self.workload[curr]\n if curr == self.origin:\n curr_costs[curr] += self.origin_penalty\n for neighbor in self.neighbors[curr]:\n if neighbor in visited: continue\n cost_new = cost + 1 \n if neighbor == self.origin:\n cost_new += self.origin_penalty\n if neighbor not in path_costs or cost_new < path_costs[neighbor]:\n path_costs[neighbor] = cost_new\n path[neighbor] = curr\n heapq.heappush(pq, (cost_new, neighbor))\n\n # Get the best cost and candidate for processing the current layer\n best_cost = float(\"inf\")\n best_cand = -1\n for cand in curr_costs:\n if curr_costs[cand] < best_cost:\n best_cost = curr_costs[cand]\n best_cand = cand\n\n # If the best candidate isn't the starting node, add the cost of the\n # path for future workload considerations\n if best_cand != start:\n # create the path \n best_path = [best_cand]\n while best_path[-1] != start:\n best_path.append(path[best_path[-1]])\n best_path = best_path[::-1]\n\n # Add the Pass operations to the plan\n prev = start \n for curr in best_path[1:]:\n workload[prev] += 1\n plan.append([1, {\"Pass\":{\"pearl_id\":pid,\"to_worker\":curr}}])\n prev = curr\n\n # Add the noms to the plan \n workload[best_cand] += processing_costs[self.worker_flavor[best_cand]]\n plan.append([processing_costs[self.worker_flavor[best_cand]], {\"Nom\": pid}])\n\n # Set the last worker in the path as the start of the next search pass\n start = best_cand\n return plan, workload, start",
"def _construct_tensor_layout_for_opt_shard(dev_matrix, tensor_map, opt_shard_step, opt_shard_size,\n origin_full_tensor_shape):\n\n if opt_shard_step == 0 or opt_shard_size == 0:\n return dev_matrix, tensor_map, list(origin_full_tensor_shape)\n tensor_strategy = _get_tensor_strategy(dev_matrix, tensor_map)\n model_parallel_shard_size = np.prod(tensor_strategy)\n if model_parallel_shard_size != opt_shard_step:\n raise ValueError(\"The optimizer sharding step {} is not equal to the model parallel sharding size {}.\".\n format(opt_shard_step, model_parallel_shard_size))\n\n first_dim_no_sharding_size = origin_full_tensor_shape[0] // tensor_strategy[0]\n full_tensor_shape = list(origin_full_tensor_shape)\n full_tensor_shape[0] = tensor_strategy[0]\n full_tensor_shape.insert(1, first_dim_no_sharding_size)\n new_dev_matrix = tensor_strategy\n repeat_dim = np.prod(dev_matrix) // (opt_shard_step * opt_shard_size)\n\n new_tensor_map = []\n for idx, val in enumerate(tensor_strategy):\n if val == 1:\n new_tensor_map.append(-1)\n else:\n new_tensor_map.append(len(tensor_strategy) - 1 - idx)\n new_tensor_map.insert(1, len(tensor_strategy))\n new_dev_matrix.insert(0, opt_shard_size)\n if repeat_dim > 1:\n new_dev_matrix.insert(0, repeat_dim)\n return new_dev_matrix, new_tensor_map, full_tensor_shape",
"def Maxmize(initial_state,alpha, beta):\n global frontier, size\n # Declare the global variables\n #global cost_of_path,nodes_expanded,set_search_depth\n \n max_depth = 3\n # The stack\n frontier._put(initial_state)\n #frontier_map = set()\n\n # create a new set and a new LifoQueue: explored,explored_LifoQueue\n explored = set()\n explored_LifoQueue = Q.LifoQueue()\n explored_map = set()\n\n while frontier._qsize():\n state = frontier._get()\n #set_search_depth.add(state.cost)\n explored.add(state)\n explored_LifoQueue._put(state)\n li_state = []\n for i in range(size):\n for j in range(size):\n li_state.append(state.map[i][j])\n explored_map.add(tuple(li_state))\n \n for depth in range(10000):\n if test_terminal(state,depth):\n #the number of nodes expanded\n nodes_expanded = len(explored)-1\n return (, eva(state))\n\n (maxChild, maxUtility) = (None, -10000)\n for neighbor in state.expand_reverse():\n XXX,utility = Minimize(state,alpha,beta)\n \n if utility > maxUtility:\n (maxChild, maxUtility) = neighbor, utility\n if maxUtility > beta or maxUtility == beta:\n break\n if maxUtility > alpha:\n alpha = maxUtility\n\n return (maxChild, maxUtility)",
"def stage_mesh_axis(self):\n stage_mesh_axis = None\n p = self.params\n if p.mesh_axis_names is not None:\n stage_mesh_axis = base_layer.to_partition_spec(\n p.weight_split_dims_mapping.stages, p.mesh_axis_names\n )[0]\n return stage_mesh_axis",
"def test_flow_model_regular_vs_optimized_mesh(self):\n\n # Create a regular, non-optimized mesh\n _sz = 10\n time_step = pp.MINUTE\n params = FlowParameters(\n head=\"TestOptimizeMesh/test_optimize_mesh\",\n time_step=time_step,\n end_time=time_step * 4,\n shearzone_names=None,\n mesh_args={\n \"mesh_size_frac\": _sz,\n \"mesh_size_min\": 0.2 * _sz,\n \"mesh_size_bound\": 3 * _sz,\n },\n source_scalar_borehole_shearzone=None,\n well_cells=nd_injection_cell_center,\n injection_rate=1 / 6,\n frac_permeability=0,\n intact_permeability=1e-13,\n )\n setup = FlowISC(params)\n\n pp.run_time_dependent_model(setup, {})\n\n assert setup.neg_ind.size == 6\n\n # --- Re-run test with optimized mesh ---\n # Optimize the mesh\n in_file = params.folder_name / \"gmsh_frac_file.geo\"\n out_file = params.folder_name / \"optimized/gmsh_frac_file.msh\"\n optimize_mesh(\n in_file=in_file,\n out_file=out_file,\n method=\"Netgen\",\n )\n gb: pp.GridBucket = pp.fracture_importer.dfm_from_gmsh(str(out_file), dim=3)\n\n # Check that the grid was indeed optimized\n assert setup.gb.num_cells() < gb.num_cells()\n\n # Set up a new model\n params.folder_name = params.folder_name / \"optimized\"\n setup_opt = FlowISC(params)\n\n # Set the optimized grid bucket to the flow model\n setup_opt.gb = gb\n\n # Run the model\n pp.run_time_dependent_model(setup_opt, {})\n assert setup_opt.neg_ind.size == 0",
"def test_build_submesh_3_layer_4(self):\n\n nodes = [[0.0, 0.0], [0.0, 0.5], [0.0, 1.0], [0.5, 0.0], [0.5, 0.5], [0.5, 1.0], \\\n [1.0, 0.0], [1.0, 0.5], [1.0, 1.0], [0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]\n\n\n triangles = [[4, 9, 3], [4, 12, 5], [7, 12, 4], [8, 12, 7], [5, 12, 8], [0, 9, 1], \\\n [1, 9, 4], [1, 10, 2], [4, 10, 1], [5, 10, 4], [2, 10, 5], [3, 9, 0], [3, 11, 4], \\\n [6, 11, 3], [7, 11, 6], [4, 11, 7]]\n\n\n boundary = {(13, 1): 'bottom', (7, 1): 'left', (3, 1): 'right', (14, 1): 'right', \\\n (11, 1): 'bottom', (10, 1): 'top', (5, 1): 'left', (4, 1): 'top'}\n\n r\"\"\"\n top\n (10,1) (4,1)\n 2 ---------- 5 ---------- 8\n | \\ 10 / | \\ 04 / |\n | \\ / | \\ / |\n (7,1) | 07 10 09 | 01 12 03 | (3,1)\n | / \\ | / \\ |\n | / 08 \\ | / 02 \\ |\n left 1 ---------- 4 ---------- 7 right\n | \\ 06 / | \\ 15 / |\n | \\ / | \\ / |\n (5,1) | 05 09 00 | 12 11 14 | (14,1)\n | / \\ | / \\ |\n | / 11 \\ | / 13 \\ |\n 0 ---------- 3 ---------- 6\n (11,1) (13,1)\n bottom\n\n\n Processor 0 Full Triangles 00,01,02,03,04\n Processor 1 Full Triangles 05,06,07,08,09,10\n Processor 2 Full Triangles 11,12,13,14,15\n \"\"\"\n\n\n triangles_per_proc = [5, 6, 5]\n\n\n quantities = {'stage': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'elevation': num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), 'ymomentum': num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), 'friction': num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), 'xmomentum': num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])}\n\n\n\n true_submesh = {'full_boundary': [{(3, 1): 'right', (4, 1): 'top'}, {(5, 1): 'left', \\\n (10, 1): 'top', (7, 1): 'left'}, {(13, 1): 'bottom', (14, 1): 'right', (11, 1): 'bottom'}],\n 'ghost_nodes': [num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 6. , 1. , 0. ],\n [ 10. , 0.25, 0.75],\n [ 11. , 0.75, 0.25]]), num.array([[ 3. , 0.5 , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 11. , 0.75, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 1. , 0. 
, 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 8. , 1. , 1. ],\n [ 12. , 0.75, 0.75]])],\n 'full_nodes': [num.array([[ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 7. , 1. , 0.5 ],\n [ 8. , 1. , 1. ],\n [ 9. , 0.25, 0.25],\n [ 12. , 0.75, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 1. , 0. , 0.5 ],\n [ 2. , 0. , 1. ],\n [ 4. , 0.5 , 0.5 ],\n [ 5. , 0.5 , 1. ],\n [ 9. , 0.25, 0.25],\n [ 10. , 0.25, 0.75]]), num.array([[ 0. , 0. , 0. ],\n [ 3. , 0.5 , 0. ],\n [ 4. , 0.5 , 0.5 ],\n [ 6. , 1. , 0. ],\n [ 7. , 1. , 0.5 ],\n [ 9. , 0.25, 0.25],\n [ 11. , 0.75, 0.25]])],\n 'ghost_triangles': [num.array([[ 5, 0, 9, 1],\n [ 6, 1, 9, 4],\n [ 8, 4, 10, 1],\n [ 9, 5, 10, 4],\n [10, 2, 10, 5],\n [11, 3, 9, 0],\n [12, 3, 11, 4],\n [13, 6, 11, 3],\n [14, 7, 11, 6],\n [15, 4, 11, 7]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 4, 5, 12, 8],\n [11, 3, 9, 0],\n [12, 3, 11, 4]]), num.array([[ 0, 4, 9, 3],\n [ 1, 4, 12, 5],\n [ 2, 7, 12, 4],\n [ 3, 8, 12, 7],\n [ 5, 0, 9, 1],\n [ 6, 1, 9, 4]])],\n 'ghost_boundary': [{(13, 1): 'ghost', (8, 0): 'ghost', \\\n (14, 1): 'ghost', (11, 1): 'ghost', (10, 1): 'ghost', \\\n (5, 1): 'ghost', (10, 2): 'ghost'}, {(12, 2): 'ghost', \\\n (12, 0): 'ghost', (2, 1): 'ghost', (11, 1): 'ghost', \\\n (2, 2): 'ghost', (4, 1): 'ghost', (4, 0): 'ghost'}, \\\n {(3, 2): 'ghost', (6, 1): 'ghost', (3, 1): 'ghost', \\\n (5, 1): 'ghost', (1, 0): 'ghost', (1, 1): 'ghost'}],\n 'full_triangles': [[[4, 9, 3], [4, 12, 5], [7, 12, 4], \\\n [8, 12, 7], [5, 12, 8]], [[0, 9, 1], [1, 9, 4], [1, 10, 2], \\\n [4, 10, 1], [5, 10, 4], [2, 10, 5]], [[3, 9, 0], [3, 11, 4], \\\n [6, 11, 3], [7, 11, 6], [4, 11, 7]]],\n 'full_commun': [{0: [1, 2], 1: [1, 2], 2: [1, 2], 3: [2], 4: [1]}, \\\n {5: [0, 2], 6: [0, 2], 7: [], 8: [0], 9: [0], 10: [0]}, \\\n {11: [0, 1], 12: [0, 1], 13: [0], 14: [0], 15: [0]}],\n 'ghost_commun': [num.array([[ 5, 1],\n [ 6, 1],\n [ 8, 1],\n [ 9, 1],\n [10, 1],\n [11, 2],\n [12, 2],\n [13, 2],\n [14, 2],\n [15, 2]]), num.array([[ 0, 0],\n [ 1, 0],\n [ 2, 0],\n [ 4, 0],\n [11, 2],\n [12, 2]]), num.array([[0, 0],\n [1, 0],\n [2, 0],\n [3, 0],\n [5, 1],\n [6, 1]])], 'ghost_quan': {'stage': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'elevation': [num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ]])], 'ymomentum': [num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. 
],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}, 'full_quan': {'stage': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'elevation': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'ymomentum': [num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. 
],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}}\n\n\n # setup parameters to test using different ghost_layer_widths\n parameters = dict(ghost_layer_width = 1)\n\n from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh\n\n mesh = Mesh(nodes, triangles, boundary)\n boundary_polygon = mesh.get_boundary_polygon()\n\n\n # Subdivide into non-overlapping partitions\n\n submesh = submesh_full(mesh, triangles_per_proc)\n\n\n\n for i in range(3):\n assert num.allclose(true_submesh['full_triangles'][i],submesh['full_triangles'][i])\n assert num.allclose(true_submesh['full_nodes'][i],submesh['full_nodes'][i])\n assert true_submesh['full_boundary'] == submesh['full_boundary']\n\n # Add any extra ghost boundary layer information\n\n submesh = submesh_ghost(submesh, mesh, triangles_per_proc, parameters)\n\n #print submesh\n\n #print 'ghost_triangles', submesh['ghost_triangles']\n #print 'ghost_boundary', submesh['ghost_boundary']\n #print 'ghost_nodes', submesh['ghost_nodes']\n #print 'ghost_commun', submesh['ghost_commun']\n\n\n true_submesh['ghost_boundary'] = [{(12, 2): 'ghost', (9, 0): 'ghost', (9, 2): 'ghost', \\\n (6, 1): 'ghost', (15, 0): 'ghost', (11, 1): 'bottom', (11, 0): 'ghost', (6, 2): 'ghost'}, \\\n {(0, 1): 'ghost', (1, 2): 'ghost', (1, 0): 'ghost', (11, 1): 'bottom'},\\\n {(5, 1): 'left', (2, 0): 'ghost', (0, 2): 'ghost', (5, 0): 'ghost', (2, 2): 'ghost'}]\n\n\n true_submesh['ghost_triangles'] = [num.array([[ 6, 1, 9, 4],\\\n [ 9, 5, 10, 4],\\\n [11, 3, 9, 0],\\\n [12, 3, 11, 4],\\\n [15, 4, 11, 7]]), \\\n num.array([[ 0, 4, 9, 3],\\\n [ 1, 4, 12, 5],\\\n [11, 3, 9, 0]]),\\\n num.array([[ 0, 4, 9, 3],\\\n [ 2, 7, 12, 4],\\\n [ 5, 0, 9, 1]])]\n\n true_submesh['ghost_nodes'] = [num.array([[ 0. , 0. , 0. ],\\\n [ 1. , 0. , 0.5 ],\\\n [ 10. , 0.25, 0.75],\\\n [ 11. , 0.75, 0.25]]),\\\n num.array([[ 3. , 0.5 , 0. ],\\\n [ 12. , 0.75, 0.75]]),\\\n num.array([[ 1. , 0. , 0.5 ],\\\n [ 12. 
, 0.75, 0.75]])]\n\n true_submesh['ghost_commun'] = [num.array([[ 6, 1],\\\n [ 9, 1],\\\n [11, 2],\\\n [12, 2],\\\n [15, 2]]),\\\n num.array([[ 0, 0],\\\n [ 1, 0],\\\n [11, 2]]),\\\n num.array([[0, 0],\\\n [2, 0],\\\n [5, 1]])]\n\n\n\n\n for i in range(3):\n assert true_submesh['ghost_boundary'][i] == submesh['ghost_boundary'][i]\n assert num.allclose(true_submesh['ghost_triangles'][i],submesh['ghost_triangles'][i])\n assert num.allclose(true_submesh['ghost_nodes'][i],submesh['ghost_nodes'][i])\n assert num.allclose(true_submesh['ghost_commun'][i],submesh['ghost_commun'][i])\n\n\n\n true_submesh['full_commun'] = [{0: [1, 2], 1: [1], 2: [2], 3: [], 4: []},\\\n {5: [2], 6: [0], 7: [], 8: [], 9: [0], 10: []},\\\n {11: [0, 1], 12: [0], 13: [], 14: [], 15: [0]}]\n\n assert true_submesh['full_commun'] == submesh['full_commun']\n\n\n # Order the quantities information to be the same as the triangle\n # information\n\n\n submesh = submesh_quantities(submesh, quantities, \\\n triangles_per_proc)\n\n\n #print submesh['full_quan']\n\n\n\n true_submesh['full_quan'] = {'stage': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'elevation': [num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0. , -0.125, -0. ],\n [-0. , -0.125, -0.25 ],\n [-0. , -0.125, -0. ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.125, -0.25 ],\n [-0. , -0.125, -0.25 ]]), num.array([[-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0.5 , -0.375, -0.5 ],\n [-0.25 , -0.375, -0.5 ]])], 'ymomentum': [num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ],\n [ 0.5 , 0.75, 1. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 1. , 0.75, 1. ]]), num.array([[ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}\n\n\n true_submesh['ghost_quan'] = {'stage': [num.array([[-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. ],\n [-0.25 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.25 , -0.125, -0. ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0. , -0.125, -0. ]])], 'elevation': [num.array([[-0. , -0.125, -0.25 ],\n [-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.125, -0. 
],\n [-0.25 , -0.375, -0.25 ],\n [-0.25 , -0.375, -0.5 ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.25 , -0.375, -0.25 ],\n [-0.25 , -0.125, -0. ]]), num.array([[-0.25 , -0.125, -0.25 ],\n [-0.5 , -0.375, -0.25 ],\n [-0. , -0.125, -0. ]])], 'ymomentum': [num.array([[ 0.5 , 0.25, 0.5 ],\n [ 1. , 0.75, 0.5 ],\n [ 0. , 0.25, 0. ],\n [ 0. , 0.25, 0.5 ],\n [ 0.5 , 0.25, 0.5 ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 1. ],\n [ 0. , 0.25, 0. ]]), num.array([[ 0.5 , 0.25, 0. ],\n [ 0.5 , 0.75, 0.5 ],\n [ 0. , 0.25, 0.5 ]])], 'friction': [num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]]), num.array([[ 0., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])], 'xmomentum': [num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]]), num.array([[ 2., 2., 2.],\n [ 2., 2., 2.],\n [ 2., 2., 2.]])]}\n\n\n for key, value in true_submesh['ghost_quan'].items():\n for i in range(3):\n assert num.allclose(true_submesh['ghost_quan'][key][i],submesh['ghost_quan'][key][i])\n assert num.allclose(true_submesh['full_quan'][key][i],submesh['full_quan'][key][i])\n\n\n submesh[\"boundary_polygon\"] = boundary_polygon",
"def solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n\n result = {}\n result[\"visualStages\"] = []\n for stage in stages:\n\n stage_dic = {}\n object_dic_copy = copy.deepcopy(objects_dic)\n predicates = stage[\"items\"]\n sorted_predicates = priority(predicates, predicates_rules)\n\n # For hanoi problem, reset each stage\n # For logistics problem, reset each stage\n for fname in gstate[\"reset_function\"]:\n gstate[fname] = {}\n solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n stage_dic[\"visualSprites\"] = object_dic_copy\n if \"stageName\" not in stage:\n stage_dic[\"stageName\"] = \"Inital Stage\"\n stage_dic[\"stageInfo\"] = \"No step information\"\n\n else:\n stage_dic[\"stageName\"] = stage[\"stageName\"]\n stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n result[\"visualStages\"].append(stage_dic)\n\n result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n return result",
"def solve_allsubsets(self):\r\n\r\n if self.many >= self.n:\r\n print(\"Reduce number of best subsets you want to find, it is greater than or equal to all possibilities\")\r\n return None\r\n \r\n mem = hpy()\r\n \"\"\" memory object \"\"\"\r\n mem.heap()\r\n \"\"\" check the objects that are in memory right now \"\"\"\r\n mem.setrelheap()\r\n \"\"\" referencing this point, memory usage will be calculated \"\"\"\r\n t0 = time.process_time()\r\n \"\"\" referencing this point, cpu usage will be calculated \"\"\"\r\n \r\n if self.out != 2:\r\n sys.stdout = open(os.devnull, 'w')\r\n else:\r\n sys.stdout = self.original_stdout \r\n \"\"\" whether user wants to print every step of the algorithm or not \"\"\"\r\n \r\n P = list(range(self.n))\r\n C = []\r\n \"\"\" Preparing the P and C that will be passed to the function \"\"\"\r\n \r\n \r\n t2 = time.process_time()\r\n t3 = time.time()\r\n self.solve_sp_mk0s(P,C)\r\n \"\"\" mk0 also works, but this is in general faster \"\"\"\r\n t4 = time.time()\r\n finish = time.process_time()\r\n duration = finish-t0\r\n self.cpu = duration\r\n if self.out == 0:\r\n sys.stdout = open(os.devnull, 'w')\r\n else:\r\n sys.stdout = self.original_stdout \r\n print(\"CPU time of the algorithm\",duration,\"seconds\")\r\n m = mem.heap()\r\n print(m)\r\n self.memory = m.size\r\n \"\"\" real memory usage is different than the number we store here because we use guppy package \r\n to track down memory usage of our algorithm, tracking down memory usage is also memory extensive\r\n process \"\"\"\r\n \r\n sys.stdout = self.original_stdout\r\n \r\n return [self.residual_squared,self.indexes,self.coefficients]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Count the number of times elem appears in the reversed iterator. | def count(self, elem):
return self.iter.count(elem) | [
"def sequence_sorted_count(self, x, reverse=False):\n c = 0\n if reverse: it = reversed(self)\n else: it = iter(self)\n for v in it:\n if x == v:\n c += 1\n break\n for v in it:\n if x == v: c += 1\n else: break\n return c",
"def elem_count(self, elem):\n res = 0\n for value in self.grid.values():\n if value == elem:\n res += 1\n return res",
"def count(self, e):\r\n x = e\r\n counter = 0\r\n curr = self._head\r\n #if bag is empty\r\n if self._size == 0:\r\n raise Empty('Your bag is empty.')\r\n return\r\n #go through entire list and find at least one occurrence \r\n while curr != None:\r\n if curr._element == x:\r\n counter += 1\r\n break\r\n curr = curr._next\r\n return counter",
"def _get_element_counts(self, basis):\n element_counts = []\n previous_element = None\n for index, element in enumerate(basis[\"elements\"]):\n if previous_element and previous_element[\"value\"] == element[\"value\"]:\n element_counts[-1][\"count\"] += 1\n else:\n element_counts.append({\n \"count\": 1,\n \"value\": element[\"value\"]\n })\n previous_element = basis[\"elements\"][index]\n return element_counts",
"def count(self, val):\n count = 0\n if self._size > 0:\n for i in range(0, self._size):\n if val == self._list[i]:\n count += 1\n i += 1\n return count",
"def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')",
"def count(iterable):\n\treturn sum(1 for _ in iterable)",
"def count(self, i):\n return sum([1 for j in self if i==j])",
"def count_element(count_in, element):\n count = 0\n for value in count_in:\n if value[0] == element:\n count += 1\n return count",
"def count_reversals(data):\n reversal_count = 0\n for i in range(len(data)-1):\n if data[i]<0 and data[i+1]>0:\n reversal_count += 1\n elif data[i]>0 and data[i+1]<0:\n reversal_count += 1\n return reversal_count",
"def iter_count(self):\n return self._iter_count",
"def _len_iterable(iterable: Iterator) -> int:\n return sum(1 for _ in iterable)",
"def count(iterable):\n return sum(1 for _ in iterable)",
"def count(self, element):\n return self._counts.get(element, 0)",
"def cursor_nelements(cursor):\n\tcount = 0\n\tfor data in cursor:\n\t\tcount += 1\n\treturn count",
"def get_number_of_inversions_naive(self, lst):\r\n # Running time: O(n ** 2)\r\n count_inv = 0\r\n \r\n for i in range(len(lst)):\r\n for j in range(i+1, len(lst)):\r\n if lst[i] > lst[j]:\r\n count_inv += 1\r\n \r\n return count_inv",
"def count(seq):\n\treturn sum(1 for x in seq)",
"def _count(subchain: list) -> int:\n # TODO check around gaps only\n return sum([ 1 if _valid([ v for i, v in enumerate(subchain) if i not in g]) else 0 for g in _gaps(len(subchain)) ])",
"def __count_inversions(puzzle):\n puzzleLength = len(puzzle)\n count = 0\n for i in range(puzzleLength):\n for j in range(i + 1, puzzleLength):\n if(puzzle[i] > puzzle[j]):\n count += 1\n return count"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
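As a sanity check on the delegation in the positive count above: counting an element is order-independent, so deferring to the underlying iterable's count gives the same answer as counting in the reversed view. A two-line check with a plain list standing in for the wrapped iterator:

    seq = [1, 2, 2, 3, 2]
    assert list(reversed(seq)).count(2) == seq.count(2) == 3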
Find the index of elem in the reversed iterator. | def index(self, elem):
return _coconut.len(self.iter) - self.iter.index(elem) - 1 | [
"def r_index(sequence, element):\n\n for i, e in enumerate(reversed(sequence)):\n if element == e:\n return len(sequence) - 1 - i\n else:\n raise ValueError(\"r_index(sequence, element):\\\n element not in the sequence\")",
"def index(self, elem):\n pointer = self.head\n i = 0\n while(pointer):\n if pointer.data == elem:\n return i\n pointer = pointer.next\n i += 1\n raise ValueError(\"{} is not in list\".format(elem))",
"def get_index(iterable, fun):\n for i in range(0, len(iterable)):\n if fun(iterable[i]):\n return i\n return -1",
"def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)",
"def index(self, elem):\n ponteiro = self.inicio\n i = 0\n while(ponteiro):\n if ponteiro.dado == elem:\n return i\n ponteiro = ponteiro.prox\n i = i + 1\n raise ValueError(\"{} is not in list\".format(elem))",
"def indexOf(self, element):\n return self._getElementIndexFromHead(element)",
"def reverse_linear_search(lst, value):\n i = len(lst) - 1\n while i != -1 and lst[i] != value:\n i = i + 1\n if i == -1:\n return -1\n else:\n return i",
"def lastIndexOf(self, element):\n return self._getElementIndexFromTail(element)",
"def _idIndex(lst, el):\r\n \r\n for i, e in enumerate(lst):\r\n if e is el:\r\n return i\r\n raise ValueError('element %r not found' % el)",
"def index_element(x0, x):\n for n in range(len(x)):\n if x0==x[n]: return n\n return -1",
"def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1",
"def index(self, val):\n for j in range(len(self)):\n if self[j] == val: #leftmost match\n return j\n raise ValueError('value not in sequence') #never found a match",
"def linear_search_v2(lst, value):\n\n # The first index is included, the second is not, and the third is the\n # increment.\n for i in range(len(lst) - 1, -1, -1):\n if lst[i] == value:\n return i\n return -1",
"def index(self, val):\n for j in range(len(self)):\n if self[j] == val: # leftmost match\n return j\n raise ValueError(\"value not in sequence\") # never found a match",
"def get_index(parent, elem):\n return list(parent.getchildren()).index(elem)",
"def magic_index(a): # O(n)\n for index, num in enumerate(a):\n if index == num:\n return index",
"def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1",
"def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError",
"def last_index(arr, item):\n return len(arr) - arr[::-1].index(item) - 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
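The index arithmetic in the positive document above can be checked with a small standalone sketch; this assumes a plain Python list in place of the wrapped iterator:

    # For a sequence of length n, the element at forward index i
    # sits at index n - 1 - i once the sequence is reversed.
    seq = ["a", "b", "c", "d"]
    forward = seq.index("b")                        # 1
    reversed_index = len(seq) - seq.index("b") - 1  # 4 - 1 - 1 = 2
    assert list(reversed(seq)).index("b") == reversed_index
    print(forward, reversed_index)  # 1 2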
consume(iterable, keep_last) fully exhausts the iterable and returns the last keep_last elements.
return _coconut.collections.deque(iterable, maxlen=keep_last) | [
"def last(iterable: Iterable):\n hey = None\n for hey in iterable: pass\n return hey",
"def last(iterable):\n d = deque(iterable, maxlen=1)\n try:\n return d.pop()\n except IndexError:\n raise ValueError(\"Cannot return last item from empty iterable {!r}\".format(iterable))",
"def last(iterable):\n return reduce(lambda x, y: y, iterable)",
"def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item",
"def last(iterable, *, default=None):\n d = deque(iterable, maxlen=1) # C speed\n return d.pop() if d else default",
"def return_last(iter):\n for thing in iter:\n pass\n return thing",
"def last(iterator):\n item = None\n for item in iterator:\n pass\n return item",
"def tail(iterable):\n return drop(1, iterable)",
"def last(iterable, default=_marker):\n try:\n try:\n # Try to access the last item directly\n return iterable[-1]\n except (TypeError, AttributeError, KeyError):\n # If not slice-able, iterate entirely using length-1 deque\n return deque(iterable, maxlen=1)[0]\n except IndexError: # If the iterable was empty\n if default is _marker:\n raise ValueError('last() was called on an empty iterable, and no '\n 'default value was provided.')\n return default",
"def repeatlast(it):\r\n for item in it:\r\n yield item\r\n while 1: # pragma: no cover\r\n yield item",
"def _last_iter(iterable):\n it = iter(iterable)\n last = it.next()\n for val in it:\n yield last, False\n last = val\n yield last, True",
"def lastn(n, iterable):\n d = deque(iterable, maxlen=n) # C speed\n yield from d",
"def keep_until(\n iterable: typing.Iterable[T],\n pred: typing.Callable[[T], bool]\n) -> typing.Iterable[T]:\n for e in iterable:\n yield e\n if pred(e):\n return",
"def last(seq):\n try:\n return seq[-1]\n except TypeError:\n old = None\n it = iter(seq)\n while True:\n try:\n old = next(it)\n except StopIteration:\n return old",
"def last(iter):\n x = None\n for x in iter:\n pass\n return x",
"def _consume(iterator, n):\n if n == -1:\n n = None\n collections.deque(itertools.islice(iterator, n), maxlen=0)",
"def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x",
"def do_last(environment, seq):\r\n try:\r\n return iter(reversed(seq)).next()\r\n except StopIteration:\r\n return environment.undefined('No last item, sequence was empty.')",
"def exhaust(it: Iterable[Any]) -> None:\n deque(it, maxlen=0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
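A quick usage sketch of the consume pattern above, built on collections.deque directly, which is the behaviour the positive document delegates to:

    from collections import deque

    def consume(iterable, keep_last=0):
        # Exhaust the iterable; a deque with maxlen only retains the tail.
        return deque(iterable, maxlen=keep_last)

    it = iter(range(10))
    tail = consume(it, keep_last=3)
    print(tail)            # deque([7, 8, 9], maxlen=3)
    print(next(it, None))  # None -- the iterator is fully exhausted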
Construct an object of the given data_type containing the given arguments. | def makedata(data_type, *args):
if _coconut.hasattr(data_type, "_make") and _coconut.issubclass(data_type, _coconut.tuple):
return data_type._make(args)
if _coconut.issubclass(data_type, (_coconut.map, _coconut.range, _coconut.abc.Iterator)):
return args
if _coconut.issubclass(data_type, _coconut.str):
return "".join(args)
return data_type(args) | [
"def data_type_constructor(\n data_type: str,\n nbits: Optional[int] = None,\n compression: str = \"DEFLATE\",\n):\n\n def get_data_type(no_data):\n return DataType(\n data_type=data_type, no_data=no_data, nbits=nbits, compression=compression\n )\n\n return get_data_type",
"def _factory(*args_, **kwargs_):\n return DataSetType(*args_, **kwargs_)",
"def from_data(cls, data):",
"def build(cls, data, _type): # type: ignore[no-untyped-def]\n if isinstance(data, list):\n backing = Int32Field(cls._convert_to_int(data))\n elif isinstance(data, Int32Field):\n backing = data\n elif isinstance(data, float):\n backing = Int32Field(int(data * sensor_fixed_point_conversion))\n else:\n backing = Int32Field(data)\n as_int = int(backing.value)\n if isinstance(_type, SensorTypeField):\n _converted_type = SensorType(_type.value)\n else:\n _converted_type = _type\n return cls(backing, as_int, _converted_type)",
"def __init__(self, name, data_type=STRING):\n self.name = name\n self.data_type = data_type",
"def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data",
"def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t",
"def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj",
"def __init__(self, type_of, start, end, spent_time):\n try:\n assert type_of in (\"Productive\", \"Neutral\", \"Counterproductive\")\n except AssertionError:\n raise RuntimeError(\"Invalid parameter passed for type_of in Activity instance creation: \", type_of,\n \"should be either Productive, Neutral, Counterproductive\")\n\n DataTypesAbstractClass.__init__(self)\n self._create_database_row(type_of, start, end, spent_time)",
"def createData(self, address: ghidra.program.model.address.Address, datatype: ghidra.program.model.data.DataType) -> ghidra.program.model.listing.Data:\n ...",
"def __init__(self, *args, **kwargs):\n nargs = len(args) + len(kwargs)\n if nargs == 0:\n raise TypeError(\"one or more arguments required (0 given)\")\n \n first_arg = args[0]\n if isinstance(first_arg, str):\n if nargs > 2 or (nargs > 1 and \"quiet\" not in kwargs):\n raise TypeError(\n \"incorrect arguments for creating Dta from file\"\n )\n self._new_from_file(*args, **kwargs)\n elif isinstance(first_arg, Dta):\n if nargs > 3:\n raise TypeError(\n \"too many arguments to create Dta from existing Dta\"\n )\n self._new_from_dta(*args, **kwargs)\n elif isinstance(first_arg, collections.Iterable):\n self._new_from_iter(*args, **kwargs)\n else:\n raise TypeError(\"Dta cannot be created from these arguments:\")",
"def __new__(subtype,parent,name,typecode,dimensions,**kwds):\n if 'values' in kwds.keys():\n result=kwds.pop('values')\n else:\n shape=[]\n for d in dimensions:\n dim = parent.dimensions[d]\n\n # Adding support for netCDF3 dimension objects\n if not isinstance(dim, int):\n dim = len(dim)\n shape.append(dim)\n\n result=np.zeros(shape,typecode)\n \n result=result[...].view(subtype)\n\n result.typecode = lambda: typecode\n result.dimensions = tuple(dimensions)\n result._ncattrs = ()\n for k,v in kwds.items():\n setattr(result,k,v)\n return result",
"def __init__(self,\r\n kwargs):\r\n self.data = list()\r\n for item in kwargs:\r\n self.data.append(GenotypeModel(**item))",
"def _create_dataclass(obj, plain_dict=False):\n if plain_dict:\n items = obj\n name = \"Obj\"\n else:\n name = obj[\"class_name\"]\n items = obj[\"data\"]\n\n cls = dataclasses.make_dataclass(name, items.keys())\n return cls(**items)",
"def _factory(*args_, **kwargs_):\n return ObsType(*args_, **kwargs_)",
"def __init__(self, data_type=None):\n super(EventData, self).__init__()\n self._event_data_stream_identifier = None\n self._event_values_hash = None\n self._parser_chain = None\n\n self.data_type = data_type",
"def create(data):\n class _Object:pass\n if not isinstance(data,dict):\n return data\n newobj = _Object()\n for k,v in data.items():\n setattr( newobj,str(k),v)\n return newobj",
"def make_constructor(typeexpr, importer):\n resolved_type = typeexpr\n while type(resolved_type) is tccore.TypeRef:\n resolved_type = resolved_type.resolve()\n assert issubclass(resolved_type.__class__, tccore.Type)\n if resolved_type.fqn == \"map\":\n return \"{}\"\n elif resolved_type.fqn == \"list\":\n return \"[]\"\n elif resolved_type.isa(tccore.AtomicType):\n if resolved_type.fqn == \"any\":\n return \"null\"\n elif resolved_type.fqn == \"boolean\":\n return \"false\"\n elif resolved_type.fqn in (\"int\", \"long\"):\n return \"0\"\n elif resolved_type.fqn in (\"float\", \"double\"):\n return \"0.0\"\n elif resolved_type.fqn == \"string\":\n return '\"\"'\n elif issubclass(resolved_type.__class__, tccore.ContainerType) and resolved_type.tag == \"record\":\n return \"new %s()\" % importer.ensure(resolved_type.fqn)\n elif type(resolved_type) is tccore.TypeApp:\n typeop = resolved_type.expr.resolve()\n typeargs = [arg.resolve() for arg in resolved_type.args]\n out = \"new (%s(%s))()\" % (importer.ensure(typeop.fqn), \", \".join(importer.ensure(arg.fqn) for arg in typeargs))\n return out\n set_trace()\n assert False, \"Cannot create constructor for invalid type: %s\" % repr(resolved_type)",
"def create(self, objecttype, under, **kwargs):\n self.LogCommand()\n tclcode = \"stc::create \" + objecttype + \" -under \" + under\n\n for key in kwargs:\n tclcode = tclcode + \" \" + \"-\" + key + \" \" + str(kwargs[key])\n\n objecthandle = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(objecthandle))\n return objecthandle"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
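A hedged illustration of the dispatch order in makedata above, restricted to branches that need no Coconut runtime: the namedtuple branch, the str branch, and the generic fallback.

    from collections import namedtuple

    Point = namedtuple("Point", ["x", "y"])

    # namedtuple branch: data types with _make are rebuilt via _make(args)
    print(Point._make((1, 2)))        # Point(x=1, y=2)

    # str branch: the arguments are joined into a single string
    print("".join(("a", "b", "c")))   # abc

    # fallback branch: any other data type is called with the args tuple
    print(frozenset((1, 2, 3)))       # frozenset({1, 2, 3})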
fmap(func, obj) creates a copy of obj with func applied to its contents. Override by defining obj.__fmap__(func). | def fmap(func, obj):
if _coconut.hasattr(obj, "__fmap__"):
return obj.__fmap__(func)
if obj.__class__.__module__ == "numpy":
from numpy import vectorize
return vectorize(func)(obj)
return _coconut_makedata(obj.__class__, *(_coconut_starmap(func, obj.items()) if _coconut.isinstance(obj, _coconut.abc.Mapping) else _coconut_map(func, obj))) | [
"def fmap(functor: Any, func: Callable[[Any], Any]) -> Any:\n ...",
"def fmap(f, g):\n pass",
"def fmap(function, descriptor):\n return MappedDescriptor(descriptor, function)",
"async def afmap(function: t.Callable, functor: F = _NO_ARGUMENT):\n # Curry if no functor given.\n if functor is _NO_ARGUMENT:\n return lambda functor: afmap(function, functor)\n # Assume it is the identity functor.\n return await function(functor)",
"def Mapper(function, obj, category, wrap=tuple):\n if isinstance(obj,category):\n view = ABCView(category)(obj) \n else:\n view = ABCView(category)(wrap(obj))\n \n return map(function,view)",
"def map(self, function):\n return FunctionalWrapper(map(function, self.data))",
"def fmap(self, func):\n @wraps(self.v)\n def state_mapper(state, func=func, runner=self):\n result, new_state = runner(state)\n return (func(result), state)\n\n return State(state_mapper)",
"def map(self, fn: Callable[[Any], Any]) -> 'Reader':\n def _compose(x: Any) -> Any:\n try:\n ret = fn(self.run(x))\n except TypeError:\n ret = partial(fn, self.run(x))\n return ret\n return Reader(_compose)",
"def map(self, function):\n pass",
"def invmap(funclist, obj):\n return [func(obj) for func in funclist]",
"def map(self, func, inplace=True):\n # only leaves have to be adapted\n new_leaves = [func(l) for l in self.leaves]\n if inplace:\n self.leaves = new_leaves\n return self\n else:\n return Structure(struct=self.struct, leaves=new_leaves)",
"def imap_c(func):\n return functools.partial(imap, func)",
"def map(self, fn, *side_inputs, **options):\n return transforms.map(self, fn, *side_inputs, **options)",
"def map(self, func):\n return self.select(func)",
"def map(self, func):\n return List(map(func, self))",
"def python_map(func, *arglist, **kwds):\n #print \"ignoring: %s\" % kwds #XXX: should allow use of **kwds\n result = map(func, *arglist) # see pathos.pyina.ez_map\n return result",
"def map_collection(func, collection):\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection",
"def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)",
"def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
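A small sketch of the generic fallback path in fmap above, mapping over a Mapping's items versus a plain iterable, written with the standard library only; the name simple_fmap is illustrative, and the __fmap__ and numpy branches are deliberately skipped:

    from collections.abc import Mapping
    from itertools import starmap

    def simple_fmap(func, obj):
        # Mappings are rebuilt from func applied to each (key, value) pair;
        # other containers are rebuilt from func applied to each element.
        cls = obj.__class__
        if isinstance(obj, Mapping):
            return cls(starmap(func, obj.items()))
        return cls(map(func, obj))

    print(simple_fmap(lambda x: x * 2, [1, 2, 3]))                 # [2, 4, 6]
    print(simple_fmap(lambda k, v: (k, v + 1), {"a": 1, "b": 2}))  # {'a': 2, 'b': 3}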
Thread loop. This is an infinite loop. The iter call uses self.sql_queue.get(), which blocks when there are no values in the queue; as soon as values are placed into the queue, the loop continues. If many executes arrive at once, it churns through them all before calling commit(), which speeds things up by reducing the number of times commit is called.
logging.debug("run: Thread started")
execute_count = 0
for token, query, values in iter(self.sql_queue.get, None):
logging.debug("sql_queue: %s", self.sql_queue.qsize())
if token != self.exit_token:
logging.debug("run: %s", query)
self.run_query(token, query, values)
execute_count += 1
# Let the executes build up a little before committing to disk
# to speed things up.
if self.sql_queue.empty() \
or execute_count == self.max_queue_size:
logging.debug("run: commit")
self.sqlite3_conn.commit()
execute_count = 0
pass # exit if
# Only exit if the queue is empty. Otherwise keep getting
# through the queue until it's empty.
if self.exit_set and self.sql_queue.empty():
self.sqlite3_conn.commit()
self.sqlite3_conn.close()
self.thread_running = False
return
pass | [
"def run ( self ):\n\t\tLOGGER.debug(\"run: Thread started\")\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tx = self._sql_queue.get()\n\t\t\t\tx.execute()\n\t\t\texcept Sqlite3WorkerExit as e:\n\t\t\t\tif not self._sql_queue.empty(): # pragma: no cover ( TODO FIXME: come back to this )\n\t\t\t\t\tLOGGER.debug ( 'requeueing the exit event because there are unfinished actions' )\n\t\t\t\t\tself._sql_queue.put ( e ) # push the exit event to the end of the queue\n\t\t\t\t\tcontinue\n\t\t\t\tLOGGER.debug ( 'closing database connection' )\n\t\t\t\tself._sqlite3_cursor.close()\n\t\t\t\tself._sqlite3_conn.commit()\n\t\t\t\tself._sqlite3_conn.close()\n\t\t\t\tLOGGER.debug ( 'exiting thread' )\n\t\t\t\tbreak",
"def run(self):\n while True:\n query = self.queryqueue.get()\n try:\n answer = self.store.DBQuery(query)\n except:\n traceback.print_exc(file=sys.stdout)\n sys.stdout.flush()\n answer = \"\"\n\n self.answerqueue.put(answer)",
"def execute_transaction_queue(self):\n\n for t in self.__transaction_queue:\n print(\"...executing command in queue...\")\n t.execute()",
"def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)",
"def run(self):\n # We defer creating the Couchbase object until we are actually 'in' the\n # separate process here.\n self._connect()\n\n while True:\n next_size = None\n (i, doc, size) = self.in_queue.get()\n # We use a \"magic\" null generator to terminate the workers\n if not doc:\n # Pass the death on...\n self.out_queue.put((i, doc, size))\n break\n # Actually perform the set.\n try:\n next_size = doc.next()\n value = self.buffer[:next_size]\n self._set_with_retry('doc_' + str(i), value)\n size = next_size\n except StopIteration:\n pass\n self.out_queue.put((i, doc, size))",
"def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break",
"def _thread_worker(self):\n while self._running:\n # Retrieve next cmd, or block\n packet = self._queue.get(True)\n if isinstance(packet, dict) and QS_CMD in packet:\n try:\n self._callback_listen(packet)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.error(\"Exception in callback\\nType: %s: %s\",\n type(err), err)\n self._queue.task_done()",
"def execute_process_queue():\n while not PROCESS_QUEUE.empty():\n msg_tuple = PROCESS_QUEUE.get()\n process_synchronize_sources_msg(msg_tuple, PROCESS_QUEUE)",
"def _watchdog(self):\n while True:\n try:\n # Arno, 2012-07-12: apswtrace detects 7 s commits with yield 5 min, so reduce\n yield 60.0\n\n # flush changes to disk every 1 minutes\n self._database.commit()\n\n except Exception:\n # OperationalError: database is locked\n dprint(exception=True, level=\"error\")\n\n except GeneratorExit:\n if __debug__: dprint(\"shutdown\")\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n self._database.commit(exiting = True)\n break",
"def run_data_base_connection(self):\n data_base_connection = sqlite3.connect('Parking.db')\n cursor = data_base_connection.cursor()\n\n while not self.running:\n # stalls the connection until run is called\n sleep(1)\n\n while self.running:\n entry = self.db_queue.get()\n self.log_file('writing to database : ' + entry)\n cursor.execute(entry)\n\n data_base_connection.close()",
"def __iter__(self):\n while not self.post_queue.empty():\n yield self.post_queue.get()",
"def _fetch_loop(self, conn: LoggingDatabaseConnection) -> None:\n i = 0\n while True:\n with self._event_fetch_lock:\n event_list = self._event_fetch_list\n self._event_fetch_list = []\n\n if not event_list:\n # There are no requests waiting. If we haven't yet reached the\n # maximum iteration limit, wait for some more requests to turn up.\n # Otherwise, bail out.\n single_threaded = self.database_engine.single_threaded\n if (\n not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING\n or single_threaded\n or i > EVENT_QUEUE_ITERATIONS\n ):\n return\n\n self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)\n i += 1\n continue\n i = 0\n\n self._fetch_event_list(conn, event_list)",
"def _execute_deferred_queries(self):\n\n assert not self.__is_connected\n\n if not self._deferred_queries:\n return\n\n with Transaction(self.__database_name) as txn:\n while True:\n try:\n query = self._deferred_queries.popleft()\n txn.session.execute(query).close()\n except IndexError:\n break",
"def _run(self) -> None:\n try:\n while True:\n loop_time = self._get_time()\n loop_time_flush_interval = self._get_time(self.flush_interval.total_seconds())\n\n if loop_time >= self.flushing_interval_deadline:\n self._flush_batch()\n self.flushing_interval_deadline = loop_time + loop_time_flush_interval\n self.logger.debug('Flush interval deadline. Flushed batch.')\n\n try:\n interval = self.flushing_interval_deadline - loop_time\n item = self.event_queue.get(True, interval)\n\n if item is None:\n continue\n\n except queue.Empty:\n continue\n\n if item == self._SHUTDOWN_SIGNAL:\n self.logger.debug('Received shutdown signal.')\n break\n\n if item == self._FLUSH_SIGNAL:\n self.logger.debug('Received flush signal.')\n self._flush_batch()\n continue\n\n if isinstance(item, UserEvent):\n self._add_to_batch(item)\n\n except Exception as exception:\n self.logger.error(f'Uncaught exception processing buffer. Error: {exception}')\n\n finally:\n self.logger.info('Exiting processing loop. Attempting to flush pending events.')\n self._flush_batch()",
"def process_entire_queue(self):\r\n while self.queue:\r\n self._dequeue()",
"def process_entire_queue(self):\r\n\t\twhile self.queue:\r\n\t\t\tself._dequeue()",
"def _query_into_queue(query_string, queue, bdb_file):\n bdb = bayesdb_open(pathname=bdb_file)\n res = bdb.execute(query_string)\n queue.put(cursor_to_df(res))",
"def _flush(self):\n tempbuf = self.databuffer\n self.databuffer = []\n self.database.runInteraction(self._executemany, tempbuf)",
"def execute(self):\n for move in self._queue:\n move.execute()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
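The batching idea in the worker loop above (commit only when the queue momentarily drains or a batch limit is hit) can be sketched independently of the surrounding class; the names drain and max_batch below are illustrative, not the module's API:

    import queue
    import sqlite3

    def drain(sql_queue, conn, max_batch=100):
        # Pull (query, values) pairs until a None sentinel arrives, committing
        # whenever the queue is momentarily empty or the batch limit is reached.
        pending = 0
        for query, values in iter(sql_queue.get, None):
            conn.execute(query, values)
            pending += 1
            if sql_queue.empty() or pending >= max_batch:
                conn.commit()
                pending = 0
        conn.commit()

    q = queue.Queue()
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE kv (k TEXT, v TEXT)")
    for i in range(5):
        q.put(("INSERT INTO kv VALUES (?, ?)", (str(i), "x")))
    q.put(None)  # sentinel: stop draining
    drain(q, conn)
    print(conn.execute("SELECT COUNT(*) FROM kv").fetchone())  # (5,)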
Get the query results for a specific token. | def query_results(self, token):
delay = .001
while True:
if token in self.results:
return_val = self.results[token]
del self.results[token]
return return_val
# Double back on the delay to a max of 8 seconds. This prevents
# a long lived select statement from trashing the CPU with this
# infinite loop as it's waiting for the query results.
logging.debug("Sleeping: %s %s", delay, token)
time.sleep(delay)
if delay < 8:
delay += delay
pass | [
"def listSearches(self, authenticationToken):\r\n pass",
"def get_by_session_token(cls, token):\n return cls.query(\n cls.session.token == token).order(-cls.created_date).fetch()",
"def retrieve_results(self, token: str = '', measurement_id: str = ''):\n with open(self.config_file) as json_file:\n data = json.load(json_file)\n if token == '':\n token = data[self.server][self.license_key][self.user.email][\"user_token\"]\n\n if token == '':\n raise ValueError(\"No user token provided. Please log in.\")\n\n if not measurement_id or measurement_id == '':\n res = self.measurement.retrieve()\n else:\n res = self.measurement.retrieve(measurement_id=measurement_id)\n return res",
"def getSearch(self, authenticationToken, guid):\r\n pass",
"def results(self, q):\n parsed_query = self.query_parser.parse(q)\n return self.searcher.search(parsed_query, limit=self.max_number_of_results)",
"def getTermByToken(self, token):\n if not token:\n raise LookupError('Expect a userid')\n\n try:\n value = token\n return self.getTerm(value)\n except (IndexError, orm.exc.NoResultFound):\n raise LookupError",
"def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)",
"def query_token(self, token, token_type_hint):\n token_model = self.server.token_model\n if token_type_hint == 'access_token':\n rv = _query_access_token(token_model, token)\n elif token_type_hint == 'refresh_token':\n rv = _query_refresh_token(token_model, token)\n else:\n rv = _query_access_token(token_model, token)\n if not rv:\n rv = _query_refresh_token(token_model, token)\n\n return rv",
"def search_v1(query_tokens, inverted_index):\n return []",
"def get_results():\n api_key_is_valid(app, flask_request)\n page = get_value(flask_request, \"page\")\n if not page:\n page = 1\n return jsonify(\n select_reports(\n int(page)\n )\n ), 200",
"def list_recommendations_by_next_token(self, token):\n return self.list_recommendations(next_token=token)",
"def search(token, query):\n format_query = query.replace(\" \", \"%20\")\n url = 'https://api.thetvdb.com/search/series?name=' + format_query\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text)\n show_list = json_data.get('data')\n for show in show_list:\n if show.get('status') == 'Continuing':\n show_id = show.get('id')\n s = create_show(token, show_id)\n return s",
"def search_results(query, res_num=2):\n\n\turl = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0'\n\tresp = requests.get( url, params={'q': query} )\n\tjson_bytes = resp.content\n\tjson_text = json_bytes.decode('utf-8')\n\tjson_dict = json.loads(json_text)\n\n\tsearch_results = json_dict['responseData']['results']\n\t\n\treturn search_results[:res_num]",
"def get_results(self):\n return self.results",
"def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)",
"def curate_nd_fetch_resources(token, search_parameter):\n try:\n curate_instance = CurateND(token)\n except PresQTInvalidTokenError:\n raise PresQTResponseException(\n \"Token is invalid. Response returned a 401 status code.\",\n status.HTTP_401_UNAUTHORIZED)\n\n if search_parameter:\n if 'title' in search_parameter:\n # Format the search that is coming in to be passed to the Curate API\n search_parameters = search_parameter['title'].replace(' ', '+')\n search_url = 'https://curate.nd.edu/api/items?q={}'.format(search_parameters)\n try:\n resources = curate_instance.get_resources(search_url)\n except PresQTValidationError as e:\n raise e\n elif 'id' in search_parameter:\n resources = get_curate_nd_resources_by_id(token, search_parameter['id'])\n else:\n resources = curate_instance.get_resources()\n return resources",
"def spot_search_results(q):\n\n auth_manager = create_spot_oauth()\n\n sp = Spotify(auth_manager=auth_manager)\n\n res = sp.search(q=q, limit=50)\n\n return res",
"def read_records(self, token, filter_dict, page=DEFAULT_PAGE_NUM, page_size=DEFAULT_PAGE_SIZE):\n try:\n token_owner = self.user_table.get_a_user_by_token(token)\n except ormexc.NoResultFound:\n msg = f'Permission Denied. Unknown token: {token}'\n logger.error(msg)\n raise exceptions.UnauthenticatedError(msg)\n\n if token_owner.role == 'admin':\n records = self.jogging_table.get_all_records(filter_dict)\n else:\n records = self.jogging_table.get_records_of_a_user(token_owner.username, filter_dict)\n paged = records[(page - 1) * page_size: page * page_size]\n return paged",
"def get_user_messages_by_token(token):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_messages_by_email(token, session['data']['user'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
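The doubling delay in query_results above is a plain exponential backoff capped at 8 seconds; a standalone sketch of just that schedule, where the results dict is a stand-in for the worker's shared results attribute and wait_for is an illustrative name:

    import time

    def wait_for(results, token, max_delay=8.0):
        # Poll the shared results dict, doubling the sleep up to max_delay
        # so a slow query does not spin the CPU.
        delay = 0.001
        while token not in results:
            time.sleep(delay)
            if delay < max_delay:
                delay += delay  # 0.001, 0.002, 0.004, ... capped at 8 s
        return results.pop(token)

    results = {"abc123": [("row",)]}
    print(wait_for(results, "abc123"))  # [('row',)]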
Returns the second largest value from a given list.
try:
return sorted(set(values))[-2]
except IndexError:
raise ValueError("second_largest() needs at least two distinct values") | [
"def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2",
"def two_largest(lst):\n i, largest, second_largest = len(lst) - 1, -1, -1\n while i >= 0:\n total = largest + second_largest\n if lst[i] > second_largest:\n if lst[i] > largest:\n largest, second_largest = lst[i], largest\n else:\n second_largest = lst[i]\n lst[i] = total\n i -= 1",
"def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]",
"def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval",
"def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1",
"def max_value(lst):\n return max(lst)",
"def find_greatest_number(incoming_list: list):\n return max(incoming_list)",
"def caluculateHighest(list):\n x = -1 # choose a really low number\n index = 0\n for i in list: # loops through entire list\n if int(list[index]) > x: # compares with x for larger number\n x = int(list[index]) # if larger, x = larger num\n index += 1 # increments to go to next index\n return x # returns largest value",
"def find_largest_and_second_largest(arr):\n largest = None\n largest_idx = None\n second_largest = None\n\n for i, value in enumerate(arr):\n if largest is None:\n # Init largest.\n largest = value\n largest_idx = set([i])\n elif value > largest:\n # Move largest to second largest.\n second_largest = largest\n # Update largest.\n largest = value\n largest_idx = set([i])\n elif value == largest:\n # Add index to largest_idx\n largest_idx.add(i)\n elif second_largest is None:\n # Init second largest.\n second_largest = value\n elif value > second_largest:\n # Update second largest.\n second_largest = value\n elif value == second_largest:\n # Don't care the indices of second largest element.\n pass\n else:\n pass\n\n return largest, largest_idx, second_largest",
"def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]",
"def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)",
"def thirdmax_2(nums: List[int]) -> int:\n top_maxes = [float('-inf'), float('-inf'), float('-inf')]\n nums_unique = set(nums)\n\n if len(nums_unique) <= 2:\n return max(nums_unique)\n else:\n for num in nums_unique:\n if num < top_maxes[0]:\n continue\n elif num > top_maxes[2]:\n top_maxes = [top_maxes[1], top_maxes[2], num]\n elif num > top_maxes[1]:\n top_maxes = [top_maxes[1], num, top_maxes[2]]\n elif num > top_maxes[0]:\n top_maxes = [num, top_maxes[1], top_maxes[2]]\n return int(top_maxes[0])",
"def greatest_difference(num_list):",
"def getMaxAndIndex(myList):\n # For a faster implementation in big lists, try return numpy.argmax(myList)\n max_val = max(myList)\n return (max_val, myList.index(max_val))",
"def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)",
"def get_highest( maxima ):\n val = 0\n peak = None\n for m in maxima:\n if m[1] > val:\n val = m[1]\n peak = m[0]\n return peak",
"def largest_int(number_list):\n\n if not number_list:\n return None\n\n largest_num = number_list[0]\n\n for num in number_list:\n if num > largest_num:\n largest_num = num\n\n return largest_num",
"def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])",
"def maximum(list):\n\n\treturn max(list)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
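A brief usage note for the positive second_largest above: deduplicating with set() means repeated maxima do not count twice, which is why the second call below returns 3 rather than 5.

    from typing import List

    def second_largest(values: List[int]) -> int:
        try:
            return sorted(set(values))[-2]
        except IndexError:
            raise ValueError("second_largest() needs at least two distinct values")

    print(second_largest([4, 1, 9, 7]))  # 7
    print(second_largest([5, 5, 3]))     # 3 -- duplicates of the max collapse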
Try to get the path to pdb.py and return it in a list. | def GetPdbArgs(python):
# Usually, python is /usr/bin/pythonxx and pdb is /usr/lib/pythonxx/pdb.py
components = python.split('/')
if len(components) >= 2:
pdb_path = '/'.join(components[0:-2] + ['lib'] +
components[-1:] + ['pdb.py'])
if os.access(pdb_path, os.R_OK):
return [pdb_path]
# No pdb module found in the python path, default to -m pdb
return ['-m', 'pdb'] | [
"def get_pex_python_paths():\n ppp = Variables.from_rc().get(\"PEX_PYTHON_PATH\")\n if ppp:\n return ppp.split(os.pathsep)\n else:\n return []",
"def retrieve_module_list():\n\n current_dir = getcwd()\n mod_list = []\n\n for item in listdir(current_dir):\n\n if item.endswith('db'):\n\n mod_list.append(item)\n\n return mod_list",
"def pex_python_paths(cls):\n ppp = Variables.from_rc().get('PEX_PYTHON_PATH')\n if ppp:\n return ppp.split(os.pathsep)\n else:\n return []",
"def debugger_list_modules():",
"def get_breakpoint_files(self):\r\n return self.bpoints.values(key='filename')",
"def fetchPDB(pdb_id):\n url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_id.split('.')[0]\n return urllib.urlopen(url).read()",
"def get_environ_path(self):\n paths = []\n if 'PYTHONCOMPILED' in os.environ:\n path_string = os.environ['PYTHONCOMPILED']\n paths = path_string.split(os.path.pathsep)\n return paths",
"def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts",
"def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)",
"def ExtractSourceFiles(pdb_filename):\n srctool = subprocess.Popen([srcToolPath, '-r', pdb_filename],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n filelist = srctool.stdout.read()\n res = srctool.wait()\n\n if res == 0 or res == -1 or filelist.startswith(\"srctool: \"): \n print \"Res: %d\" % res + \" for \" + srcToolPath + \" -r \" + pdb_filename\n #raise \"srctool failed: \" + filelist\n return []\n return [x for x in filelist.split('\\r\\n') if len(x) != 0]",
"def ReadPDB (self, pdb_path, db_path):\n\n ReadPDBFile (pdb_path, db_path)\t#",
"def get_source_code(self, objpath: str) -> List[str]:\n\n obj = pydoc.locate(objpath)\n lines = inspect.getsourcelines(obj)[0]\n return [line.replace(\"\\n\", \"\") for line in lines]",
"def process_pdb(self, pdb_filename) :\n args = [self.command, pdb_filename]\n try :\n p = Popen(args, stdout=PIPE)\n (out,err) = p.communicate() \n except OSError :\n raise RuntimeError(\"Cannot communicate with STRIDE.\") \n return out",
"def load_dbc_files(dbc_paths):\n import can_decoder\n from pathlib import Path\n\n db_list = []\n for dbc in dbc_paths:\n db = can_decoder.load_dbc(Path(__file__).parent / dbc)\n db_list.append(db)\n\n return db_list",
"def get_python_paths(cfg: defs.Config) -> List[pathlib.Path]:\n\n def query_program(prog: str) -> List[pathlib.Path]:\n \"\"\"Query a Python interpreter for its search paths.\"\"\"\n cfg.diag(lambda: f\"Querying {prog} for its library search paths\")\n cmd = [\n prog,\n \"-c\",\n \"import sys; print('\\\\n'.join(path for path in sys.path if path))\",\n ]\n cfg.diag(lambda: f\"- about to execute {cmd!r}\")\n try:\n return [\n pathlib.Path(line)\n for line in subprocess.check_output(\n cmd, encoding=\"UTF-8\", env=cfg.utf8_env\n ).splitlines()\n ]\n except FileNotFoundError:\n cfg.diag(lambda: f\"Apparently there is no {prog} on this system\")\n return []\n except (OSError, subprocess.CalledProcessError) as err:\n raise defs.OSIEnvError(f\"Could not execute {cmd!r}: {err}\") from err\n\n return list(itertools.chain(*(query_program(prog) for prog in (\"python3\", \"python2\"))))",
"def import_code_list(ipath):\n\t\n\timport os\n\timport pickle\n\t\n\tifile = open(ipath, \"rb\")\n\tcode_list = pickle.load(ifile)\n\tifile.close()\n\treturn code_list",
"def readPDB(pdb_file):\n\n parser = PDB.PDBParser()\n name = pdb_file.split('/')[-1].split('.pdb')[0]\n structure = parser.get_structure(name, pdb_file)\n\n return structure",
"def load_pdblist(pdblist, addext = 0):\n\n\t#Load the pdblist, and convert to a list.\n\tlistfile = open(pdblist, 'r')\n\tpdbs = listfile.readlines()\n\t\n\tfor pdb in pdbs:\n\t\tpdbname = pdb.strip()\n\t\tif (addext):\n\t\t\tpdbname = pdb.strip() + '.pdb'\n\t\t\n\t\tcmd.load(pdbname)",
"def make_pdblists_native(in_dir):\n pdblists = []\n pdbs = glob.glob(in_dir+\"/*.pdb*\")\n OUTFILE = open(in_dir+\"/PDBLIST.txt\", 'w')\n for pdb in pdbs:\n OUTFILE.write(pdb+\"\\n\")\n OUTFILE.close()\n pdblists.append([\"native_\",in_dir+\"/PDBLIST.txt\"])\n return pdblists"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
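A worked example of the path rewrite GetPdbArgs performs, using a hypothetical interpreter path (no file access, just the string manipulation):

    python = "/usr/bin/python3.11"          # hypothetical interpreter path
    components = python.split("/")          # ['', 'usr', 'bin', 'python3.11']
    pdb_path = "/".join(components[0:-2] + ["lib"] + components[-1:] + ["pdb.py"])
    print(pdb_path)                         # /usr/lib/python3.11/pdb.py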
Print usage for the stub script. | def PrintOurUsage():
print 'Stub script %s (auto-generated). Options:' % sys.argv[0]
print ('--helpstub '
'Show help for stub script.')
print ('--debug_binary '
'Run python under debugger specified by --debugger.')
print ('--debugger=<debugger> '
"Debugger for --debug_binary. Default: 'gdb --args'.")
print ('--debug_script '
'Run wrapped script with python debugger module (pdb).')
print ('--show_command_and_exit '
'Print command which would be executed and exit.')
print ('These options must appear first in the command line, all others will '
'be passed to the wrapped script.') | [
"def print_usage():\n print(helptxt)\n sys.exit(2)",
"def usage():\n print(__doc__)",
"def print_usage():\n print(\"Usage: python app.py [OPTIONS]\\n\"\n \"OPTIONS:\\n\"\n \"\\t--debug-print Enables verbose output for debugging the \"\n \"tool.\\n\"\n \"\\t--report-only Only reports on compliance and does not \"\n \"offer to fix broken configurations.\\n\"\n \"\\t--disable-logs Refrain from creating a log file with the \"\n \"results.\\n\"\n \"\\t--disable-prompt Refrain from prompting user before applying \"\n \"fixes.\\n\"\n \"\\t--skip-sudo-checks Do not perform checks that require sudo \"\n \"privileges.\\n\"\n \"\\t--check-required Check only required items.\\n\"\n \"\\t--help -h Print this usage information.\\n\")\n sys.exit()",
"def usage():\n\tprint english.usage",
"def Usage():\n\n print(\"Usage:\", file=sys.stderr)\n print(USAGE, file=sys.stderr)\n sys.exit(2)",
"def print_usage(msg):\n print('Usage: ' + msg)",
"def print_help():\n print('usage: {source directory} {output filename}')\n sys.exit()",
"def usage(self):\n return '%(prog)s'",
"def print_usage():\n print(\"usage: \" + sys.argv[0] + \" -m model | -t | -r request\")\n print(\"Options and arguments:\")\n print(\"-m --model\\t: research model chosen for the search. ['b','boolean', 'v', 'vector']\")\n print(\"-t\\t\\t: enable the time record.\")",
"def usage():\n for option, default_value, documentation in _FLAGS:\n print '\\t\\t--%s\\t\"%s\" (%s)' % (option, documentation, default_value)",
"def help(self, dummy):\r\n help = self.doc + \"\\n\"\r\n if help.find(\"%s\") > 0:\r\n help = help.replace(\"%s\", self.progname)\r\n print_function(help, end='', file=self.stdout)\r\n self.exit(0)",
"def print_help(self):\n print \"--uuid \\t<uuid of system> (use 'system-list' on the Hemlock server)\"\n print \"--client \\t <name of client> (client file must exist in the clients folder)\"\n print \"-h \\thelp\\n\"\n sys.exit(0)",
"def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()",
"def help(args):\n print(help_text)",
"def usage ():\n\n print \"Usage: dbAlerter [-hpv] [-c Config file] [-p pid file]\\r\\n\"\n print \"Options: --help -h Displays this usage.\"\n print \" --config -c Path to config file\" \n print \" --pid-file -p Path to save pid file\"\n sys.exit(0)",
"def usage(code, msg=''):\n if msg:\n print >> sys.stderr, msg\n print >> sys.stderr\n print >> sys.stderr, __doc__ % globals()\n sys.exit(code)",
"def print_help():\n\n print(\"help message\")",
"def help(self):\n\n print(\"\"\"minimum-cli v0.4\nxminimum == bin/cli.py\n\nxmake == bin/makefile.py\nuse command xmake --help to learn more\n\n\"\"\")",
"def usage(prtflag):\n\n\t#\n\t# Set up our usage string.\n\t#\n\toutstr = \"\"\"hostxref [options]\n\n where [options] are:\n\n\t\t-find - base protocol for searching other protocols\n\t\t-nozero - only display matches\n\t\t-originator - specify originator address list to search for\n\t\t-responder - specify responder address list to search for\n\n -verbose - give verbose output\n -Version - show version and exit\n -help - show usage message\n -man - show man page\n \"\"\"\n\n\t#\n\t# Just return the output if we aren't to print the usage string.\n\t#\n\tif(prtflag == 0):\n\t\treturn(outstr)\n\n\t#\n\t# Print the usage string and exit.\n\t#\n\tprint(\"usage: \" + outstr.rstrip())\n\texit(0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run a module as a script. Locate the module's file and run it in the current interpreter, or optionally under a debugger.
args = sys.argv[1:]
debug_binary = False
debugger = 'gdb --args'
debug_script = False
show_command_and_exit = False
while args:
if args[0] == '--helpstub':
PrintOurUsage()
sys.exit(0)
if args[0] == '--debug_binary':
debug_binary = True
args = args[1:]
continue
if args[0] == '--debug_script':
debug_script = True
args = args[1:]
continue
if args[0] == '--show_command_and_exit':
show_command_and_exit = True
args = args[1:]
continue
matchobj = re.match('--debugger=(.+)', args[0])
if matchobj is not None:
debugger = StripQuotes(matchobj.group(1))
args = args[1:]
continue
break
# Now look for my main python source file
# TODO(dborowitz): This will fail if the module was zipimported, which means
# no egg depending on this script runner can be zip_safe.
main_filename = module.__file__
assert os.path.exists(main_filename), ('Cannot exec() %r: file not found.' %
main_filename)
assert os.access(main_filename, os.R_OK), ('Cannot exec() %r: file not'
' readable.' % main_filename)
args = [main_filename] + args
if debug_binary:
debugger_args = debugger.split()
program = debugger_args[0]
# If pathname is not absolute, determine full path using PATH
if not os.path.isabs(program):
program = FindEnv(program)
python_path = sys.executable
command_vec = [python_path]
if debug_script:
command_vec.extend(GetPdbArgs(python_path))
args = [program] + debugger_args[1:] + command_vec + args
elif debug_script:
args = [sys.executable] + GetPdbArgs(program) + args
else:
program = sys.executable
args = [sys.executable] + args
if show_command_and_exit:
print 'program: "%s"' % program
print 'args:', args
sys.exit(0)
try:
sys.stdout.flush()
os.execv(program, args)
except EnvironmentError as e:
if not getattr(e, 'filename', None):
e.filename = program # Add info to error message
raise | [
"def run_python_script(package=None, module=None, args=[], p_args=[]):\n assert module is not None\n assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))\n path = python_script_exists(package, module)\n run_program(sys.executable, p_args + [path] + args)",
"def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))",
"def exec_module(self, module):\n pass",
"def exec_code(\n interpreter: code.InteractiveInterpreter, args: List[str]\n) -> None:\n try:\n with open(args[0]) as sourcefile:\n source = sourcefile.read()\n except OSError as e:\n # print an error and exit (if -i is specified the calling code will continue)\n print(f\"bpython: can't open file '{args[0]}: {e}\", file=sys.stderr)\n raise SystemExit(e.errno)\n old_argv, sys.argv = sys.argv, args\n sys.path.insert(0, os.path.abspath(os.path.dirname(args[0])))\n spec = importlib.util.spec_from_loader(\"__main__\", loader=None)\n assert spec\n mod = importlib.util.module_from_spec(spec)\n sys.modules[\"__main__\"] = mod\n interpreter.locals.update(mod.__dict__) # type: ignore # TODO use a more specific type that has a .locals attribute\n interpreter.locals[\"__file__\"] = args[0] # type: ignore # TODO use a more specific type that has a .locals attribute\n interpreter.runsource(source, args[0], \"exec\")\n sys.argv = old_argv",
"def runscript(script):\r\n addscriptpath(script)\r\n watchdog.reset()\r\n argv = sys.argv\r\n sys.argv = [ script ]\r\n execfile(script, globals())\r\n sys.argv = argv",
"def run_script(self, filename, start_opts=None, globals_=None,\n locals_=None):\n self.mainpyfile = self.core.canonic(filename)\n\n # Start with fresh empty copy of globals and locals and tell the script\n # that it's being run as __main__ to avoid scripts being able to access\n # the pydb.py namespace.\n if globals_ is None:\n import __main__\n globals_ = {\"__name__\" : \"__main__\",\n \"__file__\" : self.mainpyfile,\n \"__builtins__\" : __builtins__\n }\n if locals_ is None:\n locals_ = globals_\n self.core.start(start_opts)\n retval = False\n self.core.execution_status = 'Running'\n try:\n exec(compile(open(self.mainpyfile).read(), self.mainpyfile, 'exec'), globals_, locals_)\n retval = True\n except SyntaxError:\n print(sys.exc_info()[1])\n retval = False\n pass\n except IOError:\n print(sys.exc_info()[1])\n except Mexcept.DebuggerQuit:\n retval = False\n pass\n except Mexcept.DebuggerRestart:\n self.core.execution_status = 'Restart requested'\n raise Mexcept.DebuggerRestart\n finally:\n self.core.stop(options={'remove': True})\n return retval",
"def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the modulate in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0",
"def load_script_as_module(script_name):\n spec = create_script_spec(script_name)\n script = module_from_spec(spec)\n spec.loader.exec_module(script)\n\n return script",
"def run_file(filename, logfile=None, execdir=None):\n if not runpy_available: #pragma:nocover\n raise pyutilib.common.ConfigurationError(\"Cannot apply the run_file() function because runpy is not available\") \n #\n # Open logfile\n #\n if not logfile is None:\n sys.stderr.flush()\n sys.stdout.flush()\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n OUTPUT=open(logfile,\"w\")\n sys.stdout=OUTPUT\n sys.stderr=OUTPUT\n #\n # Add the file directory to the system path\n #\n if '/' in filename:\n tmp= \"/\".join((filename).split(\"/\")[:-1])\n tmp_import = (filename).split(\"/\")[-1]\n sys.path.append(tmp)\n elif '\\\\' in filename:\n tmp = \"\\\\\".join((filename).split(\"\\\\\")[:-1])\n tmp_import = (filename).split(\"\\\\\")[-1]\n sys.path.append(tmp)\n else:\n tmp_import = filename\n name = \".\".join((tmp_import).split(\".\")[:-1])\n #\n # Run the module\n #\n try:\n if not execdir is None:\n tmp=os.getcwd()\n os.chdir(execdir)\n tmp_path = sys.path\n sys.path = [execdir] + sys.path\n runpy.run_module(name,None,\"__main__\")\n if not execdir is None:\n os.chdir(tmp)\n sys.path = tmp_path\n except Exception: #pragma:nocover\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n raise\n #\n # Close logfile\n #\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr",
"def run_module(ir: IR) -> Value:\n\n env = default_env()\n module = compile_module(ir, env)\n\n # Lê argumentos da linha de comando e executa a função main\n # do ambiente\n argdefs = ir[\"main\"][0]\n argvalues = read_args(argdefs)\n\n main_fn = cast(Callable, module[\"main\"])\n result = main_fn(*argvalues)\n\n # Imprimimos true|false e não True|False, como acontece por padrão no Python\n if isinstance(result, bool):\n print(\"true\" if result else \"false\")\n else:\n print(result)\n\n return result",
"def run_file(path):\n if PY26:\n dirpath, name = splitname(path)\n found = imp.find_module(name, [dirpath])\n module = imp.load_module(\"__main__\", *found)\n return vars(module)\n else:\n return runpy.run_path(path, run_name=\"__main__\")",
"def run_script():\n print(\"run script\")",
"def run(src):\n mod = module(src)\n main = mod.get(\"main\")\n if not main:\n raise RuntimeError('módulo não define uma função \"main()\"')\n main()",
"def runScriptAtPath(path):\n \n sys.argv = [path]\n for arg in PytoClasses.Python.shared.args:\n sys.argv.append(str(arg))\n \n def run() -> None:\n os.system = PytoClasses.Python.shared.system\n directory = os.path.expanduser(os.path.dirname(path))\n sys.path.insert(0, directory)\n try:\n global __script__\n spec = importlib.util.spec_from_file_location(\"__main__\", path)\n __script__ = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(__script__)\n PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith(\"__\")]\n except SystemExit:\n print(\"SystemExit\")\n except Exception as e:\n \n exc_type, exc_obj, exc_tb = sys.exc_info()\n \n extracts = traceback.extract_tb(sys.exc_info()[2])\n count = len(extracts)\n \n lineNumber = -1\n \n fileName = path\n for i, extract in enumerate(extracts):\n if extract[0] == fileName:\n lineNumber = extract[1]\n break\n count -= 1\n \n if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number\n lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]\n \n PytoClasses.Python.shared.errorType = exc_type.__name__\n PytoClasses.Python.shared.errorReason = str(e)\n PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber)\n \n print(traceback.format_exc(limit=-count))\n \n sys.path.remove(directory)\n\n PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1\n PytoClasses.ReviewHelper.shared.requestReview()\n PytoClasses.Python.shared.isScriptRunning = False\n \n thread = threading.Thread(target=run, args=())\n \n def loop():\n while PytoClasses.Python.shared.isScriptRunning:\n time.sleep(1)\n ignoredThreads.append(thread)\n raise Exception(\"Stopped script!\")\n \n def runLoop():\n try:\n loop()\n except:\n pass\n\n\n thread.start()\n\n runLoop()\n return __script__",
"def runScript(script):\n if script != \"\":\n exec(script, localDict, localDict)",
"def run(self, mod, code):\n env = os.environ.copy()\n pythonpath = [os.path.dirname(mod.so_filename)]\n if 'PYTHONPATH' in env:\n pythonpath.append(env['PYTHONPATH'])\n env[\"PYTHONPATH\"] = os.pathsep.join(pythonpath)\n if self.hpy_abi in ['universal', 'debug']:\n # HPy module\n load_module = \"import sys;\" + \\\n \"import hpy.universal;\" + \\\n \"import importlib.util;\" + \\\n \"spec = importlib.util.spec_from_file_location('{name}', '{so_filename}');\" + \\\n \"mod = hpy.universal.load('{name}', '{so_filename}', spec, debug={debug});\"\n escaped_filename = mod.so_filename.replace(\"\\\\\", \"\\\\\\\\\") # Needed for Windows paths\n load_module = load_module.format(name=mod.name, so_filename=escaped_filename,\n debug=self.hpy_abi == 'debug')\n else:\n # CPython module\n assert self.hpy_abi == 'cpython'\n load_module = \"import {} as mod;\".format(mod.name)\n if self.verbose:\n print(\"\\n---\\nExecuting in subprocess: {}\".format(load_module + code))\n result = atomic_run([sys.executable, \"-c\", load_module + code], env=env,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if self.verbose:\n print(\"stdout/stderr:\")\n try:\n out = result.stdout.decode('latin-1')\n err = result.stderr.decode('latin-1')\n print(\"----\\n{out}--\\n{err}-----\".format(out=out, err=err))\n except UnicodeDecodeError:\n print(\"Warning: stdout or stderr could not be decoded with 'latin-1' encoding\")\n return result",
"def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)",
"def main():\n ScriptDirectory(op.split(__file__)[0])()",
"def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n execfile(script_path, globals_)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
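The command assembly in RunScriptModule above follows a simple three-way branch; below is a reduced sketch of just the flag handling for --debug_binary and --debug_script, with a hypothetical script path and an illustrative build_command helper that prints the command instead of exec'ing it. It simplifies the pdb lookup to "-m pdb" and is not the module's exact logic.

    import sys

    def build_command(main_filename, args, debug_binary=False, debug_script=False,
                      debugger="gdb --args"):
        # Mirrors the branch structure: wrap in the debugger, in pdb, or neither.
        argv = [main_filename] + list(args)
        if debug_binary:
            dbg = debugger.split()
            cmd = dbg + [sys.executable] + (["-m", "pdb"] if debug_script else []) + argv
        elif debug_script:
            cmd = [sys.executable, "-m", "pdb"] + argv
        else:
            cmd = [sys.executable] + argv
        return cmd

    print(build_command("/tmp/example_main.py", ["--verbose"], debug_script=True))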
Generates a dict of dicts from dot-separated keys, but without associated values: every leaf is an empty dict.
tree = {}
for item in dot_separated_keys:
inside_tree = tree
for part in item.split('.'):
inside_tree = inside_tree.setdefault(part, {})
return tree | [
"def undotted_keys(dict):\n return {k.lstrip(\".\"): v for k, v in dict.items()}",
"def _dotted_dict_to_nested_dicts(dotted_dict, delimiter_nested=\".\"):\n nested_dict = {}\n for key, value in dotted_dict.items():\n tokens = key.split(delimiter_nested)\n if len(tokens) > 1:\n tmp = nested_dict.setdefault(tokens[0], {})\n for token in tokens[1:-1]:\n tmp = tmp.setdefault(token, {})\n tmp[tokens[-1]] = value\n else:\n nested_dict[tokens[0]] = value\n return nested_dict",
"def flatten(self):\r\n newdict = dict()\r\n def recurse_flatten(prefix, dd):\r\n for k, v in dd.iteritems():\r\n newkey = prefix + '.' + k if len(prefix) > 0 else k\r\n if isinstance(v, DotDict):\r\n recurse_flatten(newkey, v)\r\n else:\r\n newdict[newkey] = v\r\n\r\n recurse_flatten('', self)\r\n return newdict",
"def create_recursive_dot_dict(data: Dict[str, Any], cls=DotDict) -> Union[DotDict, DotDefaultDict]:\n res = cls()\n for k, v in data.items():\n k = k.split(\".\")\n target = res\n for i in range(0, len(k)-1):\n t2 = target.get(k[i])\n if t2 is None:\n t2 = cls()\n target[k[i]] = t2\n\n assert isinstance(t2, cls), f\"Trying to overwrite key {'.'.join(k[:i+1])}\"\n target = t2\n\n assert isinstance(target, cls), f\"Trying to overwrite key {'.'.join(k)}\"\n target[k[-1]] = v\n return res",
"def dot2object(dotted_dict):\n obj = {}\n for k in dotted_dict:\n keys = k.split(\".\")\n new_key = keys[0]\n if not new_key in obj:\n obj[new_key] = {}\n obj[new_key][\".\".join(keys[1:])] = dotted_dict.get(k)\n for key in obj:\n if \".\" in key:\n _obj = dot2object(obj[key])\n obj[key] = _obj\n return obj",
"def _nested_dicts_to_dotted_keys(d, key=None):\n if isinstance(d, Mapping):\n if d:\n for k in d:\n k_ = k if key is None else \".\".join((key, k))\n yield from _nested_dicts_to_dotted_keys(d[k], key=k_)\n elif key is not None:\n yield key, d\n else:\n if type(d) is list:\n d = _to_hashable(d)\n yield key, d",
"def create_namespace_tree(dotted_names):\r\n ret = {}\r\n for dn in dotted_names:\r\n path = dn.split('.')\r\n for i in xrange(len(path)):\r\n ns = '.'.join(path[:i])\r\n itempath = '.'.join(path[:i + 1])\r\n if ns not in ret:\r\n ret[ns] = []\r\n if itempath not in ret[ns]:\r\n ret[ns].append(itempath)\r\n return ret",
"def strip(self, dic, keys):\n \"\"\" Returned dict will have key names of the deepest subkeys \"\"\"\n ret = {}\n for k in keys:\n path=k.split('.')\n d=dic\n subkey=''\n while len(path)>0:\n subkey=path.pop(0)\n # print(subkey)\n arrsplit = subkey.split('[')\n subkey_main=arrsplit[0]\n d=d[subkey_main]\n if(len(arrsplit)>1):\n index = int(arrsplit[1].split(']')[0])\n d=d[index]\n ret[subkey_main]=d\n return ret",
"def asdict(namedtupl):\n return {k.strip('_'):v for k, v in namedtupl._asdict().items()}",
"def dot_vals(value):\n ret = {}\n for key, val in __pillar__.get(\"master\", {}).items():\n if key.startswith(\"{}.\".format(value)):\n ret[key] = val\n for key, val in __opts__.items():\n if key.startswith(\"{}.\".format(value)):\n ret[key] = val\n return ret",
"def replace_dots(son):\n for key, value in son.items():\n if '.' in key:\n new_key = key.replace('.', '_')\n if isinstance(value, dict):\n son[new_key] = replace_dots(\n son.pop(key)\n )\n else:\n son[new_key] = son.pop(key)\n elif isinstance(value, dict): # recurse into sub-docs\n son[key] = replace_dots(value)\n return son",
"def dottree(seq, sep='.', fullpath=False):\n def ins(map, path):\n d = map\n parents = []\n for p in path:\n if fullpath:\n key = sep.join(parents + [p])\n else:\n key = p\n d = d.setdefault(key, {})\n parents.append(p)\n ret = {}\n seq = sorted(seq)\n for p in seq:\n pp = p.split(sep)\n if not pp:\n continue\n ins(ret, pp)\n return ret",
"def deflatten(d: dict, sep: Optional[str] = '.', maxdepth: int = -1):\n ret = {}\n if sep is not None:\n d = {\n tuple(k.split(sep, maxdepth)): v for k, v in d.items()\n }\n\n for keys, v in d.items():\n sub_dict = ret\n for sub_key in keys[:-1]:\n if sub_key not in sub_dict:\n sub_dict[sub_key] = {}\n assert isinstance(sub_dict[sub_key], dict), (\n f'Conflicting keys! {keys}'\n )\n sub_dict = sub_dict[sub_key]\n assert keys[-1] not in sub_dict, f'Conflicting keys! {keys}'\n sub_dict[keys[-1]] = v\n return ret",
"def _prefix_keys(data):\n return {'ot.{}'.format(key): value for key, value in data.items()}",
"def make_hierarchical_dict(d, sep=u\":\"):\n assert type(d) == dict\n assert type(sep) == unicode\n\n result = {}\n for key, value in d.items():\n path = key.split(sep)\n curr_dict = result\n for i, directory in enumerate(path):\n if i == (len(path) - 1):\n assert directory not in curr_dict\n curr_dict[directory] = value\n else:\n if directory in curr_dict:\n assert type(curr_dict[directory]) == dict\n else:\n curr_dict[directory] = {}\n curr_dict = curr_dict[directory]\n return result",
"def path_to_dict(path: str, val: Any) -> Dict:\n d = val\n for k in reversed(path.split('.')):\n d = {k: d}\n return d",
"def get_partial_dict(prefix, dictionary):\n\n match = prefix + \".\"\n n = len(match)\n\n new_dict = Bunch([(key[n:], dictionary[key])\n for key in dictionary.keys()\n if key.startswith(match)])\n if new_dict:\n return new_dict\n else:\n raise AttributeError",
"def prepend_name_dict(prefix, d):\n return {prefix + name: value for name, value in d.items()}",
"def get_dotted_field(input_dict, accessor_string):\n current_data = input_dict\n for chunk in accessor_string.split('.'):\n current_data = current_data.get(chunk, {})\n return current_data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sum of the factorials of the digits of a number x | def factsum(x):
    return sum(factorial(d) for d in getdigits(x)) | [
"def digit_factorial_sum(n):\n\n factorial_sum = 0\n\n while n:\n digit = n%10\n factorial_sum += cached_factorials[digit]\n n //= 10\n\n return factorial_sum",
"def sum_of_digit_factorials(num):\n return sum(\n FACTORIAL_TABLE[int(char)]\n for char in str(num)\n )",
"def f(n):\n return sum(math.factorial(int(ch)) for ch in str(n))",
"def factorial_digit_sum(number=100):\n\t#Get the factorial of the number\n\ttry:\n\t\tfact_number = __factorial(number)\n\texcept Exception,e:\n\t\traise(e)\n\telse:\n\t\treturn sum([int(fact_digit) for fact_digit in str(fact_number)])",
"def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum",
"def calculateFactorials():\n\n ni = []\n ni.append( 295232799039604140847618609643520000000) # 34!\n ITERATIONS = 34\n for n in range( 1, ITERATIONS,1 ) :\n ni.append(math.floor(ni[n - 1] / n))\n print( \"\\n \".join([\"xi = (xi * _x) >> PRECISION;\\n res += xi * %s;\" % hex(int(x)) for x in ni]))",
"def factorial(x):\n return math.factorial(x)",
"def sum_of_digits_of_sum_of_factors(n):\n ############################################################################\n # TODO (continued): This function is PURPOSELY ** not implemented. **\n # TODO (continued): DO NOT IMPLEMENT sum_of_digits_of_sum_of_factors.\n # Just leave it as it is (with no code).\n ############################################################################",
"def fact(n):\n return float(misc.factorial(n, True))",
"def euler20(n=100) -> int:\n return sum(int(s) for s in str(math.factorial(n)))",
"def factorial(n):\n return product(n, iden)",
"def problem56():\n def digit_sum(n):\n return sum(digits_from_num(n))\n return max(digit_sum(a**b) for a in range(100) for b in range(100))",
"def factorial_trailing_zero(n):\n\n count = 0\n idx = 5\n while(n/idx >= 1):\n count += math.floor(n/idx)\n idx *= 5\n\n return count",
"def factorial (n):\n i = 1\n resultado = 1\n while i <= n:\n resultado *= i\n i += 1\n return resultado",
"def sum_of_digits(n):\n return sum(int(c) for c in str(n))",
"def factorial(n):\n\tf = 1\n\tfor i in range(1,n+1):\n\t\tf = f*i\n\n\treturn f",
"def factorial(number):\n result = 1\n while number:\n result *= number\n number -= 1\n return result",
"def factorial(n):\n if not isinstance(n, int):\n raise TypeError('Not an integer')\n if n < 0:\n raise ValueError('Not a positive integer')\n fact = 1\n for i in range(n):\n fact *= (i + 1)\n return fact",
"def factorial_recursion(n):\n pass # @todo -fix this"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments | def _clone_sequential_model(model, input_tensors=None):
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name) | [
"def clone_model(model, input_tensors=None):\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors)",
"def clone_model(model):\n model = copy.deepcopy(model)\n zero_grad(model)\n return model",
"def copy_model(model: Module) -> Module:\n return copy.deepcopy(model)",
"def clone(self):\n return _libsbml.Model_clone(self)",
"def clone_model(self, elite):\n print(\"Cloning elite model\")\n self.model = copy.deepcopy(elite)",
"def clone(self):\n return _libsbml.ModelCreator_clone(self)",
"def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model",
"def clone(self):\n return _libsbml.ModelHistory_clone(self)",
"def copy(self) -> ModelSpec:\n return self.__class__(\n model_enum=self.model_enum,\n model_kwargs=deepcopy(self.model_kwargs),\n model_gen_kwargs=deepcopy(self.model_gen_kwargs),\n model_cv_kwargs=deepcopy(self.model_cv_kwargs),\n )",
"def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model",
"def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone",
"def copy_model_over(from_model, to_model):\n for to_model, from_model in zip(to_model.parameters(),\n from_model.parameters()):\n to_model.data.copy_(from_model.data.clone())",
"def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN",
"def clone(self):\n return _libsbml.ModelDefinition_clone(self)",
"def copy(self) -> SequentialDataset:\n params = {\n \"X\": copy.deepcopy(self._X),\n \"lengths\": copy.deepcopy(self._lengths),\n }\n\n if self._y is not None:\n params[\"y\"] = copy.deepcopy(self._y)\n\n if self._classes is not None:\n params[\"classes\"] = copy.deepcopy(self._classes)\n\n return SequentialDataset(**params)",
"def fork(self) -> 'Model':\n new_model = Model(_internal=True)\n new_model._root_graph_name = self._root_graph_name\n new_model.graphs = {name: graph._fork_to(new_model) for name, graph in self.graphs.items()}\n new_model.training_config = copy.deepcopy(self.training_config)\n new_model.history = self.history + [self]\n return new_model",
"def clone_layer(layer: nn.Module, N: int):\n return nn.ModuleList([copy.deepcopy(layer) for _ in range(N)])",
"def convert_model(model, inputs, outputs):\n new = Model(inputs=inputs, outputs=outputs)\n\n for i, layer in enumerate(model.layers[:-4]):\n new.layers[i].set_weights(layer.get_weights())\n\n return new",
"def clone(self):\n return _libsbml.Submodel_clone(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clone any `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments | def clone_model(model, input_tensors=None):
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors) | [
"def clone_model(model):\n model = copy.deepcopy(model)\n zero_grad(model)\n return model",
"def clone(self):\n return _libsbml.Model_clone(self)",
"def clone_model(self, elite):\n print(\"Cloning elite model\")\n self.model = copy.deepcopy(elite)",
"def copy_model(model: Module) -> Module:\n return copy.deepcopy(model)",
"def clone(self):\n return _libsbml.ModelCreator_clone(self)",
"def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model",
"def _clone_sequential_model(model, input_tensors=None):\n if not isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a `Sequential` model instance, '\n 'but got:', model)\n\n def clone(layer):\n return layer.__class__.from_config(layer.get_config())\n\n layers = [clone(layer) for layer in model.layers]\n if input_tensors is None:\n return Sequential(layers=layers, name=model.name)\n else:\n if len(to_list(input_tensors)) != 1:\n raise ValueError('To clone a `Sequential` model, we expect '\n ' at most one tensor '\n 'as part of `input_tensors`.')\n x = to_list(input_tensors)[0]\n if K.is_keras_tensor(x):\n origin_layer = x._keras_history[0]\n if isinstance(origin_layer, InputLayer):\n return Sequential(layers=[origin_layer] + layers,\n name=model.name)\n else:\n raise ValueError('Cannot clone a `Sequential` model on top '\n 'of a tensor that comes from a Keras layer '\n 'other than an `InputLayer`. '\n 'Use the functional API instead.')\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + str(x.name))\n input_layer = input_tensor._keras_history[0]\n return Sequential(layers=[input_layer] + layers, name=model.name)",
"def copy(self) -> ModelSpec:\n return self.__class__(\n model_enum=self.model_enum,\n model_kwargs=deepcopy(self.model_kwargs),\n model_gen_kwargs=deepcopy(self.model_gen_kwargs),\n model_cv_kwargs=deepcopy(self.model_cv_kwargs),\n )",
"def copy_model_over(from_model, to_model):\n for to_model, from_model in zip(to_model.parameters(),\n from_model.parameters()):\n to_model.data.copy_(from_model.data.clone())",
"def clone(self):\n return _libsbml.ModelDefinition_clone(self)",
"def clone(self):\n return _libsbml.ModelHistory_clone(self)",
"def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone",
"def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model",
"def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN",
"def clone(self):\n return _libsbml.Submodel_clone(self)",
"def clone_params(self):\n self.cloned = self.model.state_dict()",
"def clone(self):\n return _libsbml.Input_clone(self)",
"def clone(self, **params):\n\n if 'default' not in params:\n params['default'] = self.default\n\n for key, value in self.__dict__.items():\n if key not in params:\n try:\n params[key] = deepcopy(value)\n except TypeError:\n pass\n\n return type(self)(**params)",
"def convert_model(model, inputs, outputs):\n new = Model(inputs=inputs, outputs=outputs)\n\n for i, layer in enumerate(model.layers[:-4]):\n new.layers[i].set_weights(layer.get_weights())\n\n return new"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the joystick components | def init(self):
pygame.init()
pygame.joystick.init()
self.controller = pygame.joystick.Joystick(0)
self.controller.init()
        self.x = 0
        self.y = 0 | [
"def init(self):\n \n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()",
"def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)",
"def __init__(self, joystick_ID):\n self.isReady = False\n self._jsID = joystick_ID\n pygame.init()\n pygame.joystick.init()\n n = pygame.joystick.get_count()\n if joystick_ID >= 0 and joystick_ID < n:\n # Joystick with that ID was found, initialize it\n self._JS = pygame.joystick.Joystick(joystick_ID)\n self._JS.init()\n\n # Create controller elements\n self.StickL = Stick(self._JS, [AXS_LX, AXS_LY])\n self.StickR = Stick(self._JS, [AXS_RX, AXS_RY])\n\n self.BtnL = Button(self._JS, BTN_LB_ID)\n self.BtnR = Button(self._JS, BTN_RB_ID)\n self.BtnBack = Button(self._JS, BTN_BACK_ID)\n self.BtnStart = Button(self._JS, BTN_START_ID)\n self.BtnA = Button(self._JS, BTN_A_ID)\n self.BtnB = Button(self._JS, BTN_B_ID)\n self.BtnX = Button(self._JS, BTN_X_ID)\n self.BtnY = Button(self._JS, BTN_Y_ID)\n self.BtnStickL = Button(self._JS, BTN_STICK_L_ID)\n self.BtnStickR = Button(self._JS, BTN_STICK_R_ID)\n\n self.HatL = Hat(self._JS, 0)\n self.isReady = True",
"def joy_init():\n\n pygame.init();\n pygame.joystick.init();\n if pygame.joystick.get_count() == 0:\n raise Exception(\"joy_init: No joysticks connected\");\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n \n control.tare()\n \n return joystick",
"def __init__(self, joystick):\n\t\tself.js = joystick",
"def init_global_joystick():\r\n\r\n global joystick\r\n joystick = con.initCont()\r\n return joystick",
"def initJoysticks():\n \n print 'BEGIN JOYSTICK LOAD'\n #number of joysticks connected\n njoy = PG.joystick.get_count()\n print '\\tFound', njoy, 'joysticks.'\n \n #add each to list\n global gamePad\n gamePad = []\n for pad in xrange(njoy):\n joy = PG.joystick.Joystick(pad)\n joy.init()\n print '\\t\\tJoystick', pad, 'has', joy.get_numbuttons(), 'buttons'\n gamePad.append(joy)\n \n\n print 'END JOYSTICK LOAD\\n'\n #except:\n #print 'no gamepad' ",
"def pyga_joysetup(self):\n jcount=0\n if PYG:\n self.dbgprint(\"pygame starts\")\n jcount=PYG.joystick.get_count()\n if jcount > 0:\n for x in range(jcount):\n j = PYG.joystick.Joystick(x)\n j.init()\n self.dbgprint(\">>>Enabled joystick: %s\" % j.get_name())\n taskMgr.add(self.pyga_joytask, 'tsk_pygajoy')\n else:\n self.dbgprint(\"No Joysticks to Initialize!\")\n\n return jcount",
"def joystick_callback(self, data):\n # self.handle_buttons(data)\n # self.handle_joysticks(data)\n # self.publish()\n\n green_button_pressed = data.buttons[0]\n red_button_pressed = data.buttons[1]\n blue_button_pressed = data.buttons[2]\n yellow_button_pressed = data.buttons[3]\n back_button_pressed = data.buttons[6]\n start_button_pressed = data.buttons[7]\n\n if red_button_pressed:\n self.system_mode_pub.publish(3)\n rospy.loginfo(\"Killswitch engaged\")\n\n elif green_button_pressed:\n self.system_mode_pub.publish(2)\n rospy.loginfo(\"Autonomous control activated\")\n\n # elif yellow_button_pressed:\n # self.enable_control('velocity')\n # self.system_mode_pub.publish(1)\n # rospy.loginfo(\"Joystick control enabled. Sending VELOCITY commands.\")\n\n elif blue_button_pressed or start_button_pressed:\n self.enable_control('motor')\n self.system_mode_pub.publish(1)\n rospy.loginfo(\"Joystick control enabled. Sending MOTOR commands.\")\n\n elif back_button_pressed:\n self.system_mode_pub.publish(0)\n rospy.loginfo(\"Control deactivated\")\n\n left_stick_up_down = data.axes[1]\n right_stick_up_down = data.axes[4]\n left_stick_left_right = data.axes[0]\n right_stick_left_right = data.axes[3]\n\n self.publish(left_stick_up_down, right_stick_up_down,\n left_stick_left_right, right_stick_left_right)",
"def __init__(self, robotName, configDict=None):\n self.name = robotName\n self.leftMotor = ev3.LargeMotor('outC')\n self.rightMotor = ev3.LargeMotor('outB')\n self.servoMotor = ev3.MediumMotor('outD')\n self.servoMotor.reset()\n self.leftTouch = None\n self.rightTouch = None\n self.ultraSensor = ev3.UltrasonicSensor('in4')\n self.colorSensor = None\n self.gyroSensor = ev3.GyroSensor('in2')\n self.pixyCam=PixyCam() #input_1\n self.button=ev3.Button()\n ev3.Sound.set_volume(100)\n if configDict is not None:\n self.setupSensorsMotors(configDict)\n if self.leftMotor is None:\n self.leftMotor = ev3.LargeMotor('outC')\n if self.rightMotor is None:\n self.rightMotor = ev3.LargeMotor('outB')",
"def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position",
"def init_component(self):\n\n pass",
"def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()",
"def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy",
"def set_mode_joystick(self):\n self.command.traj_mode = mob.OMNIBASE_TRAJ_JOYSTICK\n self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ",
"def map_joystick(joystick):\n left = baxter_interface.Limb('left')\n right = baxter_interface.Limb('right')\n grip_left = baxter_interface.Gripper('left')\n grip_right = baxter_interface.Gripper('right')\n lcmd = {}\n rcmd = {}\n\n #available joints\n lj = left.joint_names()\n rj = right.joint_names()\n\n #abbreviations\n jhi = lambda s: joystick.stick_value(s) > 0\n jlo = lambda s: joystick.stick_value(s) < 0\n bdn = joystick.button_down\n bup = joystick.button_up\n\n def print_help(bindings_list):\n print(\"press any keyboard key to quit.\")\n for bindings in bindings_list:\n for (test, cmd, doc) in bindings:\n if callable(doc):\n doc = doc()\n print(\"%s: %s\" % (str(test[1][0]), doc))\n\n bindings_list = []\n bindings = (\n ((bdn, ['rightTrigger']), (grip_left.close, []), \"left gripper close\"),\n ((bup, ['rightTrigger']), (grip_left.open, []), \"left gripper open\"),\n ((bdn, ['leftTrigger']), (grip_right.close, []), \"right gripper close\"),\n ((bup, ['leftTrigger']), (grip_right.open, []), \"right gripper open\"),\n ((jlo, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, 0.1]), lambda i=0:\"right inc \"+rj[i]),\n ((jhi, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, -0.1]), lambda i=0:\"right dec \"+rj[i]),\n ((jlo, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, 0.1]), lambda i=0:\"left inc \"+lj[i]),\n ((jhi, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, -0.1]), lambda i=0:\"left dec \"+lj[i]),\n ((jlo, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, 0.1]), lambda i=1:\"right inc \"+rj[i]),\n ((jhi, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, -0.1]), lambda i=1:\"right dec \"+rj[i]),\n ((jlo, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, 0.1]), lambda i=1:\"left inc \"+lj[i]),\n ((jhi, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, -0.1]), lambda i=1:\"left dec \"+lj[i]),\n ((bdn, ['rightBumper']), (rotate, [lj]), \"left: cycle joint\"),\n ((bdn, ['leftBumper']), (rotate, [rj]), \"right: cycle joint\"),\n ((bdn, ['btnRight']), (grip_left.calibrate, []), \"left calibrate\"),\n ((bdn, ['btnLeft']), (grip_right.calibrate, []), \"right calibrate\"),\n ((bdn, ['function1']), (print_help, [bindings_list]), \"help\"),\n ((bdn, ['function2']), (print_help, [bindings_list]), \"help\"),\n )\n bindings_list.append(bindings)\n\n rate = rospy.Rate(100)\n print_help(bindings_list)\n print(\"press any key to stop. \")\n while not rospy.is_shutdown():\n c = iodevices.getch()\n if c:\n if c == '?':\n print_help(bindings_list)\n else:\n return True\n for (test, cmd, doc) in bindings:\n if test[0](*test[1]):\n cmd[0](*cmd[1])\n if callable(doc):\n print(doc())\n else:\n print(doc)\n if len(lcmd):\n left.set_joint_positions(lcmd)\n lcmd.clear()\n if len(rcmd):\n right.set_joint_positions(rcmd)\n rcmd.clear()\n rate.sleep()\n return False",
"def joystickController(self):\n return self.__joystickController",
"def create_device(self, layout):\n events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],\n ecodes.EV_REL: []}\n\n # Joystick device\n if layout.axes or layout.buttons or layout.hats:\n self.joystick_dev = next_joystick_device()\n\n for name in layout.axes:\n params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.hats:\n params = (0, -1, 1, 0, 0)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.buttons:\n events[ecodes.EV_KEY].append(name)\n\n if layout.mouse:\n self.mouse_pos = {}\n self.mouse_rel = {}\n self.mouse_analog_sensitivity = float(\n layout.mouse_options.get(\"MOUSE_SENSITIVITY\",\n DEFAULT_MOUSE_SENSITIVTY)\n )\n self.mouse_analog_deadzone = int(\n layout.mouse_options.get(\"MOUSE_DEADZONE\",\n DEFAULT_MOUSE_DEADZONE)\n )\n self.scroll_repeat_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_REPEAT_DELAY\",\n DEFAULT_SCROLL_REPEAT_DELAY)\n )\n self.scroll_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_DELAY\",\n DEFAULT_SCROLL_DELAY)\n )\n\n for name in layout.mouse:\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:\n # This ensures that scroll wheel events can work\n events[ecodes.EV_REL].append(ecodes.REL_WHEEL)\n else:\n events[ecodes.EV_REL].append(name)\n self.mouse_rel[name] = 0.0\n\n self.device = UInput(name=layout.name, events=events,\n bustype=layout.bustype, vendor=layout.vendor,\n product=layout.product, version=layout.version)\n self.layout = layout",
"def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift the colormap by dragging the cursor left or right. Stretch the colormap by dragging the cursor up or down. | def ms_contrast(self, viewer, event, data_x, data_y, msg=True):
if not self.cancmap:
return False
event.accept()
msg = self.settings.get('msg_contrast', msg)
x, y = self.get_win_xy(viewer)
if event.state == 'move':
self._tweak_colormap(viewer, x, y, 'preview')
elif event.state == 'down':
self._start_x, self._start_y = x, y
if msg:
self.onscreen_message(
"Shift and stretch colormap (drag mouse)", delay=1.0)
else:
self.onscreen_message(None) | [
"def ms_cmap_rotate(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return True\n msg = self.settings.get('msg_cmap', msg)\n\n x, y = self.get_win_xy(viewer)\n\n if event.state == 'move':\n self._rotate_colormap(viewer, x, y, 'preview')\n\n elif event.state == 'down':\n self._start_x, self._start_y = x, y\n if msg:\n viewer.onscreen_message(\"Rotate colormap (drag mouse L/R)\",\n delay=1.0)\n else:\n viewer.onscreen_message(None)\n return True",
"def ms_cmap_restore(self, viewer, event, data_x, data_y, msg=True):\n if self.cancmap and (event.state == 'down'):\n self.restore_colormap(viewer, msg)\n return True",
"def shift_cmap(cmap, start=0., locpoint=0.5, stop=1.0, name='centered'):\r\n\r\n # declare a colour + transparency dictionary\r\n cdict={'red':[], 'green':[], 'blue':[], 'alpha':[]}\r\n\r\n # regular index to compute the colors\r\n RegInd = np.linspace(start, stop, cmap.N)\r\n\r\n # shifted index to match what the data should be centered on\r\n ShiftInd = np.hstack([np.linspace(0., locpoint, int(cmap.N / 2),\r\n endpoint=False),\r\n np.linspace(locpoint, 1., int(cmap.N / 2))])\r\n\r\n # associate the regular cmap's colours with the newly shifted cmap colour\r\n for RI, SI in zip(RegInd, ShiftInd):\r\n\r\n # get standard indexation of red, green, blue, alpha\r\n r, g, b, a = cmap(RI)\r\n\r\n cdict['red'].append((SI, r, r))\r\n cdict['green'].append((SI, g, g))\r\n cdict['blue'].append((SI, b, b))\r\n cdict['alpha'].append((SI, a, a))\r\n\r\n return LinearSegmentedColormap(name, cdict)",
"def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )",
"def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()",
"def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))",
"def sources_colormap(self, **kwargs):\n self.cbar_control('Projection', **kwargs)",
"def _setcol(self, c):\n self.settings[\"heatmap\"][\"cmap\"] = self.img.get_cmap().name\n self._save_settings()",
"def drag_zoom(self, event):\r\n if self._xypress:\r\n x, y = event.x, event.y\r\n lastx, lasty, a, ind, view = self._xypress[0]\r\n (x1, y1), (x2, y2) = np.clip(\r\n [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)\r\n self._zoom_mode = \"x\"\r\n if self._zoom_mode == \"x\":\r\n y1, y2 = a.bbox.intervaly\r\n elif self._zoom_mode == \"y\":\r\n x1, x2 = a.bbox.intervalx\r\n self.draw_rubberband(event, x1, y1, x2, y2)",
"def define_plot_cmap(\n fig,\n ax,\n mid_point,\n cmap,\n ticks,\n labels,\n cmap_label,\n):\n\n new_cmap = shiftedColorMap(\n cmap,\n midpoint=mid_point,\n name='shifted'\n )\n X = np.linspace(0, 1, 256)\n cax = ax.scatter(-X-100, -X-100, c=X, cmap=new_cmap)\n cbar = fig.colorbar(cax, ticks=ticks, spacing='proportional')\n cbar.ax.set_yticklabels(labels, fontsize=16)\n cbar.set_label(cmap_label, fontsize=16)\n return new_cmap",
"def toggle_cursor(self, *args) -> None:\n cursors = [{\"cursor\": \"grab\"}, {\"cursor\": \"crosshair\"}]\n self.m.default_style = cursors[self.menu.v_model]\n self.marker.visible = self.menu.v_model\n\n return",
"def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()",
"def on_preview_cmaps(self, evt):\n wx.BeginBusyCursor()\n import matplotlib.pyplot as plt\n\n colormaps = self.model.get_colormap_choices()\n colormap_strip = self.model.generate_colormap_strip()\n num_maps = len(colormaps) + 1\n figure = plt.figure(figsize=(5, 8))\n figure.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, m in enumerate(colormaps):\n if not m.endswith(\"_r\"):\n ax = plt.subplot(num_maps, 1, i + 1)\n plt.axis('off')\n plt.imshow(colormap_strip, aspect='equal', cmap=self.model.get_cmap(m), origin='lower')\n pos = list(ax.get_position().bounds)\n figure.text(pos[0] - 0.01, pos[1], m, fontsize=10, horizontalalignment='right')\n plt.show()\n wx.EndBusyCursor()",
"def SetColormap(self, *args):\n return self.send({\"cmd\": \"SetColormap\", \"args\": args})",
"def mouse_zoom(self, norm_mouse_x, norm_mouse_y, dragging=True):\n\t\tif self.last_x:\n\t\t\tdx, dy = self.last_x - norm_mouse_x, self.last_y - norm_mouse_y\n\t\t\tnorm_mouse_r_delta = 20*np.sqrt(dx*dx+dy*dy)\n\t\t\tif dy > 0:\n\t\t\t\tnorm_mouse_r_delta = -norm_mouse_r_delta\n\t\t\tif dragging:\n\t\t\t\tself.cam_eye[2] += norm_mouse_r_delta\n\t\t\t\tif self.cam_eye[2] < 0.1:\n\t\t\t\t\tself.cam_eye[2] = 0.1\n\t\t\t\tself.update_modelview()\n\t\tself.last_x, self.last_y = norm_mouse_x, norm_mouse_y",
"def DoCameraZoom(self, evt):\r\n\r\n if (self._right_click_x != None):\r\n # we should change cursor here\r\n self._parent.SetCursor(self._cursors['zoom'])\r\n self._right_click_x = None\r\n self._right_click_y = None\r\n\r\n super(EVSRenderPane2D, self).DoCameraZoom(evt)",
"def set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))",
"def paint(self, row, col, count, colorPair):\n mainCursesWindow.chgat(self.top + row, self.left + col, count, colorPair)",
"def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An interactive way to restore the colormap contrast settings after a warp operation. | def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):
if not self.cancmap:
return False
event.accept()
if event.state == 'down':
self.restore_contrast(viewer, msg=msg) | [
"def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):\n if self.cancmap and (event.state == 'down'):\n self.restore_contrast(viewer, msg=msg)\n return True",
"def ms_contrast(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return False\n event.accept()\n msg = self.settings.get('msg_contrast', msg)\n\n x, y = self.get_win_xy(viewer)\n\n if event.state == 'move':\n self._tweak_colormap(viewer, x, y, 'preview')\n\n elif event.state == 'down':\n self._start_x, self._start_y = x, y\n if msg:\n self.onscreen_message(\n \"Shift and stretch colormap (drag mouse)\", delay=1.0)\n else:\n self.onscreen_message(None)",
"def invert_heatmap(self):\n if self.settings[\"heatmap\"][\"cmap\"][-2:] == \"_r\":\n self.settings[\"heatmap\"][\"cmap\"] = self.settings[\"heatmap\"][\"cmap\"][:-2]\n self.img.set_cmap(self.settings[\"heatmap\"][\"cmap\"])\n self.static_canvas.draw()\n else:\n try:\n self.img.set_cmap(self.settings[\"heatmap\"][\"cmap\"] + \"_r\")\n self.settings[\"heatmap\"][\"cmap\"] = self.settings[\"heatmap\"][\"cmap\"] + \"_r\"\n self.static_canvas.draw()\n except ValueError as inst:\n print(type(inst), inst)\n self._save_settings()",
"def ms_cmap_restore(self, viewer, event, data_x, data_y, msg=True):\n if self.cancmap and (event.state == 'down'):\n self.restore_colormap(viewer, msg)\n return True",
"def set_contrast(contrast): \n lcd.set_contrast(contrast)",
"def restore(self):\n\t\twindll.gdi32.SetDeviceGammaRamp(self.dc, byref(self.old_ramp))",
"def setColorMap(self, colormap):\n\n if colormap=='overlay':\n self.stack[0].setColorMap(colormap='magenta')\n self.stack[1].setColorMap(colormap='green')\n elif colormap=='diff':\n self.stack[0].setColorMap(colormap='magentadiff')\n self.stack[1].setColorMap(colormap='greendiff')\n else:\n self.stack[0].setColorMap(colormap='magenta')\n self.stack[1].setColorMap(colormap='green')\n\n for i in range(self.numberOfDataSets):\n self.volumeProperty.SetScalarOpacity(i, self.stack[i].currentOpacityMap)\n self.volumeProperty.SetColor(i, self.stack[i].currentColorMap)",
"def colormap(self, *args):\n return self.send({\"cmd\": \"colormap\", \"args\": args})",
"def change_contrast(self, b):\n self.fft_plot.set_clim(0, self.contrast_slider.value * self.v_range)\n clear_output()\n display(self.fig)",
"def adjust_contrast(X):\n\tXc = X.copy()\n\tdarken_ind = np.random.binomial(1, 0.5, X.shape[0]).astype(np.bool)\n\tbrighten_ind = np.invert(darken_ind)\n\tXc[darken_ind] = darken_image(X[darken_ind])\n\tXc[brighten_ind] = brighten_image(X[brighten_ind])\n\treturn Xc",
"def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )",
"def SetColormap(self, *args):\n return self.send({\"cmd\": \"SetColormap\", \"args\": args})",
"def update_colormap(self, to_overlay=None, **kwargs):\n if self._n_overlay >= 1:\n overlay = self._n_overlay - 1 if to_overlay is None else to_overlay\n # Define the colormap data :\n data_lim = self._data_lim[overlay]\n col = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[overlay, ...] = Colormap(**kwargs).to_rgba(col)\n self._text2d.set_data(self._text2d_data)\n self.update()",
"def _setcol(self, c):\n self.settings[\"heatmap\"][\"cmap\"] = self.img.get_cmap().name\n self._save_settings()",
"def mapping_off(planet):\n\tplanet.image_copy = \\\n\tpg.transform.scale(planet.image_mars, (100, 100))\n\tplanet.image_copy.set_colorkey(COLOR[1])",
"def set_contrast_levels(self, contrast_level=0):\n for cmap_panel, img_panel in zip((self.cmap_panels[0], self.cmap_panels[1]),\n (self.img1_panel, self.img2_panel)):\n conf = img_panel.conf\n img = img_panel.conf.data\n if contrast_level is None:\n contrast_level = 0\n conf.contrast_level = contrast_level\n clevels = [contrast_level, 100.0-contrast_level]\n\n imax = img.max()\n imin = img.min()\n if imax - imin < 1:\n imax = imax + 1\n cmap_panel.imin_val.SetValue('%.4g' % imin)\n cmap_panel.imax_val.SetValue('%.4g' % imax)\n jmin, jmax = np.percentile(img, clevels)\n\n conf.int_lo[0] = imin\n conf.int_hi[0] = imax\n conf.cmap_lo[0] = xlo = (jmin-imin)*conf.cmap_range/(imax-imin)\n conf.cmap_hi[0] = xhi = (jmax-imin)*conf.cmap_range/(imax-imin)\n\n cmap_panel.cmap_hi.SetValue(int(xhi))\n cmap_panel.cmap_lo.SetValue(int(xlo))\n cmap_panel.islider_range.SetLabel('Shown: [ %.4g : %.4g ]' % (jmin, jmax))\n cmap_panel.redraw_cmap()\n img_panel.redraw()",
"def reset_color(self):\n sys.stdout.write(C_RES)",
"def update_imshow(val):\n image.set_clim(vmin = image_min, # + contrastSlider.val*image_diff//8\n vmax = image_max - contrastSlider.val*image_diff) #//2",
"def test_colormap_discrete():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['r', 'g', 'b'],\n interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_rgb.png\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This decorator is meant to decorate management commands. Any exceptions raised in the command's handle method will be logged and reraised. | def log_exceptions(cls):
class NewClass(cls):
def handle(self, *args, **options):
try:
                super().handle(*args, **options)
except Exception:
logger.exception("Management command '{}' failed. Traceback follows: ".format(sys.argv[1]))
raise
return NewClass | [
"def wraps(command):\n\n return CommandWrapper(command)",
"def track_command(func):\n\n def wrapped(*args, **kwargs):\n\n if not _telemetry_enabled():\n # When Telemetry is disabled, call the function immediately and return.\n return func(*args, **kwargs)\n\n telemetry = Telemetry()\n\n exception = None\n return_value = None\n exit_reason = \"success\"\n exit_code = 0\n\n duration_fn = _timer()\n try:\n\n # Execute the function and capture return value. This is returned back by the wrapper\n # First argument of all commands should be the Context\n return_value = func(*args, **kwargs)\n\n except UserException as ex:\n # Capture exception information and re-raise it later so we can first send metrics.\n exception = ex\n exit_code = ex.exit_code\n exit_reason = type(ex).__name__\n\n except Exception as ex:\n exception = ex\n # Standard Unix practice to return exit code 255 on fatal/unhandled exit.\n exit_code = 255\n exit_reason = type(ex).__name__\n\n ctx = Context.get_current_context()\n telemetry.emit(\"commandRun\", {\n # Metric about command's general environment\n \"awsProfileProvided\": bool(ctx.profile),\n \"debugFlagProvided\": bool(ctx.debug),\n \"region\": ctx.region or \"\",\n \"commandName\": ctx.command_path, # Full command path. ex: sam local start-api\n\n # Metric about command's execution characteristics\n \"duration\": duration_fn(),\n \"exitReason\": exit_reason,\n \"exitCode\": exit_code\n })\n\n if exception:\n raise exception # pylint: disable=raising-bad-type\n\n return return_value\n\n return wrapped",
"def CommandDecorator(command_name):\n\n def InnerCommandDecorator(original_class):\n \"\"\"\"Inner Decorator that actually wraps the class.\"\"\"\n if not hasattr(original_class, '__doc__'):\n raise InvalidCommandError('All handlers must have docstrings: %s' %\n original_class)\n\n if not issubclass(original_class, CliCommand):\n raise InvalidCommandError('All Commands must derive from CliCommand: %s' %\n original_class)\n\n _commands[command_name] = original_class\n original_class.command_name = command_name\n\n return original_class\n\n return InnerCommandDecorator",
"def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator",
"def standard_error_handler(error_function):\n\n async def wrapper(cls, ctx, error):\n\n extra = f\"\\n\\nSee the help message for more information.\"\n\n # This prevents any commands with local handlers being handled here\n if hasattr(ctx.command, \"on_error\"):\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n\n ignored = (commands.CommandNotFound,)\n\n # Anything in ignored will return and prevent anything happening.\n if any([isinstance(error, i) for i in ignored]):\n return\n\n if isinstance(error, DisabledCommand):\n await pretty_print(\n ctx, \"This command is disabled!\", title=\"Error\", color=ERROR_COLOR\n )\n\n elif isinstance(error, MemberNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, RoleNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, NoPrivateMessage):\n await pretty_print(\n ctx,\n \"This command cannot be run in a private message.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, PrivateMessageOnly):\n try:\n await ctx.message.delete()\n extra += \"\\nYour message has been deleted\"\n except:\n print(\"Could not delete message\")\n await pretty_print(\n ctx,\n \"This command should be run in a Private Message only!\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRole):\n await pretty_print(\n ctx, str(error) + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, IllegalRole):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, CheckFailure):\n await pretty_print(\n ctx,\n \"Could not run command, do you have sufficient permissions in this channel?\"\n + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, BadArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Could not run command, is it formatted properly?\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRequiredArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx, \"Missing required arguments\", title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, BadUnionArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Invalid argument\",\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, WalletNotVerified):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, InvalidCoin):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, RequestError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n elif isinstance(error, FatalError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n await error_function(cls, ctx, error)\n\n return wrapper",
"async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")",
"def _mod_command_handler(self, cmd, args):\n return bundy.config.create_answer(1, \"Unknown command: \" + str(cmd))",
"def command(func: 'function') -> 'function':\n func._decorators = (Bot.command,)\n return func",
"def on_cmd(cmd_hook):\n def manager(self):\n if self.message[0] == '!':\n action = self.message.split()\n cmd, args = action[0][1:], action[1:]\n try:\n f = getattr(self, cmd, None)\n if f:\n func_if_attr(f, cmd_hook)(*args)\n except Exception as e:\n print e\n return manager",
"def manage(command):\n if not command_is_available(command):\n warn('Management command \"%s\" is not available' % command)\n else:\n run('python manage.py ' + command)",
"def shell_command(cmd_name):\n def inner(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n func.trac_method = cmd_name\n return func(*args, **kwargs)\n return wrapper\n return inner",
"async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. {e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)",
"def handle(self, command):\n fn = \"handle_{}\".format(command)\n try:\n self.commands[fn](self)\n except KeyError:\n if settings.DEBUG:\n print(\"Could not find command: {}\".format(command))",
"async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n\n if hasattr(ctx.command, 'on_command_error'):\n return\n\n # This prevents any cogs with an overwritten cog_command_error being handled here.\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n return\n\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(f'Command pas trouvé')\n return\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{ctx.command} has been disabled.')\n return\n\n if isinstance(error,commands.errors.PrivateMessageOnly):\n await ctx.message.delete()\n channel = await ctx.message.author.create_dm()\n await channel.send(f'{ctx.command} ne peut être exécuté que en message privé !!')\n return\n # For this error example we check to see where it came from...\n if isinstance(error, commands.BadArgument):\n await ctx.send('Mauvais arguments passés')\n return\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('Il manque des arguments à la commande')\n return\n # All other Errors not returned come here. And we can just print the default TraceBack.\n logger.error(f'Ignoring exception in command {ctx.command} : {type(error)} {error} {error.__traceback__}')",
"def command(self):\n\n raise NotImplementedError()",
"def mod_command_handler(self, cmd, args):\n self.command_handler_params = (cmd, args) # for inspection\n return bundy.config.create_answer(0)",
"def command(self, *commands):\n def decorator(function):\n for command in commands:\n self.functions[command] = function\n return function\n return decorator",
"def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')",
"def _normalize_command(func):\n\n @functools.wraps(func)\n def wrapper(command, **kwargs):\n if not isinstance(command, (str,)):\n command = _normalize_args(command)\n\n return func(command, **kwargs)\n\n return wrapper"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a list of random minibatches from (X, Y) | def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size]
mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches*mini_batch_size:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches | [
"def minibatches(X, Y, batch_size):\n\tm = X.shape[0]\n\tn_batches = int(np.floor(m / batch_size))\n\trandom_indices = np.random.permutation(np.arange(m))\n\tfor i in range(n_batches):\n\t\tbatch_indices = np.arange(i * batch_size, (i + 1) * batch_size)\n\t\tbatch_indices = random_indices[batch_indices]\n\t\tyield X[batch_indices], Y[batch_indices]",
"def _generate_minibatches(X, y, batch_size=32, shuffle=False):\n if shuffle:\n X, y = utils.shuffle(X, y)\n\n N = len(X)\n assert N == len(y)\n assert batch_size == -1 or batch_size > 0, \"Invalid batch size.\"\n\n batch_size = min(batch_size, N)\n\n if batch_size == -1: # batch sgd\n batch_size = N\n n_batches = 1\n last_batch_size = batch_size\n else: # mini-batch sgd\n n_batches, last_batch_size = divmod(N, batch_size)\n n_batches = n_batches + (1 if last_batch_size > 0 else 0)\n\n ix = 0\n\n for batch_i in range(n_batches):\n batch_start = ix\n batch_end = ix + batch_size\n\n if batch_end > N:\n batch_end = ix + last_batch_size\n\n yield batch_i, X[batch_start:batch_end], y[batch_start:batch_end]\n\n ix += batch_size",
"def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \n np.random.seed(seed) \n m = X.shape[1] \n mini_batches = []\n \n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n num_complete_minibatches = math.floor(m/mini_batch_size)\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : (k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k + 1) * mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : ]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : ] \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches",
"def coordinates_generator(total, min_x, max_x, min_y, max_y):\n coordinates = []\n for i in range(total):\n x = randint(min_x, max_x)\n y = randint(min_y, max_y)\n coordinates.append((x, y))\n return coordinates",
"def generate_mines(self, number):\n mine_locations = []\n available_places = [[j, i]\n for i in xrange(0, self.x) for j in xrange(0, self.y)]\n while number > 0:\n # the chosen coordinate for a mine is appended into the list and is\n # removed from the list of choices to prevent duplicates.\n choice = random.choice(available_places)\n available_places.remove(choice)\n mine_locations.append(choice)\n number -= 1\n return mine_locations",
"def mini_batches(X, Y, batch_size = 32):\n \n full_batch_size = X.shape [1]\n number_mini_batches = int (full_batch_size / batch_size)\n splitting = np.array_split (\n np.random.permutation (full_batch_size), number_mini_batches)\n \n mini_batches = []\n \n for i in range (number_mini_batches):\n X_mini_batch_now = X [:, splitting [i]]\n Y_mini_batch_now = Y [:, splitting [i]]\n ## Create list\n mini_batches.append ((X_mini_batch_now, Y_mini_batch_now))\n \n return mini_batches",
"def get_mines(self):\n\t\treturn ((x, y) for x in range(self.width)\n\t\t for y in range(self.height) if self.mines[x][y])",
"def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine",
"def generate_positions_by_minimum_distance(shape, num_bots, min_distance):\n x_pos = []\n y_pos = []\n \n x_points = list(np.arange(10, shape[0] - 10))\n y_points = list(np.arange(10, shape[1] - 10))\n while len(x_pos) < num_bots and len(x_points) > 0 and len(y_points) > 0:\n ind_x = random.choice(range(len(x_points))) #select random point\n ind_y = random.choice(range(len(y_points)))\n x = x_points.pop(ind_x)\n y = y_points.pop(ind_y)\n minimum = float('inf') #keep track of minimum distance\n for point in range(len(x_pos)):\n distance = sqrt((x_pos[point] - x)**2 + (y_pos[point] - y)**2) \n if distance < minimum:\n minimum = distance\n if minimum <= min_distance:\n continue\n else:\n x_pos.append(x)\n y_pos.append(y)\n if len(x_pos) < num_bots:\n print \"SHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIi\", zip(x_pos, y_pos)\n return None\n else:\n #print \"Remaining points: %s %s\" % (len(x_points), len(y_points))\n return zip(x_pos, y_pos)",
"def get_minibatches(data, minibatch_size, shuffle=True):\r\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\r\n data_size = len(data[0]) if list_data else len(data)\r\n indices = np.arange(data_size)\r\n if shuffle:\r\n np.random.shuffle(indices)\r\n for minibatch_start in np.arange(0, data_size, minibatch_size):\r\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\r\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\r\n else minibatch(data, minibatch_indices)",
"def get_start_centers(k, x, y):\n chosen = set()\n centers = []\n for i in range(k):\n _rand = randrange(len(x))\n while _rand in chosen:\n _rand = randrange(len(x))\n chosen.add(_rand)\n centers.append([x[_rand], y[_rand]])\n return centers",
"def get_random_minibatch(self):\n batch_size = min(len(self.buffer), 256) # prev: 64\n weights = np.linspace(0.0, 1.0, len(self.buffer)) # Add weights to cases, so newer cases are prioritized\n random_batch = random.choices(population=self.buffer, weights=weights, k=batch_size)\n return random_batch",
"def bootstrap_sample(data: List[X]) -> List[X]:\n\treturn [random.choice(data) for _ in data]",
"def _get_bootstrap_sample(x, y, num_reps):\r\n combined = array(list(x) + list(y))\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n for i in range(num_reps):\r\n # sampling with replacement\r\n indices = randint(0, total_obs, total_obs)\r\n sampled = combined.take(indices)\r\n # split into the two populations\r\n sampled_x = sampled[:num_x]\r\n sampled_y = sampled[num_x:]\r\n yield sampled_x, sampled_y",
"def resample(X, y):\n N = len(X)\n for _ in range(N):\n i = random.randint(0, N - 1)\n yield X[i], y[i]",
"def get_minibatch(self, X, batch_size, batch_start):\n\n batch_slice = slice(batch_start, batch_start + batch_size)\n noise_batch_start = int(batch_start * self.nu)\n noise_batch_slice = slice(noise_batch_start, noise_batch_start + int(batch_size * self.nu))\n\n X_batch = X[batch_slice]\n Y_batch = self.Y[noise_batch_slice]\n\n return X_batch, Y_batch",
"def create_moves(x, y):\n if (x == 0) and (y == 0):\n return list([(0, 0)])\n\n moves = []\n for i in range(x):\n moves.append((i, y))\n for i in range(y):\n moves.append((x, i))\n\n shortest = min(x, y)\n for i in range(1, shortest + 1):\n moves.append((x - i, y - i))\n\n return list(set(moves))",
"def weights_generator(coordinates, min_w, max_w):\n weights = []\n for i in range(len(coordinates)):\n coordinate_weight = randint(min_w, max_w)\n weights.append(coordinate_weight)\n return weights",
"def sample_between_points(self, x, y):\n return x + (y - x)*np.random.random()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that Predictor instances are not serializable. | def test_serialization():
# Class is serializable.
ray.put(DummyPredictor)
# Instance is not serializable.
predictor = DummyPredictor()
with pytest.raises(PredictorNotSerializableException):
ray.put(predictor) | [
"def test_serializable_check(self):\n with patch_config_options({\"runner.enforceSerializableSessionState\": True}):\n script = self.script_from_string(\n \"\"\"\n import streamlit as st\n\n def unserializable_data():\n return lambda x: x\n\n st.session_state.unserializable = unserializable_data()\n \"\"\",\n )\n sr = script.run()\n assert sr.exception\n assert \"pickle\" in sr.exception[0].value",
"def test_serializable_check_off(self):\n with patch_config_options({\"runner.enforceSerializableSessionState\": False}):\n script = self.script_from_string(\n \"\"\"\n import streamlit as st\n\n def unserializable_data():\n return lambda x: x\n\n st.session_state.unserializable = unserializable_data()\n \"\"\",\n )\n sr = script.run()\n assert not sr.exception",
"def _check_pickleable(obj):\n def recurse(obj):\n if isinstance(obj, (list, tuple, set)):\n return [recurse(x) for x in obj]\n if isinstance(obj, dict):\n return [[recurse(x), recurse(y)] for x, y in obj.items()]\n if isinstance(obj, (str, int, float, bool, bytes, bytearray)):\n return None # Python primitive types are pickleable.\n if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:\n return None # NumPy arrays and PyTorch tensors are pickleable.\n if is_persistent(obj):\n return None # Persistent objects are pickleable, by virtue of the constructor check.\n return obj\n with io.BytesIO() as f:\n pickle.dump(recurse(obj), f)",
"def test_pickle_dataset(dataset: Dataset):\n assert pickle.loads(pickle.dumps(dataset)) == dataset",
"def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def test_valid_serialization_unfit_model(self):\n instance = GammaUnivariate()\n result = GammaUnivariate.from_dict(instance.to_dict())\n assert instance.to_dict() == result.to_dict()",
"def test_pickleable(self):\n try:\n pickle.dumps(self.matcher)\n except TypeError:\n self.fail(\"Cannot dump matcher using pickle\")",
"def test_collection_not_estimator(self):\n for cls in (list, dict, tuple, set):\n self.assertFalse(isestimator(cls))\n\n things = ['pepper', 'sauce', 'queen']\n self.assertFalse(isestimator(things))",
"def serializable() -> bool:\n return False",
"def test__pickle_unpickle(self):\n pass",
"def test_serialize_object(self):\n test_obj = self.TestObject(prop1='x', prop2=1234)\n\n with self.assertRaises(TypeError):\n serialize(test_obj)",
"def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))",
"def is_serializable(obj):\n try:\n PickleAppendixEncoder.encode(obj)\n return True\n except:\n return False",
"def test_deserialize_missing_data(self):\n data = {\"id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def test_to_dict_no_schema(__init__):\n model = type(\"model\", (utility_base.UtilityBase,), {\"__init__\": __init__})\n instance = model()\n\n with pytest.raises(exceptions.ModelAttributeError):\n instance.to_dict()",
"def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data",
"def test_serialize_no_metadata(self):\n pass # pragma: no cover",
"def test_to_dict_no_properties(__init__):\n model = type(\n \"model\", (utility_base.UtilityBase,), {\"_schema\": {}, \"__init__\": __init__}\n )\n instance = model()\n\n with pytest.raises(exceptions.MalformedSchemaError):\n instance.to_dict()",
"def test_picklable(self):\n pickled = pickle.dumps(self.structure)\n unpickled = pickle.loads(pickled)\n self.assertEqual(self.structure, unpickled)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds player calendar to team_cal and returns filled teams | def match_with_player(self, name, player_cal):
updated_team_cal = self.team_cal.copy()
filled_team_keys = []
for loc in player_cal.stack().index:
current_player_count = self.team_cal.at[loc]
if self.price_cal.at[loc] <= player_cal.at[loc]:
if current_player_count < self.team_size * 2:
updated_team_cal.at[loc] += 1
self.team_dict[f'{loc[1]}-{loc[0]}'].append(name)
if current_player_count == self.team_size * 2 - 1:
filled_team_keys.append(f'{loc[1]}-{loc[0]}')
else:
continue # team is filled
self.team_cal = updated_team_cal
return filled_team_keys | [
"def schedule_set(teams):\n for day in league:\n for num in day:\n num[0] = teams[num[0]]\n num[1] = teams[num[1]]\n show_schedule()\n return league",
"def get_playoff_teams(self, simulation=False, final_day=False):\n if simulation:\n league_standings = DayOfGames.league_standings_class\n team_standings = DayOfGames.team_standings_class\n else:\n league_standings = self.league_standings\n team_standings = self.team_standings\n playoff_teams_all = {}\n for conference in ['East', 'West']:\n division_leaders = self.get_division_leaders(conference, simulation)\n for team, max_wins in league_standings[conference]['Conference'].items():\n standings = sorted(league_standings[conference]['Conference'], \n key=league_standings[conference]['Conference'].get, reverse=True)\n playoff_teams = [x for x in standings if x not in division_leaders][0:5] + division_leaders\n seed_8 = standings[7]\n seed_9 = standings[8]\n seed_8_info = team_standings[seed_8]\n seed_9_info = team_standings[seed_9]\n if seed_8_info['Max_Wins'] == seed_9_info['Max_Wins']:\n tie_break = seed_8_info['Head2Head'][seed_9]\n if tie_break < 0:\n playoff_teams.remove(seed_8)\n playoff_teams.append(seed_9)\n else:\n if seed_8_info['Conference_Max_Wins'] < seed_9_info['Conference_Max_Wins']:\n playoff_teams.remove(seed_8)\n playoff_teams.append(seed_9)\n playoff_teams_all[conference] = playoff_teams\n if final_day:\n DayOfGames.final_playoff_teams = playoff_teams_all\n else:\n return playoff_teams_all",
"def get_calendar(self):\n matchs = MatchInLeague.objects.filter(league_id=self.id)\n tour_count = max([match.tour for match in matchs])\n for tour in range(1, tour_count + 1):\n yield (tour, matchs.filter(tour=tour))",
"def _create_teams(self):\n\t\tself.teamsDict = {}\n\t\tself.teamNamesList = []\n\t\tfor team in range(self.numberOfTeams):\n\t\t\tname = 'TEAM_'+str(team+1)\n\t\t\tself.teamNamesList.append(name)\n\t\t\tself.teamsDict[name] = app.game.team.Team(sport_type=self.gameData['sportType'])",
"def get_player_games(self, year, use_local=True):",
"def add_league_teams(league_diction, team_count, host, root, password):\r\n teams_diction = scrape_teams(league_diction, team_count)\r\n create_teams(host, root, password, dict_to_read=teams_diction)\r\n\r\n return teams_diction",
"def add_teams(self, teams):\n from .. import orgs\n\n headers = BranchProtection.PREVIEW_HEADERS_MAP[\"nested_teams\"]\n resp = self._post(self.teams_url, data=teams, headers=headers)\n json = self._json(resp, 200)\n return [orgs.ShortTeam(team, self) for team in json] if json else []",
"def add_players_on_floor(self):\n for period in self.Periods:\n # set current players to be period starters\n current_players = period.Starters.copy()\n for pbp_event in period.Events:\n if pbp_event.is_substitution():\n coming_in = pbp_event.player2_id\n going_out = pbp_event.player_id\n team_id = pbp_event.team_id\n current_players[team_id] = [coming_in if player == going_out else player for player in current_players[team_id]]\n pbp_event.current_players = current_players.copy()",
"def get_teams_and_schedule():\n start_time = timedelta(hours=19)\n time_to_add = timedelta(minutes=15)\n teams = session.query(Team).all()\n\n for team in teams:\n team.time = str(start_time)\n start_time += time_to_add\n yield team",
"def add_to_all_teams(user, org):\n\n for team in org.get_teams():\n team.add_to_members(user)",
"def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players",
"async def add_players_and_teams(player_data, *, create_index=False):\r\n db = client['players_and_teams']\r\n team_collection = db['teams']\r\n player_collection = db['players']\r\n\r\n team_documents = []\r\n player_documents = []\r\n\r\n #since this isn't going to magically change while we're adding players\r\n #we ok\r\n cached_mod_base = {\r\n \"maps_played\": 0,\r\n \"maps_won\": 0,\r\n \"maps_lost\": 0,\r\n \"total_scores\": 0,\r\n \"average_acc\": 0.00,\r\n \"base_acc\": 0.00,\r\n \"average_score\": 0.00,\r\n \"base_score\": 0,\r\n \"average_contrib\": 0.00, #these just go unused for team docs\r\n \"base_contrib\": 0.00,\r\n }\r\n\r\n for team in player_data:\r\n #first, add the new team\r\n players = team[1:]\r\n player_data = [await osuapi.get_player_data(username) for username in players]\r\n player_ids = [player['user_id'] for player in player_data]\r\n team_document = {\r\n '_id': team[0],\r\n 'name_lower': team[0].lower(),\r\n 'players': player_ids,\r\n 'scores': [],\r\n 'cached':{\r\n 'average_acc': 0.00,\r\n 'base_acc': 0.00, \r\n 'acc_rank': 0,\r\n 'average_score': 0.00,\r\n 'base_score': 0.00, \r\n 'score_rank': 0,\r\n 'maps_played': 0,\r\n 'maps_won': 0,\r\n 'maps_lost': 0,\r\n 'total_scores': 0,\r\n 'hits':{\r\n '300_count': 0,\r\n '100_count': 0,\r\n '50_count': 0,\r\n 'miss_count': 0,\r\n },\r\n \"by_mod\":{\r\n \"NM\": cached_mod_base,\r\n \"HD\": cached_mod_base,\r\n \"HR\": cached_mod_base,\r\n \"DT\": cached_mod_base,\r\n \"FM\": cached_mod_base\r\n }\r\n }\r\n }\r\n team_documents.append(team_document)\r\n\r\n #then iterate over each player id\r\n #really we don't do anything with player_data but at least you can expand it easily\r\n for player_index, player_id in enumerate(player_ids):\r\n player_document = {\r\n \"_id\": player_id,\r\n 'user_name': player_data[player_index]['username'],\r\n 'user_lower': player_data[player_index]['username'].lower(),\r\n 'team_name': team[0],\r\n 'pfp_url': f\"https://a.ppy.sh/{player_id}\",\r\n 'scores': [],\r\n 'cached':{\r\n 'average_acc': 0.00,\r\n 'base_acc': 0.00,\r\n 'acc_rank': 0,\r\n 'average_score': 0.00,\r\n 'base_score': 0,\r\n 'score_rank': 0,\r\n 'average_contrib': 0.00,\r\n 'base_contrib': 0.00,\r\n 'contrib_rank': 0,\r\n 'maps_played': 0,\r\n 'maps_won': 0,\r\n 'maps_lost': 0,\r\n 'hits':{\r\n '300_count': 0,\r\n '100_count': 0,\r\n '50_count': 0,\r\n 'miss_count': 0,\r\n },\r\n \"by_mod\":{\r\n \"NM\": cached_mod_base,\r\n \"HD\": cached_mod_base,\r\n \"HR\": cached_mod_base,\r\n \"DT\": cached_mod_base,\r\n \"FM\": cached_mod_base\r\n }\r\n }\r\n }\r\n player_documents.append(player_document)\r\n await player_collection.insert_many(player_documents)\r\n await team_collection.insert_many(team_documents)\r\n\r\n if create_index:\r\n for field in [\"average_acc\", \"average_score\", \"average_contrib\", \"acc_rank\", \"score_rank\",\r\n \"contrib_rank\", \"user_name\", \"user_lower\"]:\r\n await player_collection.create_index([(field, -1)])\r\n for field in [\"average_acc\", \"average_score\", \"acc_rank\", \"score_rank\", \"name_lower\"]:\r\n await team_collection.create_index([(field, -1)])",
"def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)",
"def collect_teams(year):\n\n team_list = Team.objects.filter(year=year).order_by('location')\n teams = []\n for t in team_list:\n team = {\n 'id': t.abbreviation,\n 'team': t,\n }\n teams.append(team)\n return teams",
"def add_teams(self, team_list):\n\n for team in team_list:\n self.db.append(copy.deepcopy(team.__dict__))",
"def load_premiership_teams():\n # list of PremTeams to add\n team_list = [\n {'name': 'Arsenal', 'code': 'ARS', 'is_prem': True},\n {'name': 'Aston Villa', 'code': 'AVL', 'is_prem': True},\n {'name': 'Brighton and Hove Albion', 'code': 'BTN', 'is_prem': True},\n {'name': 'Brentford', 'code': 'BRE', 'is_prem': True},\n {'name': 'Bournemouth', 'code': 'BOU', 'is_prem': False},\n {'name': 'Burnley', 'code': 'BUR', 'is_prem': True},\n {'name': 'Cardiff City', 'code': 'CAR', 'is_prem': False},\n {'name': 'Chelsea', 'code': 'CHE', 'is_prem': True},\n {'name': 'Crystal Palace', 'code': 'CRY', 'is_prem': True},\n {'name': 'Everton', 'code': 'EVE', 'is_prem': True},\n {'name': 'Fulham', 'code': 'FUL', 'is_prem': False},\n {'name': 'Hull', 'code': 'HUL', 'is_prem': False},\n {'name': 'Huddersfield Town', 'code': 'HUD', 'is_prem': False},\n {'name': 'Leeds United', 'code': 'LEE', 'is_prem': True},\n {'name': 'Leicester City', 'code': 'LEI', 'is_prem': True},\n {'name': 'Liverpool', 'code': 'LIV', 'is_prem': True},\n {'name': 'Manchester City', 'code': 'MCY', 'is_prem': True},\n {'name': 'Manchester United', 'code': 'MUN', 'is_prem': True},\n {'name': 'Middlesbrough', 'code': 'MID', 'is_prem': False},\n {'name': 'Newcastle United', 'code': 'NEW', 'is_prem': True},\n {'name': 'Norwich City', 'code': 'NOR', 'is_prem': True},\n {'name': 'Queens Park Rangers', 'code': 'QPR', 'is_prem': False},\n {'name': 'Sheffield United', 'code': 'SHF', 'is_prem': False},\n {'name': 'Southampton', 'code': 'SOT', 'is_prem': True},\n {'name': 'Stoke City', 'code': 'STO', 'is_prem': False},\n {'name': 'Sunderland', 'code': 'SUN', 'is_prem': False},\n {'name': 'Swansea City', 'code': 'SWA', 'is_prem': False},\n {'name': 'Tottenham Hotspur', 'code': 'TOT', 'is_prem': True},\n {'name': 'Watford', 'code': 'WAT', 'is_prem': True},\n {'name': 'West Bromwich Albion', 'code': 'WBA', 'is_prem': False},\n {'name': 'West Ham United', 'code': 'WHM', 'is_prem': True},\n {'name': 'Wolverhampton Wanderers', 'code': 'WLV', 'is_prem': True},\n ]\n\n for team in team_list:\n print(PremTeam.objects.update_or_create(\n name=team['name'],\n code=team['code'],\n defaults={'is_prem': team['is_prem']}\n ))\n # print(pt, created)",
"def getTeam(team, table):\n\tif table != \"game\" and table != \"season\":\n\t\tprint(\"Invalid table specified\")\n\t\treturn None\n\n\ttry:\n\t\tt = Team.objects.get(name=team)\n\texcept Team.DoesNotExist:\n\t\tprint(\"Team not found in database\")\n\t\treturn None\n\t\n\tp_set = t.player_set.all()\n\tp_count = 0\n\tret = []\n\tfor p in p_set:\n\t\tp_stats = {}\n\t\tp_stats[\"NAME\"] = p.name\n\t\tif table == \"game\":\n\t\t\tg = p.gamestats_set.get(player=p.id)\n\t\t\tp_stats[\"GP\"] = float(g.GamesPlayed)\n\t\t\tp_stats[\"MIN\"] = float(g.MinutesPlayed)\n\t\t\tp_stats[\"PPG\"] = float(g.Points)\n\t\t\tp_stats[\"RPG\"] = float(g.Rebounds)\n\t\t\tp_stats[\"APG\"] = float(g.Assists)\n\t\t\tp_stats[\"SPG\"] = float(g.Steals)\n\t\t\tp_stats[\"BPG\"] = float(g.Blocks)\n\t\t\tp_stats[\"TPG\"] = float(g.Turnovers)\n\t\t\tp_stats[\"FG%\"] = float(g.FieldGoalPercentage)\n\t\t\tp_stats[\"FT%\"] = float(g.FreeThrowPercentage)\n\t\t\tp_stats[\"3P%\"] = float(g.ThreePointPercentage)\n\t\telse:\n\t\t\ts = p.seasonstats_set.get(player=p.id)\n\t\t\tp_stats[\"MIN\"] = s.MinutesPlayed\n\t\t\tp_stats[\"FGM\"] = s.FieldGoalsMade\n\t\t\tp_stats[\"FGA\"] = s.FieldGoalsAttempted\n\t\t\tp_stats[\"FTM\"] = s.FreeThrowsMade\n\t\t\tp_stats[\"FTA\"] = s.FreeThrowsAttempted\n\t\t\tp_stats[\"3PM\"] = s.ThreePointsMade\n\t\t\tp_stats[\"3PA\"] = s.ThreePointsAttempted\n\t\t\tp_stats[\"PTS\"] = s.Points\n\t\t\tp_stats[\"OFFR\"] = s.OffensiveRebounds\n\t\t\tp_stats[\"DEFR\"] = s.DefensiveRebounds\n\t\t\tp_stats[\"REB\"] = s.Rebounds\n\t\t\tp_stats[\"AST\"] = s.Assists\n\t\t\tp_stats[\"TO\"] = s.Turnovers\n\t\t\tp_stats[\"STL\"] = s.Steals\n\t\t\tp_stats[\"BLK\"] = s.Blocks\n\t\tret.insert(p_count, p_stats)\n\treturn ret",
"def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = 
scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)",
"def _team_init(self):\r\n\t\tfor team_type, team_info in self._teams.items():\r\n\t\t\tteam_info.team_type = team_type\r\n\t\t\tteam_info.maze_pos_finder = \\\r\n\t\t\t\tself._maze_manager.get_finder_by_name(team_type.__str__())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Email summary of results to user. | def EmailResults(recipient, error_mesg, topdir, dumpfile, logfile, motcor_summary):
#*********************************************************************************
if recipient is None:
return
elif 'noname' in recipient:
return
sender = 'preprocess'
    if 'Abnormal' in error_mesg:
subject = 'Problem while preprocessing %s' % topdir
else:
subject = 'Preprocessing complete for %s' % topdir
mssg = error_mesg
if logfile is not None and isinstance(logfile, str):
f = open(logfile, 'r')
lines = f.readlines()
f.close()
logged_errors = ''
for i in xrange(len(lines)):
if 'rror' in lines[i]:
mssg += ''.join(lines[i-1:])
break
mssg += motcor_summary
if dumpfile is not None:
f = open(dumpfile,'r')
mssg += '\nSummary of processing:\n'
mssg += f.read()
f.close()
send_email(recipient, subject, mssg, sender) | [
"def send_summary():\n root_dir = os.getcwd()\n log_file = os.path.join(root_dir, 'logs', time.strftime(\"%Y%m%d\") + '.log')\n\n result_text = read_log_to_result_text(log_file)\n send_email(result_text)",
"def email_ssh_report(self, results: list):\n body = self.header\n try:\n smtp_server = smtplib.SMTP(self.smtp_server, self.smtp_port)\n except ConnectionRefusedError:\n print(\"error: SMTP server unreachable.\")\n sys.exit(1)\n for instance in results:\n body += instance + \"\\n\"\n message = self.subject + body\n if results:\n smtp_server.sendmail(self.sender_email, self.receiver_email, message)",
"def _report_summary(self):\n self.report.add_heading(\"Summarized results for %s.\" % self.__class__.__name__, 3)\n entries = filter(lambda te: te.result is not None, self._tests)\n self.report.add_summary(entries)\n resultmsg = \"Aggregate result for %r.\" % (self.test_name,)\n result = int(self.result)\n if not self._nested:\n if result == constants.PASSED:\n self.report.passed(resultmsg)\n elif result == constants.FAILED:\n self.report.failed(resultmsg)\n elif result == constants.INCOMPLETE:\n self.report.incomplete(resultmsg)",
"def send_results_to_mail(self, results_list, to_email, send_to_developers=False, title=\"\", comment=\"\", content_options=CONTENT_ALL):\n raise NotImplementedError\n pass",
"def send_result_email(self): # pragma: no cover\n pass",
"def publish_summary(self, jobs):\n pass",
"def email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful",
"def _log_summary(self, summary):\n\n template = (\n \"Summary of results\\n\\n\"\n \"\\t Total items: \\t{total}\\n\"\n \"\\tItems produced: \\t{fetched}\\n\"\n \"\\t Items skipped: \\t{skipped}\\n\"\n \"\\n\"\n \"\\tLast item UUID: \\t{last_uuid}\\n\"\n \"\\tLast item date: \\t{last_updated_on}\\n\"\n \"\\n\"\n \"\\tMin. item date: \\t{min_updated_on}\\n\"\n \"\\tMax. item date: \\t{max_updated_on}\\n\"\n \"\\n\"\n \"\\tMin. offset: \\t{min_offset}\"\n \"\\tMax. offset: \\t{max_offset}\"\n \"\\tLast offset: \\t{last_offset}\\n\"\n \"\\n\"\n )\n\n values = {\n 'total': summary.total,\n 'fetched': summary.fetched,\n 'skipped': summary.skipped,\n 'last_uuid': summary.last_uuid or '-',\n 'last_updated_on': summary.last_updated_on or '-',\n 'min_updated_on': summary.min_updated_on or '-',\n 'max_updated_on': summary.max_updated_on or '-',\n 'min_offset': summary.min_offset or '-',\n 'max_offset': summary.min_offset or '-',\n 'last_offset': summary.last_offset or '-',\n\n }\n message = template.format(**values)\n\n logger.info(message)",
"def user_report(self, args):\n user_email = args.user_email\n show_events = args.show_events\n user = self.find_user_by_email(user_email)\n user.display(show_events)",
"def data_email_send(result_id, user_id):\n user = CustomUser.objects.get(pk=user_id)\n result = Result.objects.get(pk=result_id)\n email = user.email\n subject, from_email, to = 'Twitter data extraction result', 'amithah.nithin@gmail.com', email\n\n html_content = render_to_string('scraper/email.html', {'user': user}) # render with dynamic value\n text_content = strip_tags(html_content) # Strip the html tag. So people can see the pure text at least.\n\n # create the email, and attach the HTML version as well.\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_file(f\"{result.result_file.path}\")\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n print(\"email sent successfullly\")",
"def publishResults(self, tests):\n\n t1 = time.time()\n msgBuffer = []\n for i in range(len(tests)):\n msg1 = self.buildMessageForDetailedGraphs(tests[i])\n msg2 = self.buildMessageForSummaryGraphs(tests[i])\n msgBuffer.append(msg1)\n msgBuffer.append(msg2)\n resp = self.sendBatch(msgBuffer)\n if 'errors' in resp:\n self.logger.error(resp['errors'])\n t2 = time.time()\n self.logger.info('sent {0} metric points for to datadog in {1:.2f} seconds'.format(\n len(tests), t2-t1))",
"def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, 'no-reply@cultrtoolkit.com',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)",
"def post(self, request, *args, **kwargs):\n self.form = self.get_form()\n self.form.full_clean()\n results = self.get_queryset()\n nb_results = results.count()\n first_results = results[:10]\n site = get_current_site(self.request)\n querystring = self.get_form_data().urlencode()\n scheme = 'https'\n search_url = reverse('search_view')\n full_url = '{scheme}://{domain}{search_url}?{querystring}'.format(\n scheme=scheme,\n domain=site.domain,\n search_url=search_url,\n querystring=querystring)\n results_body = render_to_string('emails/search_results.txt', {\n 'user_name': self.request.user.full_name,\n 'aids': first_results,\n 'nb_results': nb_results,\n 'full_url': full_url,\n 'scheme': scheme,\n 'domain': site.domain,\n })\n send_mail(\n self.EMAIL_SUBJECT,\n results_body,\n settings.DEFAULT_FROM_EMAIL,\n [self.request.user.email],\n fail_silently=False)\n return HttpResponse('')",
"def print_names_and_email(self):\n print('*** Students who graduated *** ')\n passed_students = self.get_student_gpa()\n for student in passed_students:\n print('****************************')\n print(student.name)\n print('****************************')\n student.send_congrat_email()\n print('****************************')\n \n top_ten_percent = self.find_top_ten(passed_students)\n self.send_referral(top_ten_percent)",
"def run_report(self):\n self.generate()\n self.generate_report_file()\n smsg = \"Sent reports to {0}\".format(\n \", \".join(self.email_info['to']['email']))\n self.send_report(successmessage=smsg)\n return",
"def email_alert(smtp_server, sender, recipient, results):\n msg = email.mime.multipart.MIMEMultipart()\n msg['Subject'] = 'Execution result of command output to csv script'\n msg['From'] = sender\n msg['To'] = recipient\n mailobj = smtplib.SMTP(smtp_server)\n for result in results:\n try:\n part = MIMEApplication(open(result, 'rb').read())\n except TypeError:\n return\n part.add_header('Content-Disposition', 'attachment', filename=result)\n msg.attach(part)\n try:\n mailobj.sendmail(sender, recipient, msg.as_string())\n print('Email is sent for the result to %s' % recipient)\n mailobj.quit()\n except Exception as exc:\n print(str(exc))",
"def printSummary(self):\n pass",
"def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)",
"def summary():\n return render_template('summary.html')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Synthesize yaml header filename from directory name. | def _yaml_filename(self, path):
fullpath = os.path.abspath(path)
if not os.path.isdir(fullpath):
dirname = os.path.dirname(fullpath)
else:
dirname = path
if dirname.endswith('/'):
dirname = dirname[:-1]
fname = dirname.split('/')[-1] + '.yaml'
return dirname, fname | [
"def get_filename(self, source):\n return '{}/{}.yml'.format(self.config_dir, source)",
"def generate_html_header(name):\n name = name.split(\"/\")[-1]\n return \"<h1>{0}</h1>\".format(name).replace(\".txt\", \"\").replace(\"_\", \" \").upper()",
"def convert_dir_filename(dir_, tldir):\n dir_path = os.path.realpath(dir_).split('\\\\')\n doc_file = '__'.join(dir_path[dir_path.index(tldir):])\n return doc_file+'.rst'",
"def _gen_filename(self, name):\n if name == 'out_file':\n _, fname, ext = split_filename(self.inputs.in_file)\n return os.path.join(os.getcwd(), ''.join((fname, '_3dT',ext)))",
"def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))",
"def ifdef_name(filename):\n return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\"",
"def master_template_filename(plan_name):\n return '%s-%s' % (plan_name, 'template.yaml')",
"def get_filename(title):\n file_rootdir = get_setting('mediawiker_file_rootdir', None)\n if not file_rootdir:\n return strquote(title)\n use_subdirs = get_setting('mediawiker_use_subdirs', False)\n if use_subdirs:\n filename = os.path.join(file_rootdir, *(strquote(item) for item in os.path.split(title)))\n filedir = os.path.dirname(filename)\n if not os.path.isdir(filedir):\n print(\"Making dir:\", filedir)\n os.makedirs(filedir)\n return filename\n return os.path.join(file_rootdir, strquote(title))\n # If you use subdirs, then you should also adjust get_title() so that is can accomodate:",
"def make_filename_for_article(title):\n return title + '.md'",
"def filename(self, key=None, ext=None):\n filename = sanitize_key(key) if key else ''\n # use custom path separator (set to suppress directory generation) \n filename = filename.replace('/', self.sep)\n # concatenate with base directory and extension\n if ext is None:\n ext = self.ext\n return os.path.join(self._directory, filename) + ext",
"def root_name(file_name, file_id):\n if file_id is not None:\n return \"{}{}\".format(R_DIR, file_name.format(file_id))\n else:\n return \"{}{}\".format(R_DIR, file_name)",
"def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))",
"def generate_file_name(entry):\n return str_for_file(\n \"{name}, {year}, {title}\".format(\n year=entry[\"year\"],\n name=get_last_name(entry[\"author\"][0]),\n title=entry[\"title\"],\n )\n )",
"def make_filename(name):\n return name.replace('/', ' ')",
"def output_filename(directory, prefix, extension):\n if not os.path.exists(directory):\n os.makedirs(directory)\n return os.path.join(directory, '{0}-{1}.{2}'.format(\n prefix, time.strftime(\"%Y%m%d-%H%M%S\"), extension))",
"def _gen_filename(self, name):\n if name == 'out_file':\n _, fname, ext = split_filename(os.path.abspath(self.inputs.xset))\n return os.path.join(os.getcwd(), ''.join((fname, '_3dTcor',ext)))",
"def get_title(self, filename):\n filename = os.path.basename(filename)\n return unicode_path(filename)",
"def title(self):\n return os.path.basename(self.__path) if self.isdir() else self.file_title",
"def makeFolderName(fileName):\n\n exhibit, topic, story = fileName.split(\"_\")\n\n if exhibit == \"TH\":\n return \"{}/\".format(exhibit)\n\n elif exhibit == \"TL\":\n return \"{}/DL/\".format(exhibit)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create list of epis in pfile format (epi_series) and of epis in dicom format (epirt_paths) | def _EpiInfo(self, info, path):
epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \
'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}
for key in self.epi_keys.keys():
if self.epi_keys[key] != str(epi_vals[key]):
# Return None, which will cause these data to be ignored.
return None
# Early versions of the EPIC software saved p-files for the setup epis.
# Don't process these (or any epi with fewer than eight useable frames).
if self.hdr['tdim'] < (8 + self.skip):
return None
info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')
if self.shdr['EffEchoSpacing'] is not None:
info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.
else:
info['echo_spacing'] = 0.
if info['data_filetype'] == 'dicom':
        # Entry is name of directory for dicom images.
if not os.path.isdir(path):
entry = os.path.dirname(path)
else:
entry = path
else:
# Otherwise it is the name of a directory containing p-files.
entry = path
if info['data_filetype'] == 'ge_data' and info['type'] is not None:
# Found a pfile. Add it to the list.
if entry not in self.pfiles and info['tdim'] > 2:
self.pfiles.append(entry)
self.entry_map['epi'].append(entry)
if info['series'] not in self.epi_series:
self.epi_series.append(info['series'])
elif info['data_filetype'] == 'dicom' and \
info['psdname'] == 'epibold':
# This is the initial EPI done during setup.
info['outdir'] = self.episetup_dir
info['type'] = 'first_epi'
self.entry_map['first_epi'].append(entry)
info['imgfile'] = '%s/first_epi_%d' % \
(self.episetup_dir, len(self.entry_map['first_epi']))
elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \
info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:
# This is an epi reconstructed on the scanner.
self.epi_series.append(info['series'])
self.entry_map['epi'].append(entry)
if not os.path.isdir(path):
tmp_path = os.path.dirname(path)
else:
tmp_path = path
self.epirt_paths.append(tmp_path)
if self.fsl_flip:
info['filetype'] = 'brik'
else:
info['filetype'] = self.tmplt['epi_file_format']
info['TR'] = self.hdr['tsize']
if self.tmplt['acq_tr'] is None:
info['acq_tr'] = float(info['TR'])
else:
info['acq_tr'] = float(self.tmplt['acq_tr'])
return OK | [
"def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)",
"def get_virus_epitopes(self):\n\t\tprint(\"Return all epitopes of virus taxid={}\".format(self.current_virus_taxon_id))\n\t\tepitopes = []\n\t\tfor epi in self.current_virus_epitopes:\n\t\t\tepi_attributes = epi.get_all_attributes()\n\t\t\tif epi_attributes[\"is_linear\"]:\n\t\t\t\tepi_seq = epi_attributes[\"region_seq\"]\n\t\t\telse:\n\t\t\t\tepi_seq = None\n\t\t\tif not epi_seq:\n\t\t\t\tepi_seq = ''\n\t\t\tif not epi_attributes[\"mhc_class\"]:\n\t\t\t\tepi_attributes[\"mhc_class\"] = ''\n\t\t\tif not epi_attributes[\"mhc_allele\"]:\n\t\t\t\tepi_attributes[\"mhc_allele\"] = ''\n\n\t\t\tepitope = tuple([int(epi_attributes[\"epitope_id\"]),\n\t\t\t\t\t\t\t int(epi_attributes[\"virus_taxid\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"host_iri\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes['host_name']),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes['host_ncbi_id']),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"protein_ncbi_id\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"cell_type\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"mhc_class\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"mhc_allele\"]),\n\t\t\t\t\t\t\t parse_to_float_or_none(epi_attributes[\"response_frequency_positive\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"assay_types\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_seq),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes[\"region_start\"]),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes[\"region_stop\"]),\n\t\t\t\t\t\t\t \",\".join(epi_attributes[\"external_links\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"prediction_process\"]),\n\t\t\t\t\t\t\t epi_attributes[\"is_linear\"],\n\t\t\t\t\t\t\t epi_attributes[\"epitope_iri\"],\n\t\t\t\t\t\t\t epi_attributes[\"iedb_epitope_id\"]])\n\t\t\tepitopes.append(epitope)\n\t\treturn epitopes",
"def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])",
"def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)",
"def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname",
"def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs",
"def _get_all_psfex_objects(self, meds):\n desdata=os.environ['DESDATA']\n meds_desdata=meds._meta['DESDATA'][0]\n\n psfex_list=[]\n info=meds.get_image_info()\n nimage=info.size\n\n for i in xrange(nimage):\n impath=info['image_path'][i].strip()\n psfpath=impath.replace('.fits.fz','_psfcat.psf')\n\n if desdata not in psfpath:\n psfpath=psfpath.replace(meds_desdata,desdata)\n\n pex=psfex.PSFEx(psfpath)\n psfex_list.append(pex)\n\n return psfex_list",
"def get_ephemeris_files():\n config_file = os.path.expanduser(\"~\") + \"/.pyfstat.conf\"\n env_var = \"LALPULSAR_DATADIR\"\n please = \"Please provide the ephemerides paths when initialising searches.\"\n if os.path.isfile(config_file):\n d = {}\n with open(config_file, \"r\") as f:\n for line in f:\n k, v = line.split(\"=\")\n k = k.replace(\" \", \"\")\n for item in [\" \", \"'\", '\"', \"\\n\"]:\n v = v.replace(item, \"\")\n d[k] = v\n try:\n earth_ephem = d[\"earth_ephem\"]\n sun_ephem = d[\"sun_ephem\"]\n except:\n logging.warning(\n \"No [earth/sun]_ephem found in \" + config_file + \". \" + please\n )\n earth_ephem = None\n sun_ephem = None\n elif env_var in list(os.environ.keys()):\n ephem_version = \"DE405\"\n earth_ephem = os.path.join(\n os.environ[env_var], \"earth00-40-{:s}.dat.gz\".format(ephem_version)\n )\n sun_ephem = os.path.join(\n os.environ[env_var], \"sun00-40-{:s}.dat.gz\".format(ephem_version)\n )\n if not (os.path.isfile(earth_ephem) and os.path.isfile(sun_ephem)):\n earth_ephem = os.path.join(\n os.environ[env_var], \"earth00-19-{:s}.dat.gz\".format(ephem_version)\n )\n sun_ephem = os.path.join(\n os.environ[env_var], \"sun00-19-{:s}.dat.gz\".format(ephem_version)\n )\n if not (os.path.isfile(earth_ephem) and os.path.isfile(sun_ephem)):\n logging.warning(\n \"Default [earth/sun]00-[19/40]-\" + ephem_version + \" ephemerides \"\n \"not found in the \" + os.environ[env_var] + \" directory. \" + please\n )\n earth_ephem = None\n sun_ephem = None\n else:\n logging.warning(\n \"No \" + config_file + \" file or $\" + env_var + \" environment \"\n \"variable found. \" + please\n )\n earth_ephem = None\n sun_ephem = None\n return earth_ephem, sun_ephem",
"def virus_epitopes2tsv(self):\n\t\tprint(\"Save current virus epitopes to csv\")\n\n\t\tepitopes_name = \"imported_iedb_epitopes.tsv\"\n\t\tif not exists(join(self.output_path, epitopes_name)):\n\t\t\tprint(\"Create file: {}\".format(epitopes_name))\n\t\t\twith open(join(self.output_path, epitopes_name), \"w\") as epitopes_out:\n\t\t\t\tepitopes_out.write(\n\t\t\t\t\t\"epitope_id\\tvirus_taxid\\tsource_host_iri\\tsource_host_name\\thost_ncbi_id\\tprotein_ncbi_id\\tcell_type\\tmhc_class\\tmhc_restriction\\tresponse_frequency_pos\\tassay_types\\tepitope_sequence\\tepitope_start\\tepitope_stop\\texternal_links\\tprediction_process\\tis_linear\\n\")\n\n\t\twith open(join(self.output_path, epitopes_name), \"a\") as epitopes_out:\n\t\t\tprint(\"Update file: {}\".format(epitopes_name))\n\t\t\tfor epitope in self.current_virus_epitopes:\n\t\t\t\tprint(\"Write IEDB imported epitope\")\n\t\t\t\tepi_attributes = epitope.get_all_attributes()\n\n\t\t\t\tif epi_attributes[\"is_linear\"]:\n\t\t\t\t\tepi_seq = epi_attributes[\"region_seq\"]\n\t\t\t\telse:\n\t\t\t\t\tepi_seq = None\n\t\t\t\tif not epi_seq:\n\t\t\t\t\tepi_seq = ''\n\t\t\t\tif not epi_attributes[\"mhc_class\"]:\n\t\t\t\t\tepi_attributes[\"mhc_class\"] = ''\n\t\t\t\tif not epi_attributes[\"mhc_allele\"]:\n\t\t\t\t\tepi_attributes[\"mhc_allele\"] = ''\n\n\t\t\t\tepitope_row = \"\\t\".join(\n\t\t\t\t\t[str(epi_attributes[\"epitope_id\"]), epi_attributes[\"virus_taxid\"],\n\t\t\t\t\t epi_attributes[\"host_iri\"], epi_attributes['host_name'], epi_attributes['host_ncbi_id'],\n\t\t\t\t\t epi_attributes[\"protein_ncbi_id\"], epi_attributes[\"cell_type\"],\n\t\t\t\t\t epi_attributes[\"mhc_class\"], epi_attributes[\"mhc_allele\"],\n\t\t\t\t\t str(epi_attributes[\"response_frequency_positive\"]),\n\t\t\t\t\t str(epi_attributes[\"assay_types\"]), epi_seq,\n\t\t\t\t\t str(epi_attributes[\"region_start\"]), str(epi_attributes[\"region_stop\"]),\n\t\t\t\t\t \",\".join(epi_attributes[\"external_links\"]),\n\t\t\t\t\t str(epi_attributes[\"prediction_process\"]), str(epi_attributes[\"is_linear\"]),\n\t\t\t\t\t epi_attributes[\"epitope_iri\"], epi_attributes[\"iedb_epitope_id\"]])\n\t\t\t\tepitopes_out.write(epitope_row + \"\\n\")\n\t\tprint(\"====\")",
"def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps",
"def getPEInputFiles(input_dir, isPE=True):\n\tsys.stderr.write(\"Pair input files from %s ...\"%input_dir)\n\tpairedEndPrefix2FileLs = {}\n\tfiles = os.listdir(input_dir)\n\tno_of_fastq_files = 0\n\tfor fname in files:\n\t\tfname_prefix, fname_suffix = utils.getRealPrefixSuffixOfFilenameWithVariableSuffix(fname)\n\t\tif fname_suffix!='.fastq':\t\t#skip non-fastq files\n\t\t\tcontinue\n\t\tno_of_fastq_files += 1\n\t\tif isPE==True:\n\t\t\tpairedEndPrefix = fname_prefix[:-2]\n\t\t\tpairedEndOrder = fname_prefix[-2:]\n\t\t\t\n\t\t\tif pairedEndPrefix not in pairedEndPrefix2FileLs:\n\t\t\t\tpairedEndPrefix2FileLs[pairedEndPrefix] = ['', '']\n\t\t\t\n\t\t\tif pairedEndOrder=='_1':\t#the first file\n\t\t\t\tpairedEndPrefix2FileLs[pairedEndPrefix][0] = fname\n\t\t\telse:\n\t\t\t\tpairedEndPrefix2FileLs[pairedEndPrefix][1] = fname\n\t\telse:\n\t\t\tpairedEndPrefix2FileLs[fname_prefix] = [fname]\t#single End\n\tno_of_files = len(files)\n\tno_of_pairedEndPrefix = len(pairedEndPrefix2FileLs)\n\tif no_of_pairedEndPrefix>0:\n\t\tavg_no_of_files_per_prefix = no_of_fastq_files/float(no_of_pairedEndPrefix)\n\telse:\n\t\tavg_no_of_files_per_prefix = 0.0\n\tsys.stderr.write(\"%.2f files per one pairedEnd prefix. %s fastq files. %s total files. Done.\\n\"%\\\n\t\t\t\t\t(avg_no_of_files_per_prefix, no_of_fastq_files, no_of_files))\n\treturn pairedEndPrefix2FileLs",
"def reconstruct_evp(self):\n new_evp = \"e%s:%s;%s;%s;%s\\n\" % (self.id, self.min_max_points[\"min_x\"], self.min_max_points[\"min_y\"],\n self.min_max_points[\"max_x\"], self.min_max_points[\"max_y\"])\n for path_id in self.paths_order:\n path = self.paths[path_id]\n new_evp += \"p%s%s:\" % (path.id, path.type)\n for point_id in path.points_order:\n coords = path.points[point_id]\n new_evp += \"t%s~%s,%s;%s,%s;%s,%s;\" % (point_id, coords[0][0], coords[0][1], coords[1][0],\n coords[1][1], coords[2][0], coords[2][1])\n\n if path.type in [\"r\", \"l\"]: # skip if the path is not closed\n new_evp += \":r,%s,%s,%s,%s,%s\" % tuple(path.radial)\n new_evp += \":l,%s,%s,%s,%s:\" % tuple(path.linear)\n\n for stop in path.stops:\n params = stop[\"params\"]\n new_evp += \"o%s~%s,%s,%s;\" % (stop[\"stop_id\"], params[0], params[1], params[2])\n\n new_evp += \":s%s,%s,%s\\n\" % tuple(path.stroke)\n self.evp = new_evp\n return",
"def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes",
"def get_2Dtodo(loc=BASE):\n toproc = []\n for ff in Path(loc).glob(\"**/*.d/proces*.mscf\"):\n if (ff.parent/'ser').exists():\n toproc.append(ff)\n if DEBUG:\n print('get_2Dtodo:')\n pprint([str(i.parent.name) for i in toproc])\n return toproc",
"def extract_mediapackage_endpoints(mp_client, mp_channel_id_list):\n emp_endpoint_list = {}\n for channel in mp_channel_id_list:\n emp_endpoint_list[str(channel)] = []\n response = mp_client.list_origin_endpoints()\n for endpoint in response['OriginEndpoints']:\n if str(endpoint[\"ChannelId\"]) in mp_channel_id_list:\n emp_endpoint_list[str(endpoint[\"ChannelId\"])].append(str(endpoint['Id']))\n return emp_endpoint_list",
"def list_result_ephemerides_files(\n self, page_size: int = 100, page_token: str = None) -> Dict:\n params = {}\n if page_size < 0 or page_size > 100:\n page_size = 100\n params['pageSize'] = page_size\n if page_token:\n params['pageToken'] = page_token\n ephs = self._rp._rest.get(\n f'/projects/{self._rp._project}/jobs/{self._job_uuid}'\n f'/ephemerides?{urllib.parse.urlencode(params)}')\n return ephs",
"def epics(self):\n epics_list = []\n data = self.zenhub.get(f\"/p1/repositories/{self.id}/epics\")\n if data:\n epics_list = [\n Epic(epic_data, epic_data['issue_number'], self)\n for epic_data in data['epic_issues']\n ]\n return epics_list",
"def generate_peptide_list(self):\n\n t0 = timer.time()\n\n seq_obj = mspy.sequence(self.sequence)\n peptide_objects = mspy.mod_proteo.digest(seq_obj,\n 'Non-Specific',miscleavage=self.max_length)\n self.peptide_indexed_iso_maxpeak = {}\n self.peptide_indexed_iso_dist = {}\n for peptide in peptide_objects:\n compound = mspy.obj_compound.compound(peptide.formula())\n self.peptide_indexed_iso_dist[peptide.format()] = {}\n self.peptide_indexed_iso_maxpeak[peptide.format()] = {}\n for z in range(self.charge_min,self.charge_max+1):\n pattern = compound.pattern(charge=z,real=False)\n if pattern[0][0] > self.mz_min and pattern[-1][0] < self.mz_max:\n highest_intensity_peak = max(pattern, key=lambda p: p[1])\n self.peptide_indexed_iso_maxpeak[peptide.format()][str(z)] = highest_intensity_peak[0]\n self.peptide_indexed_iso_dist[peptide.format()][str(z)] = pattern\n\n t1 = timer.time() - t0\n print 'Produced peptide isotopic distributions in %s ' % t1",
"def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, 
self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pair up each epi with a fieldmap. | def _SetFmapInfo(self):
for epi in self.pfiles + self.epirt_paths:
self.info[epi]['fmapname'] = None
self.info[epi]['fmap_entry'] = None
for entry in self.entry_map['fmap']:
fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']
if self.info[entry]['plane'] == self.info[epi]['plane']:
# Use the fieldmap acquired at the same plane.
self.info[epi]['fmapname'] = fmap_name
self.info[epi]['fmap_entry'] = entry
break
else:
# for fmap in self.fmaps.keys():
for entry in self.entry_map['fmap']:
# No fmap at same orientation, look for fmaps in other planes.
# There won't be more than one, so it isn't much of a choice.
fmap_name = self.info[entry]['imgfile'] + \
self.info[entry]['suffix']
if self.info[entry]['plane'] == 'sagittal':
self.info[epi]['fmapname'] = fmap_name
self.info[epi]['fmap_entry'] = entry
break
elif self.info[entry]['plane'] == 'axial':
self.info[epi]['fmapname'] = fmap_name
self.info[epi]['fmap_entry'] = entry
break
elif self.info[entry]['plane'] == 'coronal':
self.info[epi]['fmapname'] = fmap_name
self.info[epi]['fmap_entry'] = entry
break
elif self.info[entry]['plane'] == 'oblique':
self.info[epi]['fmapname'] = fmap_name
self.info[epi]['fmap_entry'] = entry
self.info[epi]['plane'] = 'oblique'
break | [
"def _map(event_name, data):\n pk = _pk(data)\n for (column, value) in data.items():\n yield (event_name, pk, column, value)",
"def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]",
"def gensim_mapfields_dict(record, fields, filter_by_fields, dictionary, dst_field, id2token={}, dbg_field_name = \"g_\"):\n has_filter_by_fields = sum(1 for field in filter_by_fields if field in record) == len(filter_by_fields)\n if has_filter_by_fields: \n fields_list_of_words = reduce(lambda w1,w2: w1+w2, (record[field].split() for field in fields if field in record) ) \n fields_words_ids = dictionary.doc2bow(fields_list_of_words)\n logging.debug(\"[gensim_mapfields_dict]\"+str(fields_list_of_words)+\" -> \"+str(fields_words_ids))\n if len(fields_words_ids) > 0:\n record[dst_field] = zbl_io.pack_listpairs_field( fields_words_ids )\n record[dbg_field_name] = zbl_io.pack_listpairs_field( (idx,id2token.get(idx,'?')) for idx,count in fields_words_ids ) #this-line is for debugging purposes\n return record",
"def map_fields(self):\n fields = {}\n m2ms = {}\n\n for (django_name, options) in self.field_map.iteritems():\n raw_val = self.get_raw_value(options)\n\n # Begin type conversion...\n\n # FKs / M2Ms are stored in dicts b/c we also need to know the model they reference\n try:\n fk_model = options.conversion.get('phab_fk')\n m2m_model = options.conversion.get('phab_m2m')\n except AttributeError:\n # conversion isn't defined in a dict\n fk_model = None\n m2m_model = None\n\n # convert raw value to correct Python type...\n # Relationships\n if m2m_model:\n val = self.convert_phab_m2m(raw_val, m2m_model)\n elif fk_model and raw_val is not None:\n try:\n val = self.convert_phab_fk(raw_val, fk_model)\n except RelationNotImportedError:\n if options.required:\n raise\n else:\n val = None\n elif options.conversion == 'm2m':\n # We should expect a list of model instances here\n val = raw_val\n\n # other types\n elif options.conversion in (str, 'phid'):\n # string vals don't need conversion\n val = raw_val\n\n elif options.conversion == 'timestamp':\n # Timestamps need conversion to datetimes\n val = datetime.datetime.fromtimestamp(int(raw_val))\n # And saving naive ones raises warnings\n val = timezone.make_aware(val, timezone.get_current_timezone())\n\n elif callable(options.conversion):\n # Any callable other than `str` can be used for a custom conversion\n val = options.conversion(raw_val)\n else:\n # if all else fails\n val = raw_val\n\n # add it to the list of values we'll return\n if val is not None:\n if m2m_model or options.conversion == 'm2m':\n m2ms[django_name] = val\n else:\n fields[django_name] = val\n\n return fields, m2ms",
"def fieldmap(dictseq,name,func):\n for d in dictseq:\n d[name] = func(d[name])\n yield d",
"def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' % field.name)\n field_map[field.name] = field\n return field_map",
"def build_fieldmap(tables, outfields, uidname=None):\n #Start with the fields as they currently exist in the input featureclasses.\n originalmaps = arcpy.FieldMappings()\n for table in tables:\n originalmaps.addTable(table)\n #Build the output fieldmap from the original field definitions.\n outmap = arcpy.FieldMappings()\n for field in outfields:\n fieldidx = originalmaps.findFieldMapIndex(field)\n if fieldidx >= 0:\n outmap.addFieldMap(originalmaps.getFieldMap(fieldidx))\n else:\n errmsg = 'Error adding the field {0} to the output field map.'\n raise ArcError(errmsg.format(field))\n\n #If a uid fieldname has been passed, modify the UID field to implement\n # the merge rules, etc. necessary for the spatial join. This part is\n # specific to what we need for NoticeMe.\n if uidname is not None:\n uididx = outmap.findFieldMapIndex(uidname)\n if uididx >= 0:\n uidmap = outmap.getFieldMap(uididx)\n uidfield = uidmap.outputField\n #This is a really long field, because the join throws an error if it\n # can't fit the output into the results field. I wish there were a\n # way to get final field length before I actually execute the join,\n # but there's no way to really do that.\n uidfield.length = 300000\n uidfield.name = uidname\n uidmap.outputField = uidfield\n uidmap.mergeRule = 'join'\n uidmap.joinDelimiter = '|'\n outmap.replaceFieldMap(uididx, uidmap)\n else:\n errmsg = 'UID Field {0} not found in input tables.'\n raise ArcError(errmsg.format(uidname))\n\n return outmap",
"def _extract_field(in_file, epi_meta):\n from nipype.utils.filemanip import fname_presuffix\n import numpy as np\n import nibabel as nb\n from sdcflows.utils.epimanip import get_trt\n\n fieldnii = nb.load(in_file[0])\n trt = get_trt(epi_meta[1], in_file=epi_meta[0])\n data = (\n np.squeeze(fieldnii.get_fdata(dtype=\"float32\"))[\n ..., \"ijk\".index(epi_meta[1][\"PhaseEncodingDirection\"][0])\n ]\n / trt\n * (-1.0 if epi_meta[1][\"PhaseEncodingDirection\"].endswith(\"-\") else 1.0)\n )\n out_file = fname_presuffix(in_file[0], suffix=\"_fieldmap\")\n nii = nb.Nifti1Image(data, fieldnii.affine, None)\n nii.header.set_xyzt_units(fieldnii.header.get_xyzt_units()[0])\n nii.to_filename(out_file)\n return out_file",
"def _map_fields(fields):\n return {f.upper(): i for i, f in enumerate(fields)}",
"def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)",
"def edge_mapping(self):\n ...",
"def addfield(expdat,field,values):\n\n\tfor idx,csamp in enumerate(expdat.samples):\n\t\tif type(values)==str:\n\t\t\texpdat.smap[csamp][field]=values\n\t\telse:\n\t\t\texpdat.smap[csamp][field]=values[idx]\n\texpdat.fields.append(field)",
"def gensim_mapfields_dict_file(fin, fout, fields, filter_by_fields, dictionary, dst_field, dbg_field_name = \"g_\"):\n logging.info(\"[gensim_mapfields_dict_file] filter_by_fields=\"+str(filter_by_fields)+\\\n \" fields=\"+str(fields)+\" dictionary=\"+str(dictionary)+\" fin=\"+str(fin)+\" dst_field=\"+str(dst_field))\n id2token = dict( (idx,token) for idx,token in dictionary.iteritems() ) #this-line is for debugging purposes\n counter = 0\n for i,record in enumerate(zbl_io.read_zbl_records(fin)): \n if i%10000 == 0: logging.info(\"[gensim_mapfields_dict_file] \"+str(i)+\" records processed\")\n record = gensim_mapfields_dict(record, fields, filter_by_fields, dictionary, dst_field, id2token, dbg_field_name)\n if dst_field in record:\n counter = counter + 1 \n zbl_io.write_zbl_record(fout, record)\n fout.write(\"\\n\") \n return counter",
"def gensim_mapfield_model(fin, fout, model, src_field, dst_field,\\\n src_field_value_extractor=extract_bag_of_ids, dst_field_value_builder=zbl_io.pack_listpairs_field):\n logging.info(\"[gensim_mapfield_model] src_field=\"+str(src_field)+\\\n \" model=\"+str(model)+\" fin=\"+str(fin)+\" dst_field=\"+str(dst_field)+\\\n \" src_field_value_extractor=\"+str(src_field_value_extractor)+\" dst_field_value_builder=\"+str(dst_field_value_builder))\n counter = 0\n for i,record in enumerate(zbl_io.read_zbl_records(fin)): \n if i%10000 == 0: logging.info(\"[gensim_mapfield_model] \"+str(i)+\" documents mapped...\")\n if src_field in record:\n bag_of_ids = src_field_value_extractor(record[src_field])\n tfidf_values = model[bag_of_ids]\n record[dst_field] = dst_field_value_builder(tfidf_values)\n logging.debug(\"[gensim_mapfield_model]\"+record[src_field]+\" -> \"+record[dst_field])\n counter = counter + 1 \n zbl_io.write_zbl_record(fout, record)\n fout.write(\"\\n\")\n return counter",
"def mapper(record):\n personA = record[0]\n personB = record[1]\n mr.emit_intermediate(personA, personB)",
"def setup_mapping_data():\n\n global demat_evnts, demat_tmplates # demat_desks\n #demat_desks = dict(zip(DEMAT_ACQUIRERS, list( map(lambda x: acm.FParty[x] or None, DEMAT_ACQUIRERS ) ) ) )\n demat_evnts = dict(list(zip(DEMAT_EVENTS, list(map( lambda x: get_choice(x, 'Event'), DEMAT_EVENTS ) ) )))\n demat_tmplates = dict(list(zip(DEMAT_TEMPLATES, list(map( lambda x: get_choice(x, 'Conf Template'), DEMAT_TEMPLATES ) ) )) )",
"def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)",
"def _build_participant_pairing_map(self, files: List[ConsentFile]) -> Dict[int, ParticipantPairingInfo]:\n participant_ids = {file.participant_id for file in files}\n participant_pairing_data = self.participant_dao.get_pairing_data_for_ids(participant_ids)\n return {\n participant_id: ParticipantPairingInfo(hpo_name=hpo_name, org_name=org_name, site_name=site_name)\n for participant_id, hpo_name, org_name, site_name in participant_pairing_data\n }",
"def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
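Note: the _SetFmapInfo document in the row above pairs each EPI run with a fieldmap acquired in the same plane and only falls back to a fieldmap in another plane when no same-plane match exists (the for/else branch). The sketch below is a minimal, self-contained illustration of that selection rule only; the pair_epi_with_fmap name and the flat info dict are simplified stand-ins for the tool's real bookkeeping, not its actual API.

def pair_epi_with_fmap(epi, fmap_entries, info):
    """Return the fieldmap entry to pair with `epi`, or None if none exist."""
    for entry in fmap_entries:
        if info[entry]['plane'] == info[epi]['plane']:
            return entry  # a same-plane fieldmap always wins
    # for/else fallback in the original: no same-plane fieldmap was found,
    # so take the first fieldmap acquired in any other plane.
    return fmap_entries[0] if fmap_entries else None

info = {
    'epi_run1': {'plane': 'axial'},
    'fmap_sag': {'plane': 'sagittal'},
    'fmap_ax': {'plane': 'axial'},
}
print(pair_epi_with_fmap('epi_run1', ['fmap_sag', 'fmap_ax'], info))  # -> 'fmap_ax'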
Find the hires structural image that was acquired nearest to "acqtime". | def _FindNearestAnat(self, acqtime):
tdiff_min = 1e6
for anat in self.entry_map['anat']:
if self.info[anat]['type'] == 'T1High' and \
self.info[anat]['InversionTime'] > 0.:
tdiff = abs(acqtime - self.info[anat]['acqtime'])
if tdiff < tdiff_min:
tdiff_min = tdiff
anat_min = anat
return anat_min | [
"def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]",
"def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return",
"def nearest (self, timet):\n def next_index (low, high):\n return math.floor(low+((high-low)/2)) \n\n def search_up (table, low, high, timet):\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n while (high - low) > 1 and searcht < timet:\n low = search\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n if searcht == timet:\n return (search, search)\n else:\n return (low, search)\n\n def search_down (table, low, high, timet):\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n while (high - low) > 1 and searcht > timet:\n high = search\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n if searcht == timet:\n return (search, search)\n else:\n return (search, high)\n\n low = 0\n high = self._table.nrows\n while low != high:\n low, high = search_up(self._table, low, high, timet)\n if low != high:\n low, high = search_down(self._table, low, high, timet)\n return low",
"def closest_on_screen_point(trajectory, viewpoint, yaw, gaze_on_screen):\n\n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n #pprint(traj_angles)\n\n #onscreen_idx, dists, *_ = find_closest_index(traj_angles, gaze_on_screen)\n #idx = closest_node(traj_angles, gaze_on_screen)\n idx = find_closest_index(traj_angles, gaze_on_screen)\n # print(idx)\n\n #traj_ref = trajectory[idx, :]\n screen_ref = traj_angles[idx, :]\n world_ref = trajectory[idx, :]\n\n path_dist = ab_path_length(trajectory, viewpoint, world_ref)\n path_dist /= 8.0 #time headway\n\n #plot_traj(screen_ref, gaze_on_screen, traj_angles)\n\n return(idx, screen_ref, world_ref, path_dist)#, traj_angles)",
"def find_closest_frame(point, trajs, cv_evals):\n\n closest_frame = None\n closest_distance = 1e10\n for i, t in enumerate(trajs):\n dists = np.linalg.norm(point - cv_evals[i], axis=1)\n # print(dists.shape, len(t))\n mindist_index = dists.argmin()\n mindist = dists[mindist_index]\n if mindist < closest_distance:\n # logger.debug(\"Found frame in %s at time %s\", simulation.id, t)\n closest_frame = t[mindist_index]\n closest_distance = mindist\n return closest_frame",
"def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)",
"def find_object(self, row, col, obj, searchp):\n apsize = obj.apsize\n if apsize == 0:\n apsize = searchp.defapsize\n self.signif = searchp.signif\n self.get_image_dims(apsize)\n\n # This is the limit of the grid we look in\n lim = apsize + searchp.maxshift2\n ist = False\n if obj.is_target():\n ist = True\n lim = apsize + searchp.maxshift\n\n colfrac, scol = math.modf(col)\n rowfrac, srow = math.modf(row)\n scol = int(scol)\n srow = int(srow)\n xypixoffsets = apoffsets.ap_offsets(col, row, apsize)\n xypixes = xypixoffsets + (scol, srow)\n # print(\"xypixes shape\", xypixes.shape, \"val\", xypixes)\n xpixes = xypixes[:,0]\n ypixes = xypixes[:,1]\n if xpixes.min() < self.mincol:\n raise FindResultErr(\"Cannot find {:s}, too close to left edge\".format(obj.dispname))\n if xpixes.max() >= self.maxcol:\n raise FindResultErr(\"Cannot find {:s}, too close to right edge\".format(obj.dispname))\n if ypixes.min() < self.minrow:\n raise FindResultErr(\"Cannot find {:s}, too close to bottom edge\".format(obj.dispname))\n if ypixes.max() >= self.maxrow:\n raise FindResultErr(\"Cannot find {:s}, too close to top edge\".format(obj.dispname))\n\n datavals = np.array([self.imagedata[y, x] for x, y in xypixes]) # NB Sky level subtracted\n\n # Normalise data values to 1 as fitting works better that way\n\n meanv = datavals.mean()\n ndatavals = datavals / meanv\n\n # print(\"Pixoffsets\", xypixoffsets, \"scol/srow\", scol, srow, \"col/rowfrac\", colfrac, rowfrac)\n try:\n lresult, lfiterrs = opt.curve_fit(gauss2d.gauss_circle, xypixoffsets, ndatavals, p0=(colfrac, rowfrac, ndatavals.max(), np.std(ndatavals)))\n except (TypeError, RuntimeError):\n raise FindResultErr(\"Unable to find {:s}\".format(obj.dispname))\n\n fr = FindResult(obj=obj, apsize=apsize)\n cdiff, rdiff, fr.amp, fr.sigma = lresult\n # print(\"After fit cdiff={:.4f} rdiff={:.4f} amp={:.4f} sigma={:.4f}\".format(*lresult))\n fr.xoffstd, fr.yoffstd, fr.ampstd, fr.sigmastd = np.diag(lfiterrs)\n if fr.xoffstd > searchp.offsetsig or fr.yoffstd > searchp.offsetsig:\n raise FindResultErr(\"Too great an offset error finding {:s} x={:.4g} y={:.4g}\".format(obj.dispname, fr.xoffstd, fr.yoffstd), True)\n\n # Restore from normalisation\n\n fr.amp *= meanv\n fr.ampstd *= meanv\n\n if fr.amp <= 0.0 or fr.ampstd <= 0 or fr.amp < fr.ampstd * searchp.ampsig or fr.sigma < fr.sigmastd * searchp.sigmasig:\n raise FindResultErr(\"Unable to find {:s} - too much error stderr amp {:.4g} sigma {:.4g}\".format(obj.dispname, fr.ampstd, fr.sigmastd))\n\n # The returned values of cdiff and rdiff are offsets from scol and srow\n # Set cdiff and rdiff in structure to where we expected them to be minus where they are\n\n fr.col = scol + cdiff\n fr.row = srow + rdiff\n fr.cdiff = col - fr.col\n fr.rdiff = row - fr.row\n fr.radeg = obj.ra\n fr.decdeg = obj.dec\n fr.istarget = ist\n\n if abs(fr.cdiff) > lim or abs(fr.rdiff) >= lim:\n raise FindResultErr(\"Unable to find {:s} - too much shift cdiff={:.2f} rdiff={:.2f}\".format(obj.dispname, fr.cdiff, fr.rdiff))\n\n # Now calculate ADUs from data and from fit\n\n fr.adus = np.sum(datavals)\n fr.calculate_mod_integral()\n return fr",
"def find_biggest_region(self):\n # copy the thresholded image\n cv.Copy( self.threshed_image, self.copy ) # copy self.threshed_image\n # this is OpenCV's call to find all of the contours:\n contours = cv.FindContours(self.copy, self.storage, cv.CV_RETR_EXTERNAL,\n cv.CV_CHAIN_APPROX_SIMPLE)\n \n # Next we want to find the *largest* contour\n if len(contours)>0:\n biggest = contours\n biggestArea=cv.ContourArea(contours)\n while contours != None:\n nextArea=cv.ContourArea(contours)\n if biggestArea < nextArea:\n biggest = contours\n biggestArea = nextArea\n contours=contours.h_next()\n \n #Use OpenCV to get a bounding rectangle for the largest contour\n br = cv.BoundingRect(biggest,update=0)\n\n #Extract the characteristics of the bounding box.\n xl=br[0]\n xr=xl + br[2]\n yt=br[1]\n yb=yt + br[3]\n\n #Draw a contour around the bounding box.\n cv.PolyLine(self.image,[[(xl,yt),(xl,yb),(xr,yb),(xr,yt)]],10, cv.RGB(0, 0, 255))\n\n #Publish the bounding box, in clockwise order, as well as the biggest area found\n self.publisher.publish(\"%i %i %i %i %i\" % (xl, yt, xr, yb, biggestArea))\n\n else: self.publisher.publish(\"None\") # If nothing is found, send the string \"None\"",
"def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a",
"def _nearest_depth(self, univ_time: float, kinect_node: str):\n sync_table = None\n with open(self.kinect_sync_table, 'r') as sync_table_file:\n sync_table = json.load(sync_table_file)\n timestamps = sync_table['kinect']['depth'][kinect_node]['univ_time']\n closest = min(range(len(timestamps)), key=lambda i: abs(timestamps[i] - univ_time))\n return closest",
"def calc_nearest_ind(self, robot_pose):\n pass",
"def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)",
"def disc_locate_angular(a, b, d, prepared_ffts):\n\n # search space\n t = np.linspace(1,360,36, dtype=np.float16)\n r = np.full(t.shape,a.shape[0]/10, dtype=np.float16)\n\n c1 = np.c_[r,t]\n\n res = np.array([hs_corr_norm(i,a,b,d, prepared_ffts, 'cv.TM_SQDIFF_NORMED') \\\n for i in c1]).reshape(36,6)\n dx,dy,cc,dxi,dyi,cci = [res[:,i] for i in range(6)]\n\n # find min\n cc_sum = cc+cci\n minval = np.min(cc_sum[np.nonzero(cc_sum)])\n minloc = np.where(cc_sum == minval)[0]\n r_sol, t_sol = c1[minloc[0], :]\n # Arc search\n t_min = (t_sol - 10) % 360\n t_max = (t_sol + 10) % 360\n t2 = np.linspace(t_min,t_max, 10, dtype=np.float16)\n r2 = np.linspace(0,a.shape[0]/4, 10, dtype=np.float16)\n\n r2, t2 = np.meshgrid(r2,t2)\n\n r2 = r2.flatten()\n t2 = t2.flatten()\n c2 = np.c_[r2,t2]\n res = np.array([hs_corr_norm(i,a,b,d, prepared_ffts, 'cv.TM_SQDIFF_NORMED') for i in c2]).reshape(10*10,6)\n dx,dy,cc,dxi,dyi,cci = [res[:,i] for i in range(6)]\n # find min\n cc_sum = cc+cci\n minval = np.min(cc_sum[np.nonzero(cc_sum)])\n minloc = np.where(cc_sum == minval)[0]\n if len(minloc)>1:\n minloc = minloc[0]\n minloc = int(minloc)\n # minimums\n r_sol, t_sol = c2[minloc, :]\n\n # find jump\n x0 = dx[minloc]\n x1 = dxi[minloc]\n y0 = dy[minloc]\n y1 = dyi[minloc]\n jump = np.sqrt((x1-x0)**2 + (y1-y0)**2)\n return r_sol, t_sol, jump",
"def most_similar_image():\n ref = images[0] # reference image\n result = np.linalg.norm(images[1:].astype(np.float) - ref.astype(np.float), axis=1)\n index = np.argmin(result)+1\n return index # 60",
"def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record",
"def find_hrc_calib_obsid(inst):\n##\n##--- create a list of already processed data\n##\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/6* > '+ zspace\n# os.system(cmd)\n# with open(zspace, 'r') as f:\n# ftest = f.read()\n# wrd = str(inst) + '/61'\n# mc = re.search(wrd, ftest)\n# if mc is not None:\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/61* >' + zspace\n# os.system(cmd)\n#\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/62* >' + zspace\n# os.system(cmd)\n#\n# data = mcf.read_data_file(zspace, remove=1)\n# prev_list = []\n# for ent in data:\n# atemp = re.split('\\/', ent)\n# prev_list.append(int(float(atemp[-1])))\n#\n##\n##--- find today's date and set checking range for the last 30 days\n##\n# today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n# today = int(Chandra.Time.DateTime(today).secs)\n# start = today - 10 * 86400\n##\n##--- extract hrc obsid information\n##\n# line = 'operation=browse\\n'\n# line = line + 'dataset=flight\\n'\n# line = line + 'level=1\\n'\n# line = line + 'detector=hrc\\n'\n# line = line + 'filetype=evt1\\n'\n# line = line + 'tstart=' + str(start) + '\\n'\n# line = line + 'tstop=' + str(today) + '\\n'\n# line = line + 'go\\n'\n#\n# with open('zline', 'w') as fo:\n# fo.write(line)\n#\n# cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > ' + zspace\n# os.system(cmd)\n#\n# mcf.rm_files('./zline')\n#\n# data = mcf.read_data_file(zspace, remove=1)\n##\n##--- select obsids with 61* and 62* starting\n##\n# h_list = []\n# for ent in data:\n# mc = re.search('hrcf', ent)\n# if mc is not None:\n# atemp = re.split('hrcf', ent)\n# btemp = re.split('_', atemp[1])\n# obsid = int(float(btemp[0]))\n# if obsid > 61000 and obsid < 63000:\n##\n##--- if it is already observed skip it\n##\n# if obsid in prev_list:\n# continue\n##\n##--- check which instrument\n##\n# chk = check_inst(obsid)\n# if chk == inst:\n# h_list.append(obsid)\n\n\n\n h_list = ['62410', '62423', '62435', '62437', '62439', '62441', '62443', '62635', '62637', '62649', '62973', '62997', '62422', '62426', '62436', '62438', '62440', '62442', '62446', '62636', '62638', '62796', '62991']\n\n\n return h_list",
"def imageMatcher(path, imgName, imagesFromWhichToSelect):\n\n maxSimilarity = 0\n similarImageName = '2.jpg'\n # Keypoints and descriptor of main image\n keyPoints1, d1 = database[imgName]\n best_good_points = []\n for fileName in imagesFromWhichToSelect:\n # Filename shouldn't be name of this image\n\n # print(fileName)\n keyPoints2, d2 = database[(fileName)]\n # print(len(keyPoints1))\n # print(len(keyPoints2))\n index_params = dict(algorithm = 1, trees = 5)\n search_params = dict()\n # Matching the two images\n flann = cv.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(np.asfarray(d1, np.float32), np.asfarray(d2, np.float32), k = 2)\n # NOTE: can be commented if image not being plotted\n matchesMask = [[0,0] for i in range(len(matches))]\n # Number of good matches\n good_points = []\n # Matching ratio between the two\n ratio = 0.8\n for i, (m, n) in enumerate(matches):\n if m.distance < ratio*n.distance:\n matchesMask[i]=[1,0]\n good_points.append(m)\n # print(len(good_points))\n # Max similarity\n if(len(good_points) > maxSimilarity):\n maxSimilarity = len(good_points)\n similarImageName = fileName\n best_good_points = list(good_points)\n # print(similarImageName)\n similarImage = readEqualisedImage(path + similarImageName)\n # Estimating Homography\n obj = np.empty((len(best_good_points), 2), dtype=np.float32)\n scene = np.empty((len(best_good_points), 2), dtype=np.float32)\n\n keyPoints2 = (database[similarImageName])[0]\n for i in range(len(best_good_points)):\n # -- Get the keypoints from the good matches\n obj[i, 0] = keyPoints1[best_good_points[i].queryIdx].pt[0]\n obj[i, 1] = keyPoints1[best_good_points[i].queryIdx].pt[1]\n scene[i, 0] = keyPoints2[best_good_points[i].trainIdx].pt[0]\n scene[i, 1] = keyPoints2[best_good_points[i].trainIdx].pt[1]\n\n # draw_params = dict(matchColor = (0,255,0),\n # singlePointColor = (255,0,0),\n # matchesMask = matchesMask,\n # flags = cv.DrawMatchesFlags_DEFAULT)\n # matchingImage = cv.drawMatchesKnn(similarImage,keyPoints2,img,keyPoints1,matches,None,**draw_params)\n # plt.imshow(matchingImage),plt.show()\n\n reprojThresh = 4.0\n (Homography, status) = cv.findHomography(scene, obj, cv.RANSAC, reprojThresh)\n # Homography = Homography.astype(np.float32)\n # Homography, status = cv.estimateAffine2D(scene, obj)\n # Homography = np.concatenate((Homography, np.array([[0, 0, 1]])))\n if(Homography.all() == None):\n print(\"Please input more similiar images, exiting now.......\")\n exit(0)\n return (Homography, status, similarImageName)",
"def cacheFindEntry(cache, cameraID, desiredTime):\n if not cameraID in cache:\n return None\n cameraTimes = cache[cameraID]\n closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))\n if abs(closestEntry['time'] - desiredTime) < 30:\n # logging.warning('close: %s', str(closestEntry))\n return os.path.join(cache['readDir'], closestEntry['fileName'])\n else:\n # logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))\n return None",
"def getImageTimeLoc(cursor, obsHistId):\n sql = \"\"\"select fieldRA, fieldDec, expMJD from %s.%s where obsHistId=%d;\"\"\" % (OPSIM_DB, OPSIM_TABLE, obsHistId)\n cursor.execute(sql)\n [ra,dec,time] = cursor.fetchone()\n return numpy.degrees(ra), numpy.degrees(dec), time"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
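Note: _FindNearestAnat in the row above is a linear scan that keeps the T1 entry minimizing the absolute difference between its acquisition time and acqtime. The same nearest-in-time selection can be written with min() and a key function, as sketched below; the plain dict of acquisition times is an illustrative stand-in for the tool's info structures, and returning None covers the no-candidate case that the loop-based version above does not guard.

def find_nearest(acqtime, acq_times):
    """Return the entry whose acquisition time is closest to `acqtime`."""
    if not acq_times:
        return None
    return min(acq_times, key=lambda entry: abs(acqtime - acq_times[entry]))

acq_times = {'anat_1': 100.0, 'anat_2': 460.0, 'anat_3': 900.0}
print(find_nearest(450.0, acq_times))  # -> 'anat_2'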
Create structures defining acquisition time for fieldmaps and anatomicals. First find the fieldmap (or hires structural if no fieldmap was collected) nearest (on average) to the epis. Then define this series as the one that should be in register with the epis. | def _SetAnatTgts(self):
anat_candidates = {}
fmap_candidates = {}
for entry in self.entry_map['anat']:
if self.info[entry]['type'] == 'T1High':
anat_candidates[entry] = self.info[entry]['acqtime']
# Find the valid anatomical acquired nearest to fieldmap.
tdiff_min = 1e6
if len(self.entry_map['fmap']) > 0:
for entry in self.entry_map['fmap']:
anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime'])
self.info[entry]['anat_ref'] = anat_tgt
else:
# No fieldmaps were collected. Find the structural nearest the
# beginning of the EPIs.
if len(self.entry_map['anat']) == 1:
anat_tgt = self.entry_map['anat'][0]
else:
epi_start = []
tmin = 1e6
for anat in self.entry_map['anat']:
if self.info[anat]['type'] != 'T1High':
continue
tsum1 = 0; tsum2 = 0;
for epi in self.entry_map['epi']:
# Difference from start of structural and first epi
tsum1 += abs(self.info[anat]['acqtime'] - \
self.info[epi]['acqtime'])
# Difference from start of structural and last epi
tsum2 += abs(self.info[anat]['acqtime'] - \
(self.info[epi]['acqtime'] +\
self.info[epi]['TR']*self.info[epi]['tdim']))
if tsum1 < tmin or tsum2 < tmin:
tmin = min(tsum1, tsum2)
anat_tgt = anat
# Resolve anatomical names and links.
self._SetAnatNames(anat_tgt)
# Set appropriate attributes in the entry for each EPI.
for epi in self.entry_map['epi']:
if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:
fmap_entry = self.info[epi]['fmap_entry']
anat_ref = self.info[fmap_entry]['anat_ref']
self.info[epi]['anat_tgt'] = fmap_entry
self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']
if self.align_fmaps or (not self.no_align_fmaps and \
self._SetCatMotionFmapMats(fmap_entry, anat_ref)):
# Concatenate motion-correction matrices with tranform from
# fieldmap to structural. Use the registered fieldmap.
self.info[epi]['catmats'] = True
fmap_info = self.info[self.info[epi]['fmap_entry']]
self.info[epi]['fmapname'] = \
fmap_info['imgfile_r'] + fmap_info['suffix']
else:
# Assume fieldmap is in register with the structural.
self.info[epi]['catmats'] = False
else:
self.info[epi]['anat_tgt'] = anat_tgt
self.info[epi]['anat_matfile'] = None
self.info[epi]['catmats'] = False
self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \
self.info[anat_tgt]['suffix'] | [
"def make_primarybeammap(datetimestring, delays, frequency,\n center=False,\n sunline=True,\n low=1,\n high=2000,\n plothourangle=True,\n extension='png',\n figsize=8,\n title=None,\n directory=None,\n tle=None,\n duration=300,\n moon=False,\n jupiter=False,\n verbose=False):\n su.init_data()\n\n # protect against log errors\n if (low <= 0):\n low = 1\n\n if not os.path.exists(config.RADIO_IMAGE_FILE):\n logger.error(\"Could not find 408 MHz image: %s\\n\" % (config.RADIO_IMAGE_FILE))\n return None\n try:\n if (verbose):\n print(\"Loading 408 MHz map from %s...\" % config.RADIO_IMAGE_FILE)\n f = pyfits.open(config.RADIO_IMAGE_FILE)\n except Exception as e:\n logger.error(\"Error opening 408 MHz image: %s\\nError: %s\\n\" % (config.RADIO_IMAGE_FILE, e))\n return None\n skymap = f[0].data[0]\n # x=skymap[:,0].reshape(-1,1)\n # x=skymap[:,0:10]\n # skymap=numpy.concatenate((skymap,x),axis=1)\n\n tlelines = []\n satellite_label = ''\n if tle is not None:\n try:\n tlefile = open(tle)\n tlelines = tlefile.readlines()\n tlefile.close()\n except Exception as e:\n logger.error('Could not open TLE file %s: %s' % (tle, e))\n\n ra = (f[0].header.get('CRVAL1') +\n (numpy.arange(1, skymap.shape[1] + 1) - f[0].header.get('CRPIX1')) * f[0].header.get('CDELT1')) / 15.0\n dec = (f[0].header.get('CRVAL2') +\n (numpy.arange(1, skymap.shape[0] + 1) - f[0].header.get('CRPIX2')) * f[0].header.get('CDELT2'))\n\n # parse the datetimestring\n try:\n yr = int(datetimestring[:4])\n mn = int(datetimestring[4:6])\n dy = int(datetimestring[6:8])\n hour = int(datetimestring[8:10])\n minute = int(datetimestring[10:12])\n second = int(datetimestring[12:14])\n except ValueError:\n logger.error('Could not parse datetimestring %s\\n' % datetimestring)\n return None\n\n s_obstime = su.TIMESCALE.utc(year=yr, month=mn, day=dy, hour=hour, minute=minute, second=second)\n observer = su.S_MWAPOS.at(s_obstime)\n\n # determine the LST\n LST_hours = s_obstime.gmst + (su.MWA_TOPO.longitude.degrees / 15)\n if LST_hours > 24.0:\n LST_hours -= 24.0\n if (verbose):\n print(\"For %s UT, LST=%6.4f\" % (s_obstime.utc_iso()[:-1], LST_hours))\n\n # this will be the center of the image\n RA0 = 0\n if (center):\n RA0 = LST_hours * 15\n else:\n if (6 < LST_hours < 18):\n RA0 = 180\n\n # use LST to get Az,Alt grid for image\n RA, Dec = numpy.meshgrid(ra * 15, dec)\n UTs = '%02d:%02d:%02d' % (hour, minute, second)\n a_obstime = Time('%d-%d-%d %s' % (yr, mn, dy, UTs), scale='utc')\n\n coords = SkyCoord(ra=RA, dec=Dec, equinox='J2000', unit=(astropy.units.deg, astropy.units.deg))\n coords.location = config.MWAPOS\n coords.obstime = a_obstime\n coords_prec = coords.transform_to('altaz')\n Az, Alt = coords_prec.az.deg, coords_prec.alt.deg\n\n # get the horizon line\n Az_Horz = numpy.arange(360.0)\n Alt_Horz = numpy.zeros(Az_Horz.shape)\n hequatorial = observer.from_altaz(alt_degrees=Alt_Horz,\n az_degrees=Az_Horz,\n distance=si.Distance(au=9e90))\n RA_H_a, Dec_H_a, _ = hequatorial.radec()\n RA_Horz, Dec_Horz = RA_H_a._degrees, Dec_H_a.degrees\n RA_Horz[numpy.where(RA_Horz > 180 + RA0)] -= 360\n RA_Horz[numpy.where(RA_Horz < -180 + RA0)] += 360\n\n maskedskymap = numpy.where(Alt > 0, skymap, numpy.nan)\n\n # figure out where the Sun will be\n RAsun, Decsun, Azsun, Altsun = sunposition(s_obstime)\n if (RAsun > 180 + RA0):\n RAsun -= 360\n if (RAsun < -180 + RA0):\n RAsun += 360\n RAsuns, Decsuns = sunpositions()\n RAsuns = numpy.array(RAsuns)\n Decsuns = numpy.array(Decsuns)\n\n HAsuns = -RAsuns + LST_hours * 15\n RAsuns = numpy.where(RAsuns > 180 + RA0, 
RAsuns - 360, RAsuns)\n RAsuns = numpy.where(RAsuns < -180 + RA0, RAsuns + 360, RAsuns)\n\n ra_sat = []\n dec_sat = []\n time_sat = []\n if tlelines is not None and len(tlelines) >= 3:\n satellite_label = tlelines[0].replace('_', r'\\_').replace('\\n', '')\n satellite = si.EarthSatellite(tlelines[1], tlelines[2], name=tlelines[0], ts=su.TIMESCALE)\n ra_sat, dec_sat, time_sat, sublong_sat, sublat_sat = satellite_positions(satellite,\n a_obstime.gps,\n range(0, duration, 1),\n RA0=RA0)\n\n # do the plotting\n # this sets up the figure with the right aspect ratio\n fig = pylab.figure(figsize=(figsize, 0.5 * figsize), dpi=120)\n ax1 = fig.add_subplot(1, 1, 1)\n # this is the Haslam map, plotted as a log-scale\n # it is slightly transparent since this does below the horizon too\n ax1.imshow(numpy.log10(skymap),\n cmap=pylab.cm.get_cmap('gray_r'),\n aspect='auto',\n vmin=math.log10(low),\n vmax=math.log10(high),\n origin='lower',\n extent=(ra[0], ra[-1], dec[0], dec[-1]),\n alpha=0.9)\n ax1.imshow(numpy.log10(maskedskymap),\n cmap=pylab.cm.get_cmap('gray_r'),\n aspect='auto',\n vmin=0,\n vmax=math.log10(2000),\n origin='lower',\n extent=(ra[0], ra[-1], dec[0], dec[-1]))\n # this is the Haslam map but only above the horizon\n ax1.imshow(numpy.log10(skymap),\n cmap=pylab.cm.get_cmap('gray_r'),\n aspect='auto',\n vmin=math.log10(low),\n vmax=math.log10(high),\n origin='lower',\n extent=(ra[0] + 24, ra[-1] + 24, dec[0], dec[-1]),\n alpha=0.9)\n ax1.imshow(numpy.log10(maskedskymap),\n cmap=pylab.cm.get_cmap('gray_r'),\n aspect='auto',\n vmin=math.log10(low),\n vmax=math.log10(high),\n origin='lower',\n extent=(ra[0] + 24, ra[-1] + 24, dec[0], dec[-1]))\n\n contourcolors = ['r', 'c', 'y', 'm', 'w', 'g', 'b']\n if (isinstance(frequency, float) or isinstance(frequency, int)):\n if (verbose):\n print(\"Creating primary beam response for frequency %.2f MHz...\" % (frequency))\n print(\"Beamformer delays are %s\" % delays)\n r = return_beam(Alt, Az, delays, frequency)\n if (r is None):\n return None\n Z2 = numpy.where(r >= min(contourlevels), r, 0)\n\n if (verbose):\n i = numpy.nonzero(Z2 == Z2.max())\n ramax = RA[i][0]\n if (ramax < 0):\n ramax += 360\n print(\"Sensitivity is max at (RA,Dec)=(%.5f,%.5f)\" % (ramax, Dec[i][0]))\n\n # put on contours for the beam\n ax1.contour(RA / 15.0, Dec, Z2, contourlevels, colors='r')\n ax1.contour(RA / 15.0 - 24, Dec, Z2, contourlevels, colors='r')\n ax1.contour(RA / 15.0 + 24, Dec, Z2, contourlevels, colors='r')\n else:\n icolor = 0\n for f in frequency:\n color = contourcolors[icolor]\n if (verbose):\n print(\"Creating primary beam response for frequency %.2f MHz...\" % (f))\n print(\"Beamformer delays are %s\" % delays)\n r = return_beam(Alt, Az, delays, f)\n if r is None:\n return None\n Z2 = numpy.where(r >= min(contourlevels), r, 0)\n\n if (verbose):\n i = numpy.nonzero(Z2 == Z2.max())\n ramax = RA[i][0]\n if (ramax < 0):\n ramax += 360\n print(\"Sensitivity is max at (RA,Dec)=(%.5f,%.5f)\" % (ramax, Dec[i][0]))\n\n # put on contours for the beam\n ax1.contour(RA / 15.0, Dec, Z2, contourlevels, colors=color)\n ax1.contour(RA / 15.0 - 24, Dec, Z2, contourlevels, colors=color)\n ax1.contour(RA / 15.0 + 24, Dec, Z2, contourlevels, colors=color)\n icolor += 1\n if (icolor >= len(contourcolors)):\n icolor = 0\n\n # plot the horizon line\n RA_Horz, Dec_Horz = list(zip(*sorted(zip(RA_Horz, Dec_Horz))))\n ax1.plot(numpy.array(RA_Horz) / 15.0, numpy.array(Dec_Horz), 'k')\n x1 = 12 + RA0 / 15\n x2 = -12 + RA0 / 15\n ax1.set_xlim(left=x1, right=x2)\n 
ax1.set_ylim(bottom=-90, top=90)\n ax1.set_xticks(numpy.arange(-12 + int(RA0 / 15), 15 + int(RA0 / 15), 3))\n ll = []\n for x in numpy.arange(-12 + int(RA0 / 15), 15 + int(RA0 / 15), 3):\n if (0 <= x < 24):\n ll.append('%d' % x)\n elif (x >= 24):\n ll.append('%d' % (x - 24))\n else:\n ll.append('%d' % (x + 24))\n ax1.set_xticklabels(ll)\n ax1.set_yticks(numpy.arange(-90, 105, 15))\n ax1.set_xlabel('Right Ascension (hours)')\n ax1.set_ylabel('Declination (degrees)')\n # plot the Sun\n ax1.plot(RAsun / 15.0, Decsun, 'yo', markersize=10)\n RAsuns, Decsuns = list(zip(*sorted(zip(RAsuns, Decsuns))))\n if (sunline):\n ax1.plot(numpy.array(RAsuns) / 15.0, numpy.array(Decsuns), 'y-')\n\n if moon:\n RAmoon, Decmoon, Azmoon, Altmoon = moonposition(s_obstime)\n if (RAmoon > 180 + RA0):\n RAmoon -= 360\n if (RAmoon < -180 + RA0):\n RAmoon += 360\n ax1.plot(RAmoon / 15.0, Decmoon, 'ko', markersize=10)\n print(RAmoon, Decmoon)\n\n if jupiter:\n RAjupiter, Decjupiter, Azjupiter, Altjupiter = jupiterposition(s_obstime)\n if (RAjupiter > 180 + RA0):\n RAjupiter -= 360\n if (RAjupiter < -180 + RA0):\n RAjupiter += 360\n ax1.plot(RAjupiter / 15.0, Decjupiter, 'bo', markersize=8)\n print(RAjupiter, Decjupiter)\n\n if len(ra_sat) > 0:\n coords = SkyCoord(ra=ra_sat, dec=dec_sat, equinox='J2000', unit=(astropy.units.deg, astropy.units.deg))\n coords.location = config.MWAPOS\n coords.obstime = a_obstime\n coords_prec = coords.transform_to('altaz')\n Azsat, Altsat = coords_prec.az.deg, coords_prec.alt.deg\n\n rsat = return_beam(Altsat, Azsat, delays, frequency)\n ax1.plot(numpy.array(ra_sat) / 15.0, numpy.array(dec_sat), 'c-')\n ax1.scatter(numpy.array(ra_sat) / 15.0,\n numpy.array(dec_sat),\n # c=numpy.arange(len(ra_sat))/(1.0*len(ra_sat)),\n # cmap=pylab.cm.hsv,\n c=1 - rsat,\n cmap=pylab.cm.get_cmap('Blues'),\n alpha=0.5,\n edgecolors='none')\n ax1.text(ra_sat[0] / 15.0,\n dec_sat[0],\n time_sat[0].strftime('%H:%M:%S'),\n fontsize=8,\n horizontalalignment='left',\n color='c')\n ax1.text(ra_sat[-1] / 15.0,\n dec_sat[-1],\n time_sat[-1].strftime('%H:%M:%S'),\n fontsize=8,\n horizontalalignment='left',\n color='c')\n\n # add text for sources\n for source in sources:\n r = Angle(sources[source][1], unit=astropy.units.hour).hour\n d = Angle(sources[source][2], unit=astropy.units.deg).deg\n horizontalalignment = 'left'\n x = r - 0.2\n if (len(sources[source]) >= 6 and sources[source][5] == 'c'):\n horizontalalignment = 'center'\n x = r\n if (len(sources[source]) >= 6 and sources[source][5] == 'r'):\n horizontalalignment = 'right'\n x = r + 0.1\n if (x > 12 + RA0 / 15):\n x -= 24\n if (x < -12 + RA0 / 15):\n x += 24\n fontsize = defaultsize\n if (len(sources[source]) >= 5):\n fontsize = sources[source][4]\n color = defaultcolor\n if (len(sources[source]) >= 4):\n color = sources[source][3]\n ax1.text(x,\n d,\n sources[source][0],\n horizontalalignment=horizontalalignment,\n fontsize=fontsize,\n color=color,\n verticalalignment='center')\n\n if (isinstance(frequency, int) or isinstance(frequency, float)):\n textlabel = '%04d-%02d-%02d %02d:%02d:%02d %.2f MHz' % (yr, mn, dy, hour, minute, second, frequency)\n else:\n\n fstring = \"[\" + ','.join([\"%.2f\" % f for f in frequency]) + \"]\"\n textlabel = '%04d-%02d-%02d %02d:%02d:%02d %s MHz' % (yr, mn, dy, hour, minute, second, fstring)\n icolor = 0\n for i in range(len(frequency)):\n color = contourcolors[icolor]\n ax1.text(x1 - 1,\n 70 - 10 * i,\n '%.2f MHz' % frequency[i],\n fontsize=12,\n color=color,\n horizontalalignment='left')\n icolor += 1\n if (icolor >= 
len(contourcolors)):\n icolor = 0\n\n if title is not None:\n title = title.replace('_', r'\\_')\n textlabel = title + ' ' + textlabel\n if (plothourangle):\n ax2 = ax1.twiny()\n p = ax2.plot(HAsuns / 15, Decsuns, 'y-')\n p[0].set_visible(False)\n ax1.set_ylim(bottom=-90, top=90)\n ax2.set_ylim(bottom=-90, top=90)\n ax1.set_yticks(numpy.arange(-90, 105, 15))\n # x1b=x1-LST_hours\n # x2b=x2-LST_hours\n x1b = -x1 + LST_hours\n # x2b = -x2 + LST_hours\n while (x1b < 0):\n x1b += 24\n while (x1b > 24):\n x1b -= 24\n x2b = x1b - 24\n ax2.set_xlim(left=x2b, right=x1b)\n ax2.set_xlabel('Hour Angle (hours)')\n ax1.text(x1 - 1,\n 80,\n textlabel,\n fontsize=14,\n horizontalalignment='left')\n if len(satellite_label) > 0:\n ax1.text(x1 - 1,\n 70,\n satellite_label,\n fontsize=14,\n horizontalalignment='left',\n color='c')\n\n else:\n ax1.set_title(textlabel)\n\n # print ax1.get_xlim()\n # try:\n # print ax2.get_xlim()\n # except:\n # pass\n if (isinstance(frequency, int) or isinstance(frequency, float)):\n filename = '%s_%.2fMHz.%s' % (datetimestring, frequency, extension)\n else:\n filename = '%s_%.2fMHz.%s' % (datetimestring, frequency[0], extension)\n if directory is not None:\n filename = directory + '/' + filename\n try:\n pylab.savefig(filename)\n except RuntimeError as err:\n logger.error('Error saving figure: %s\\n' % err)\n return None\n\n return filename",
"def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None, \n max_steps=None, e_field_scaling_only=False):\n \n if step_size is None:\n step_size = 100.\n if max_steps is None:\n max_steps = 1000\n steps = np.arange(max_steps)\n\n # use spacecraft location to get ECEF\n ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)\n\n # prepare output\n eq_zon_drifts_scalar = []\n eq_mer_drifts_scalar = []\n # magnetic field info\n north_mag_scalar = []\n south_mag_scalar = []\n eq_mag_scalar = []\n out = {}\n # meridional e-field scalar map, can also be\n # zonal ion drift scalar map\n # print ('Starting Northern')\n north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,\n glons, alts, dates, 'north',\n 'meridional',\n step_size=step_size,\n max_steps=max_steps, \n edge_length=25.,\n edge_steps=5)\n\n north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,\n glons, alts, dates, 'north',\n 'zonal',\n step_size=step_size,\n max_steps=max_steps, \n edge_length=25.,\n edge_steps=5)\n\n # print ('Starting Southern')\n south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,\n glons, alts, dates, 'south',\n 'meridional',\n step_size=step_size,\n max_steps=max_steps, \n edge_length=25.,\n edge_steps=5)\n\n south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,\n glons, alts, dates, 'south',\n 'zonal',\n step_size=step_size,\n max_steps=max_steps, \n edge_length=25.,\n edge_steps=5)\n # print ('Starting Equatorial') \n # , step_zon_apex2, mind_plus, mind_minus \n eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,\n 'meridional',\n edge_length=25., \n edge_steps=5)\n # , step_mer_apex2, mind_plus, mind_minus \n eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,\n 'zonal',\n edge_length=25., \n edge_steps=5)\n # print ('Done with core')\n north_zon_drifts_scalar = north_zon_drifts_scalar/50. \n south_zon_drifts_scalar = south_zon_drifts_scalar/50. \n north_mer_drifts_scalar = north_mer_drifts_scalar/50. \n south_mer_drifts_scalar = south_mer_drifts_scalar/50. 
\n # equatorial \n eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar\n eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar\n\n if e_field_scaling_only:\n # prepare output\n out['north_mer_fields_scalar'] = north_zon_drifts_scalar\n out['south_mer_fields_scalar'] = south_zon_drifts_scalar\n out['north_zon_fields_scalar'] = north_mer_drifts_scalar\n out['south_zon_fields_scalar'] = south_mer_drifts_scalar\n out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar\n out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar\n \n else:\n # figure out scaling for drifts based upon change in magnetic field\n # strength\n for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs, \n glats, glons, alts, \n dates): \n yr, doy = pysat.utils.getyrdoy(date)\n double_date = float(yr) + float(doy) / 366.\n # get location of apex for s/c field line\n apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(\n [glat], [glon], \n [alt], [date]) \n # trace to northern footpoint\n sc_root = np.array([ecef_x, ecef_y, ecef_z])\n trace_north = field_line_trace(sc_root, double_date, 1., 120., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # southern tracing\n trace_south = field_line_trace(sc_root, double_date, -1., 120., \n steps=steps,\n step_size=step_size, \n max_steps=max_steps)\n # footpoint location\n north_ftpnt = trace_north[-1, :]\n nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)\n south_ftpnt = trace_south[-1, :]\n sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)\n \n # scalar for the northern footpoint electric field based on distances\n # for drift also need to include the magnetic field, drift = E/B\n tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt, \n np.deg2rad(90.-glat), \n np.deg2rad(glon))\n # get mag field and scalar for northern footpoint\n tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt, \n np.deg2rad(90.-nft_glat), \n np.deg2rad(nft_glon))\n north_mag_scalar.append(b_sc/b_nft) \n # equatorial values\n tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt, \n np.deg2rad(90.-apex_lat), \n np.deg2rad(apex_lon))\n eq_mag_scalar.append(b_sc/b_eq) \n # scalar for the southern footpoint\n tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt, \n np.deg2rad(90.-sft_glat), \n np.deg2rad(sft_glon))\n south_mag_scalar.append(b_sc/b_sft)\n \n # make E-Field scalars to drifts\n # lists to arrays\n north_mag_scalar = np.array(north_mag_scalar)\n south_mag_scalar = np.array(south_mag_scalar)\n eq_mag_scalar = np.array(eq_mag_scalar)\n # apply to electric field scaling to get ion drift values\n north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar\n south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar \n north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar\n south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar\n # equatorial \n eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar\n eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar\n # output\n out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar\n out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar\n out['north_mer_drifts_scalar'] = north_mer_drifts_scalar\n out['south_mer_drifts_scalar'] = south_mer_drifts_scalar\n out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar\n out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar\n\n return out",
"def align_timeseries(self):\n\t#\tprint(self.pm10,self.am10)\n\t\tself.tuple_ts = np.zeros([2,8640])\n\t\tcurrent_time = self.pm10\n\t\ttime_list_ac = np.array(self.activity_ts.keys())\n\t\ttime_list_au = np.array(self.audio_ts.keys())\n\t\t\n\t\tdistances_ac = np.zeros(8640)\n\t\tdistances_au = np.zeros(8640)\n\t\tfor i in range(0,8640):\n\t\t\t#Find position and value of nearest inference to time step\n\t\t\tdiff_ac = np.abs(time_list_ac-current_time)\n\t\t\tdiff_au = np.abs(time_list_au-current_time)\n\n\t\t\tnearest_ac_inference = np.argmin(diff_ac)\n\t\t\tnearest_au_inference = np.argmin(diff_au)\n\n\t\t\t# Keeping error for evaluation purposes\n\t\t\tdistances_ac[i] = (time_list_ac[nearest_ac_inference]-current_time)\n\t\t\tdistances_au[i] = (time_list_au[nearest_au_inference]-current_time)\n\n\t\t\t# Timelist[nearest] is the nearest timestamp, which is also the key\n\t\t\t# to get the inference from activity/audio_(ts)\n\t\t\tself.tuple_ts[0,i] = self.activity_ts[time_list_ac[nearest_ac_inference]]\n\t\t\tself.tuple_ts[1,i] = self.audio_ts[time_list_au[nearest_au_inference]]\n\t\t\tcurrent_time += 5\n\t\t#print(self.user)\n\t\t#print(np.mean(distances_ac), np.median(distances_ac))\n\t\t#print(np.mean(distances_au), np.median(distances_au))\n\n\t\tif np.abs(np.mean(distances_ac))>1.5 or np.abs(np.mean(distances_au))>1.5:\n\t\t\tself.not_big = 0",
"def generateForcingFields(self, conc_idx, inputs, outputs):\n\n\t\tForcing.log(\"Running %s.generateForcingFields()\"%type(self))\n\n\t\t# Some variable used later\n\t\tscalar = None\n\n\t\tif self.griddedTimeZoneFld == None:\n\t\t\t# Assume all timezones are GMT\n\t\t\tprint \"Warning! No gridded time zone information loaded. Using a field of zeros.\"\n\t\t\ttz = np.zeros((self.ni,self.nj))\n\t\telse:\n\t\t\ttz = self.griddedTimeZoneFld\n\n\t\tif len(self.species) == 0:\n\t\t\traise NoSpeciesException(\"Must specify species\")\n\t\t\treturn\n\n\t\t# We doing time averaging?\n\t\tif self.averaging in ['AVG_MAX', 'AVG_MAX8', 'AVG_MAX24']:\n\t\t\tdo_averaging=True\n\t\t\taveraging_window = self.averaging_window\n\t\telse:\n\t\t\tdo_averaging=False\n\t\t\taveraging_window = None\n\t\t\t#if self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE'\n\t\t\tif self.averaging == 'AVG_NONE':\n\t\t\t\t# Ensure this is set right\n\t\t\t\tself.timeMask = range(0,25)\n\t\t\t# If it's the mask, then the timemask should already be set\n\n\t\t# Create zero fields to allocate our arrays\n\t\tfld_empty=np.zeros((len(self.species), self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\n\t\t# Get the relative days, so [-1 0 1] for [yesterday, today, tomorrow]\n\t\trdays = inputs.keys()\n\t\t# Probably an easiesr way to initalize this since we're only writing later, but for now we'll do it.\n\t\tflds={}\n\t\tfor d in rdays:\n\t\t\tflds[d] = fld_empty.copy()\n\n\t\t# This is NOT efficient. Could probably easily make it\n\t\t# more efficient by implementing some sort of cache though..\n\t\tfor idx_s, species in enumerate(self.species):\n\t\t\t#print \"Iteratiing through species %d=%s\"%(idx_s, species)\n\n\t\t\t# Initialize the data flds. Set to zero if there's a day that doesn't exist\n\t\t\tdatas={}\n\t\t\tfor d in rdays:\n\t\t\t\tif inputs[d] is None:\n\t\t\t\t\tdatas[d] = np.zeros((self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\t\t\t\telse:\n\t\t\t\t\tdatas[d] = inputs[d].variables[species][:]\n\n\t\t\t# Recall, mask is already considered in these vectors\n\t\t\tfor k in self._layers:\n\t\t\t\t# I think there's a better way to do the next two loops, don't know it though.\n\t\t\t\tfor i in range(0,self.ni):\n\t\t\t\t\tfor j in range(0,self.nj):\n\n\t\t\t\t\t\t# Spatial mask\n\t\t\t\t\t\tif not self.space[j,i]:\n\t\t\t\t\t\t\t# This is masked out. Set to zero and go to the next cell\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][0:self.nt,k,j,i] = np.zeros((self.nt), dtype=np.float32)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t#else:\n\t\t\t\t\t\t#\t# TEMP HACK!!\n\t\t\t\t\t\t# # This temp hack is used to ensure the mask is working\n\t\t\t\t\t\t#\tfld_yest[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_today[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_tom[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tcontinue\n\n\n\t\t\t\t\t\t# Take averaging into consideration\n\t\t\t\t\t\t# For almost all of these averagings, we'll have to\n\t\t\t\t\t\t# build a vector of all values for all times at that\n\t\t\t\t\t\t# cell. 
Unfortunately, the data is organized in the \n\t\t\t\t\t\t# opposite way as we want (time is the top index..)\n\t\t\t\t\t\tif do_averaging:\n\t\t\t\t\t\t\tvecs={}\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tvecs[d] = datas[d][:Forcing.dayLen,k,j,i]\n\n\t\t\t\t\t\t\t# REMOVE!\n\t\t\t\t\t\t\t#if i==self.debug_i and j==self.debug_j:\n\t\t\t\t\t\t\t#\tprint \"vec_today[%d,%d]: \"%(self.debug_j, self.debug_i), vec_today\n\n\t\t\t\t\t\t\t# Prepares a vector of values with respect to the\n\t\t\t\t\t\t\t# direction we're going to calculate the average\n\t\t\t\t\t\t\t# (forward/backward), the window size, and time\n\t\t\t\t\t\t\t# zone \n\n\t\t\t\t\t\t\tvec = Forcing.prepareTimeVectorForAvg(vecs, timezone=tz[j][i], winLen=averaging_window, debug=False)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, preped vec[%d] = %s\"%(i,j,len(vec),\" \".join(map(str, vec)))\n\n\t\t\t\t\t\t\t# Calculate the moving window average\n\t\t\t\t\t\t\tavgs = Forcing.calcMovingAverage(vec, winLen=averaging_window)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, avg vec[%d] = %s\"%(i,j,len(avgs),\" \".join(map(str, avgs)))\n\n\t\t\t\t\t\t\t# And then, for the 8-hour max to be used for a\n\t\t\t\t\t\t\t# forcing term, generate a vector for yesterday,\n\t\t\t\t\t\t\t# today and tomorrow with the forcing terms in them\n\n\t\t\t\t\t\t\tif self.timeInvariantScalarMultiplcativeFld is not None:\n\t\t\t\t\t\t\t\tscalar = self.timeInvariantScalarMultiplcativeFld[j][i]/averaging_window\n\n\t\t\t\t\t\t\tvecs = Forcing.applyForceToAvgTime(avgs, days=vecs.keys(), winLen=averaging_window, timezone=tz[j][i], min_threshold=self.threshold, forcingValue=scalar)\n\n# This was done blindly\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][:24,k,j,i] = vecs[d]\n\n\t\t\t\t\t\telif self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE':\n# NOT YET TESTED\n\t\t\t\t\t\t\traise NotImplementedError( \"Mask timing or no averaging is not yet tested. Averaging options=%s\"%self.averaging )\n\t\t\t\t\t\t\t# The comments assume timezone = -6\n\t\t\t\t\t\t\tfor t_gmt in self.timeMask:\n\t\t\t\t\t\t\t\t# when t_gmt = 0, t_loc = -6, so we're into yesterday\n\t\t\t\t\t\t\t\tt_loc = t_gmt + tz[j][i]\n\n\t\t\t\t\t\t\t\t# Reference the arrays\n\t\t\t\t\t\t\t\tif t_loc < 0:\n\t\t\t\t\t\t\t\t\tdfld = data_yest\n\t\t\t\t\t\t\t\t\t#ffld = fld_yest\n\t\t\t\t\t\t\t\telif t_loc>0 and t_loc<Forcing.dayLen:\n\t\t\t\t\t\t\t\t\tdfld = data_today\n\t\t\t\t\t\t\t\t\t#ffld = fld_today\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tdfld = data_tomorrow\n\t\t\t\t\t\t\t\t\t#ffld = fld_tomorrow\n\n\t\t\t\t\t\t\t\t# I have to write in GMT\n# This is wrong, as local times can write into another day.. maybe.. but since there's no averaging, another iteration will take care of that..\n\t\t\t\t\t\t\t\tffld = fld_today\n\n\t\t\t\t\t\t\t\t# fld[-6] is fld[18]\n\t\t\t\t\t\t\t\tval=dfld[t_loc,k,j,i]\n\t\t\t\t\t\t\t\tif threshold is not None:\n\t\t\t\t\t\t\t\t\tif val > threshold:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif val > 0.0:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\n\t\t\t\t\t\t\t\t# Set the field in the referenced forcing field\n\t\t\t\t\t\t\t\tffld[t_loc,k,j,i] = force\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise NotImplementedError( \"Unavailable time averaging method (%s) selected\"%self.averaging )\n\n\t\t\t\t\t\t#endif averaging\n\t\t\t\t\t#endfor j\n\t\t\t\t#endfor i\n\t\t\t#endfor k\n\n\t\t#endfor species\n\n\t\treturn flds",
"def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n \"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)",
"def create_data_structures(self):\n # Data storage arrays for time and measurement\n # Create the array of zeros and preallocating\n start_time = time.time()\n # The number of data points has to be optimized\n self.data_points = 5000\n # prs_data has three rows, 0 = time, 1 = pressure - tare, 2 = raw_pressure\n self.prs_data = np.zeros([3, self.data_points])\n self.prs_data[0, :] = start_time\n # This queue receives data from the sensors and puts it in the graphs and sends to the \n # LifoQueue\n self.prs_q = Queue()\n # The lifo queue is created to send the data to the piston control thread. The piston\n # control will only read and use the last value, since only the most recent information\n # matters\n self.prs_lifo_q = LifoQueue()\n self.prs_tare = 0\n \n self.flw_data = np.zeros([3, self.data_points])\n self.flw_data[0, :] = start_time\n self.flw_q = Queue()\n self.flw_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.flw_tare = 0\n\n self.vol_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.vol_data = np.zeros([2, self.data_points])\n self.vol_data[0, :] = start_time",
"def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif 
self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)",
"def make_temperature_map(time: u.s, field, instr, **kwargs):\n plot_settings = {'cmap': cm.get_cmap('inferno')}\n plot_settings.update(kwargs.get('plot_settings', {}))\n bins, bin_range = instr.make_detector_array(field)\n visible = is_visible(instr.total_coordinates, instr.observer_coordinate)\n hist_coordinates, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=visible)\n with h5py.File(instr.counts_file, 'r') as hf:\n try:\n i_time = np.where(u.Quantity(hf['time'],\n get_keys(hf['time'].attrs), ('unit', 'units')) == time)[0][0]\n except IndexError:\n raise IndexError(f'{time} is not a valid time in observing time for {instr.name}')\n weights = np.array(hf['electron_temperature'][i_time, :])\n units = u.Unit(get_keys(hf['electron_temperature'].attrs, ('unit', 'units')))\n hist, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=weights * visible)\n hist /= np.where(hist_coordinates == 0, 1, hist_coordinates)\n meta = instr.make_fits_header(field, instr.channels[0])\n del meta['wavelnth']\n del meta['waveunit']\n meta['bunit'] = units.to_string()\n meta['detector'] = 'Electron Temperature'\n meta['comment'] = 'Column-averaged electron temperature calculated by synthesizAR'\n\n return GenericMap(hist.T, meta, plot_settings=plot_settings)",
"def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... \\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... \\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return",
"def my_hvac_prediction_function(start_date, end_date, resolution):\n\n return {\n \"EastZone\": {\n \"inside\": {\n \"1520322778000\": 69.4473087819,\n \"1520326378000\": 69.204815864,\n \"1520329978000\": 69.2362606232,\n \"1520333578000\": 69.2615819209,\n \"1520337178000\": 69.2750708215,\n \"1520340778000\": 69.2776203966,\n \"1520344378000\": 69.2759206799,\n \"1520347978000\": 69.5719546742,\n \"1520351578000\": 69.2436260623,\n \"1520355178000\": 69.6504249292,\n \"1520358778000\": 70.0016997167,\n \"1520362378000\": 70.3898550725,\n \"1520365978000\": 70.4116147309,\n \"1520369578000\": 70.6051136364,\n \"1520373178000\": 70.728125,\n \"1520376778000\": 70.856980057,\n \"1520380378000\": 71.547592068,\n \"1520383978000\": 72.1147727273\n },\n \"outside\": {\n \"1520322778000\": 89.49,\n \"1520326378000\": 89.2,\n \"1520329978000\": 89.22,\n \"1520333578000\": 89.29,\n \"1520337178000\": 89.25,\n \"1520340778000\": 89.26,\n \"1520344378000\": 89.29,\n \"1520347978000\": 89.52,\n \"1520351578000\": 89.23,\n \"1520355178000\": 89.62,\n \"1520358778000\": 80.07,\n \"1520362378000\": 80.35,\n \"1520365978000\": 80.49,\n \"1520369578000\": 80.64,\n \"1520373178000\": 80.7,\n \"1520376778000\": 80.8,\n \"1520380378000\": 81.5,\n \"1520383978000\": 82.13\n },\n \"heating_setpoint\": {\n \"1520322778000\": 50,\n \"1520326378000\": 50,\n \"1520329978000\": 50,\n \"1520333578000\": 50,\n \"1520337178000\": 50,\n \"1520340778000\": 50,\n \"1520344378000\": 50,\n \"1520347978000\": 50,\n \"1520351578000\": 50,\n \"1520355178000\": 50,\n \"1520358778000\": 70,\n \"1520362378000\": 70,\n \"1520365978000\": 70,\n \"1520369578000\": 70,\n \"1520373178000\": 70,\n \"1520376778000\": 70,\n \"1520380378000\": 70,\n \"1520383978000\": 70\n },\n \"cooling_setpoint\": {\n \"1520322778000\": 80,\n \"1520326378000\": 80,\n \"1520329978000\": 80,\n \"1520333578000\": 80,\n \"1520337178000\": 80,\n \"1520340778000\": 80,\n \"1520344378000\": 80,\n \"1520347978000\": 80,\n \"1520351578000\": 80,\n \"1520355178000\": 80,\n \"1520358778000\": 74,\n \"1520362378000\": 74,\n \"1520365978000\": 74,\n \"1520369578000\": 74,\n \"1520373178000\": 74,\n \"1520376778000\": 74,\n \"1520380378000\": 74,\n \"1520383978000\": 74\n },\n \"state\": {\n \"1520322778000\": \"heat stage 1\",\n \"1520329978000\": \"off\",\n \"1520358778000\": \"heat stage 1\",\n \"1520365978000\": \"heat stage 2\",\n \"1520369578000\": \"off\",\n \"1520373178000\": \"heat stage 1\"\n }\n }\n }",
"def get_field_data(self):\n if not self._coverage_computed:\n self.compute_coverage()\n \n # Find the total number of fields\n num_fields = 0\n for m in xrange(self.num_maps):\n if self.fields[m] is not None:\n num_fields += self.fields[m].shape[0]\n \n # Initialize data fields\n field_id = N.arange(num_fields)\n unit_id = N.empty(num_fields, 'h')\n area = N.empty(num_fields, 'd')\n diameter = N.empty(num_fields, 'd')\n radius = N.empty(num_fields, 'd')\n maximum = N.empty(num_fields, 'd')\n average = N.empty(num_fields, 'd')\n center_x = N.empty(num_fields, 'd')\n center_y = N.empty(num_fields, 'd')\n \n # Quantify place field characteristics\n f_id = 0\n for m in xrange(self.num_maps): \n if self.fields[m] is None:\n continue\n \n for field in self.fields[m]:\n \n # Single field-masked ratemap and sum\n rates = field * self.Map[m]\n rates_sum = float(rates.sum())\n \n # Place unit identification\n unit_id[f_id] = m\n \n # Coverage geometry\n area[f_id] = field.sum()\n diameter[f_id] = 2*N.sqrt(area[f_id]/N.pi)\n radius[f_id] = diameter[f_id] / 2\n \n # Rate-dependent quantities\n maximum[f_id] = rates.max()\n average[f_id] = rates_sum / area[f_id]\n center_x[f_id] = (self._xrange[N.newaxis,:] * rates).sum() \\\n / rates_sum\n center_y[f_id] = (self._yrange[:,N.newaxis] * rates).sum() \\\n / rates_sum\n \n f_id += 1\n \n # Create records array\n field_data = N.rec.fromarrays(\n [field_id, unit_id, area, diameter, radius, maximum, average,\n center_x, center_y],\n names='id, unit, area, diameter, radius, peak, average, x, y',\n formats='l, l, l, d, d, d, d, d, d')\n \n return field_data",
"def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata",
"def create_storm_objects():\n\n valid_times_unix_sec = numpy.array([\n 0, 0, 0,\n 1, 1,\n 2, 2,\n 4, 4,\n 5, 5, 5, 5,\n 6, 6, 6,\n 7, 7,\n 10,\n 11\n ], dtype=int)\n\n primary_id_strings = [\n 'A', 'B', 'B',\n 'A', 'B',\n 'A', 'B',\n 'A', 'B',\n 'A', 'A', 'B', 'B',\n 'A', 'B', 'B',\n 'A', 'A',\n 'A',\n 'A'\n ]\n\n secondary_id_strings = [\n 'A1', 'B1', 'B2',\n 'A1', 'B2',\n 'A1', 'B3',\n 'A1', 'B4',\n 'A2', 'A3', 'B4', 'B5',\n 'A2', 'B4', 'B5',\n 'A2', 'A3',\n 'A4',\n 'A4'\n ]\n\n centroid_latitudes_deg = numpy.array([\n 50, 60.5, 59.5,\n 50, 59.5,\n 50, 60,\n 50, 60.5,\n 50.5, 49.5, 60.5, 59.5,\n 50.5, 60.5, 59.5,\n 50.5, 49.5,\n 50,\n 50\n ])\n\n centroid_longitudes_deg = numpy.array([\n 240, 270, 270,\n 240.5, 271,\n 241, 272,\n 242, 274,\n 242.5, 242.5, 275, 275,\n 243, 276, 276,\n 243.5, 243.5,\n 245,\n 245.5\n ])\n\n start_times_unix_sec = numpy.array([\n 0, 0, 0,\n 0, 0,\n 0, 2,\n 0, 4,\n 5, 5, 4, 5,\n 5, 4, 5,\n 5, 5,\n 10,\n 10\n ], dtype=int)\n\n end_times_unix_sec = numpy.array([\n 4, 0, 1,\n 4, 1,\n 4, 2,\n 4, 6,\n 7, 7, 6, 6,\n 7, 6, 6,\n 7, 7,\n 11,\n 11\n ], dtype=int)\n\n first_prev_secondary_id_strings = [\n '', '', '',\n 'A1', 'B2',\n 'A1', 'B1',\n 'A1', 'B3',\n 'A1', 'A1', 'B4', 'B3',\n 'A2', 'B4', 'B5',\n 'A2', 'A3',\n 'A2',\n 'A4'\n ]\n\n second_prev_secondary_id_strings = [\n '', '', '',\n '', '',\n '', 'B2',\n '', '',\n '', '', '', '',\n '', '', '',\n '', '',\n 'A3',\n ''\n ]\n\n first_next_secondary_id_strings = [\n 'A1', 'B3', 'B2',\n 'A1', 'B3',\n 'A1', 'B4',\n 'A2', 'B4',\n 'A2', 'A3', 'B4', 'B5',\n 'A2', '', '',\n 'A4', 'A4',\n 'A4',\n ''\n ]\n\n second_next_secondary_id_strings = [\n '', '', '',\n '', '',\n '', 'B5',\n 'A3', '',\n '', '', '', '',\n '', '', '',\n '', '',\n '',\n ''\n ]\n\n storm_object_table = pandas.DataFrame.from_dict({\n tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,\n tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,\n tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,\n tracking_utils.CENTROID_LATITUDE_COLUMN: centroid_latitudes_deg,\n tracking_utils.CENTROID_LONGITUDE_COLUMN: centroid_longitudes_deg,\n linkage.STORM_CENTROID_X_COLUMN: centroid_longitudes_deg,\n linkage.STORM_CENTROID_Y_COLUMN: centroid_latitudes_deg,\n tracking_utils.CELL_START_TIME_COLUMN: start_times_unix_sec,\n tracking_utils.CELL_END_TIME_COLUMN: end_times_unix_sec,\n tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_secondary_id_strings,\n tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:\n second_prev_secondary_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_secondary_id_strings,\n tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:\n second_next_secondary_id_strings\n })\n\n nested_array = storm_object_table[[\n tracking_utils.VALID_TIME_COLUMN, tracking_utils.VALID_TIME_COLUMN\n ]].values.tolist()\n\n storm_object_table = storm_object_table.assign(**{\n linkage.STORM_VERTICES_X_COLUMN: nested_array,\n linkage.STORM_VERTICES_Y_COLUMN: nested_array\n })\n\n num_storm_objects = len(storm_object_table.index)\n\n for j in range(num_storm_objects):\n storm_object_table[linkage.STORM_VERTICES_X_COLUMN].values[j] = (\n storm_object_table[linkage.STORM_CENTROID_X_COLUMN].values[j] +\n X_VERTICES_RELATIVE\n )\n\n storm_object_table[linkage.STORM_VERTICES_Y_COLUMN].values[j] = (\n storm_object_table[linkage.STORM_CENTROID_Y_COLUMN].values[j] +\n Y_VERTICES_RELATIVE\n )\n\n return storm_object_table",
"def _create_query_points(storm_object_table, lead_times_sec):\n\n if numpy.any(lead_times_sec > 0):\n (storm_speeds_m_s01, geodetic_bearings_deg\n ) = geodetic_utils.xy_to_scalar_displacements_and_bearings(\n x_displacements_metres=storm_object_table[\n tracking_utils.EAST_VELOCITY_COLUMN].values,\n y_displacements_metres=storm_object_table[\n tracking_utils.NORTH_VELOCITY_COLUMN].values)\n\n num_storm_objects = len(storm_object_table.index)\n num_lead_times = len(lead_times_sec)\n list_of_query_point_tables = [None] * num_lead_times\n\n for i in range(num_lead_times):\n if lead_times_sec[i] == 0:\n list_of_query_point_tables[i] = storm_object_table[[\n tracking_utils.CENTROID_LAT_COLUMN,\n tracking_utils.CENTROID_LNG_COLUMN, tracking_utils.TIME_COLUMN,\n tracking_utils.EAST_VELOCITY_COLUMN,\n tracking_utils.NORTH_VELOCITY_COLUMN]]\n\n argument_dict = {\n LEAD_TIME_COLUMN: numpy.full(num_storm_objects, 0, dtype=int)}\n list_of_query_point_tables[i] = (\n list_of_query_point_tables[i].assign(**argument_dict))\n\n else:\n (these_extrap_latitudes_deg, these_extrap_longitudes_deg\n ) = geodetic_utils.start_points_and_displacements_to_endpoints(\n start_latitudes_deg=storm_object_table[\n tracking_utils.CENTROID_LAT_COLUMN].values,\n start_longitudes_deg=storm_object_table[\n tracking_utils.CENTROID_LNG_COLUMN].values,\n scalar_displacements_metres=\n storm_speeds_m_s01 * lead_times_sec[i],\n geodetic_bearings_deg=geodetic_bearings_deg)\n\n this_dict = {\n tracking_utils.CENTROID_LAT_COLUMN: these_extrap_latitudes_deg,\n tracking_utils.CENTROID_LNG_COLUMN: these_extrap_longitudes_deg,\n tracking_utils.TIME_COLUMN:\n (storm_object_table[tracking_utils.TIME_COLUMN].values +\n lead_times_sec[i]),\n tracking_utils.EAST_VELOCITY_COLUMN: storm_object_table[\n tracking_utils.EAST_VELOCITY_COLUMN].values,\n tracking_utils.NORTH_VELOCITY_COLUMN: storm_object_table[\n tracking_utils.NORTH_VELOCITY_COLUMN].values,\n LEAD_TIME_COLUMN: numpy.full(\n num_storm_objects, lead_times_sec[i], dtype=int)\n }\n list_of_query_point_tables[i] = pandas.DataFrame.from_dict(\n this_dict)\n\n if i == 0:\n continue\n\n list_of_query_point_tables[i], _ = list_of_query_point_tables[i].align(\n list_of_query_point_tables[0], axis=1)\n\n return pandas.concat(list_of_query_point_tables, axis=0, ignore_index=True)",
"def dispatch_freq(line_stoptime, line_toStartTimes, headway_sec=300):\n # SimMobility dispatch_freq table [frequency_id, line_id, start_time, end_time, headway_sec]\n dispatch_freq = []\n for line, startTimes in line_toStartTimes.items():\n startTimeInSeconds = [InSeconds(t) for t in startTimes]\n start_time = min(startTimeInSeconds)\n end_time = max(startTimeInSeconds)\n if start_time == end_time:\n end_time += 2*3600\n dispatch_freq.append((line, start_time, end_time, headway_sec))\n dispatch_freq = pd.DataFrame.from_records(dispatch_freq, columns=['line_id', 'start_time', 'end_time', 'headway_sec'])\n print(dispatch_freq)\n dispatch_freq['start_time'] = dispatch_freq.apply(lambda row: formatSecond(row.start_time), axis=1)\n dispatch_freq['end_time'] = dispatch_freq.apply(lambda row: formatSecond(row.end_time), axis=1)\n dispatch_freq['frequency_id'] = dispatch_freq.index\n dispatch_freq = dispatch_freq[['frequency_id', 'line_id', 'start_time', 'end_time', 'headway_sec']]\n\n # Public transit generation dispatch table\n # ['trip_id','arrival_time','departure_time','stop_id','stop_sequence','service','service_id','stop_lat','stop_long','C_type']\n print('line_stops------- ', line_stoptime.columns)\n dispatch_detialed = line_stoptime[['trip_id', 'stop_sequence', 'arrival_time','line_id',\n 'departure_time', 'station_no', 'type', 'stop_lon', 'stop_lat']]\n print('Before rename ', dispatch_detialed.columns)\n dispatch_detialed.rename(columns={'station_no':'stop_id', 'type':'C_type',\n 'stop_lon':'stop_long', 'line_id': 'service_id'}, inplace=True)\n # dispatch_detialed = line_stoptime.rename(columns={'start_time':'arrival_time', 'arrival_time':'arrival_time_old', 'end_time':'departure_time',\n # 'station_no':'stop_id','type':'C_type', 'stop_lon':'stop_long', 'service_id':'service_id_gtfs', 'line_id':'service_id'})\n print(dispatch_detialed.columns)\n dispatch_detialed['service'] = dispatch_detialed['service_id']\n dispatch_detialed.sort_values(by=['service_id', 'trip_id', 'stop_sequence'], inplace=True)\n print('number of frequencey ', len(dispatch_freq.frequency_id.unique()))\n print('number of lines ', len(dispatch_detialed.service_id.unique()))\n return dispatch_freq, dispatch_detialed",
"def Load_EP_Fullcospectra(path,start_day,end_day,variable):\r\n \r\n # Number of days selected\r\n sday = datetime.strptime(start_day,'%Y-%m-%d')\r\n eday = datetime.strptime(end_day,'%Y-%m-%d')\r\n Nday = (eday-sday).days +1\r\n \r\n if Nday <= 0:\r\n print('WARNING!! End day is before start day!')\r\n \r\n Nvars = len(variable)\r\n\r\n allf = os.listdir(path)\r\n fnames = [f for f in allf if f.endswith('.csv')]\r\n \r\n # Read first file to get info (meta) \r\n spec, timeseries, header, meta1 = read_cospectrum(path,[fnames[0]])\r\n Hz = meta1[0]\r\n avg_period = meta1[3]\r\n nseg = np.int(24*60/avg_period)\r\n ppf = np.int(2**np.floor(np.log2(avg_period*60*Hz/2)))\r\n\r\n df = Hz/2/ppf\r\n freq = np.arange(df,Hz/2+df,df)\r\n \r\n # spec shape: [frequency,time,variables]\r\n spec=np.zeros((ppf,np.int(Nday*(24*60/avg_period)),Nvars))*np.nan\r\n spec_time=[]\r\n\r\n tct = -1 # Time counter\r\n for d in range(Nday):\r\n for h in range(nseg):\r\n tct+=1\r\n curtime = sday+timedelta(d,0,0,0,avg_period*(h+1))\r\n spec_time.append(curtime)\r\n hstr = (curtime).strftime('%H%M')\r\n\r\n daystr = curtime.strftime('%Y-%m-%d')\r\n daystr2 = curtime.strftime('%Y%m%d')\r\n print('Loading... {} {}'.format(daystr,hstr))\r\n\r\n # See if file exists\r\n matchi = np.array(['{}-{}'.format(daystr2,hstr) in f for f in fnames])\r\n\r\n if np.sum(matchi)>0:\r\n matchi = np.where(matchi)[0][0]\r\n spec_day, spec_time_day, header_day, meta_day = read_cospectrum(path,[fnames[matchi]])\r\n spec_day = spec_day[0]\r\n\r\n for vi in range(Nvars):\r\n gasheader = 'f_nat*cospec(w_{})'.format(variable[vi])\r\n vmatchi = np.array([gasheader in h for h in header_day])\r\n if np.sum(vmatchi)>0:\r\n vmatchi = np.where(vmatchi)[0][0]\r\n spec[:,tct,vi] = spec_day[:,vmatchi]\r\n\r\n else:\r\n print('And there was a problem!') \r\n \r\n return spec, spec_time, freq",
"def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted",
"def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata",
"def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a text string summarizing how the motion correction was done. | def SummarizeMotionTargets(self):
text = '\nSummary of motion-correction: \n'
for epi in self.entry_map['epi']:
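            # Build a readable registration chain for this EPI run: volreg base, optional fieldmap, anatomical reference.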
info = self.info[epi]
text += self.GetBase(epi, '')
base = self.GetBase(info['base_entry'], '')
text += ' ->3dvolreg-> %s[%s]' % (base, info['base'])
if info['fmap_entry'] is not None:
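                # A fieldmap was acquired: the EPI is assumed registered to it, and the fieldmap's
                # anatomical reference closes the chain (via 3dAllineate when concatenated matrices exist).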
fmap = info['fmap_entry']
text += ' ->assume-registered-> %s' % self.GetBase(fmap, '')
anat = self.info[fmap]['anat_ref']
if info['catmats']:
text += ' ->3dAllineate-> %s' % \
self.GetBase(anat, '')
else:
text += ' ->assume-registered-> %s' % self.GetBase(anat, '')
else:
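                # No fieldmap entry: the EPI is assumed registered directly to the anatomical target.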
anat = info['anat_tgt']
text += ' ->assume-registered-> %s' % self.GetBase(anat, '')
text += '\nEPIs should be in register with %s\n' % \
self.GetBase(self.anatomical, '')
return text | [
"def Explanation(self) -> str:",
"def summary(self) -> str:\n def get_conjugations_summary(json_dict: dict) -> str:\n conjugations = ConjugationsDB()\n conjugations.from_json(json_dict)\n return conjugations.summary\n\n result = textwrap.dedent(f\"\"\"\\\n ---------------------------------------------------------\n Usage info:\n {self.usage_info}\\n\n Infinitive (-ma -da translation):\n {self.infinitive_ma} {self.infinitive_da} {self.infinitive_ma_english}\\n\n Past active participle:\n {self.past_active_participle} {self.past_active_participle_english}\\n\n Past passive participle:\n {self.past_passive_participle} {self.past_passive_participle_english}\\n\n Present tense:\n \"\"\")\n\n result += get_conjugations_summary(self.present)\n\n result += \"\\nConditional mood\\n\"\n result += get_conjugations_summary(self.conditional_mood)\n\n result += \"\\nImperative mood\\n\"\n result += get_conjugations_summary(self.imperative_mood)\n\n result += \"\\nImperative negative mood\\n\"\n result += get_conjugations_summary(self.imperative_negative_mood)\n\n result += \"\\nPerfect tense\\n\"\n result += get_conjugations_summary(self.perfect)\n\n result += \"\\nPast tense\\n\"\n result += get_conjugations_summary(self.past)\n\n result += \"\\nPlusperfect tense\\n\"\n result += get_conjugations_summary(self.plusperfect)\n\n result += \"\\nConditional perfect mood\\n\"\n result += get_conjugations_summary(self.conditional_perfect_mood)\n\n result += \"\\nQuotative tense\\n\"\n result += get_conjugations_summary(self.quotative)\n\n result += \"\\nQuotative perfect tense\\n\"\n result += get_conjugations_summary(self.quotative_perfect)\n\n result += \"\\nJussive tense\\n\"\n result += get_conjugations_summary(self.jussive)\n\n result += \"\\nJussive perfect tense\\n\"\n result += get_conjugations_summary(self.jussive_perfect)\n\n result += \"\\nOther\\n\"\n for key in self.other:\n result += key + \" \" + self.other[key][0] + \" \" + self.other[key][1] + \"\\n\"\n\n result += \"---------------------------------------------------------\\n\"\n return result",
"def getDebugText(self):\n timeDifference = time.time() - self.time_created\n hours = math.floor(timeDifference / 3600)\n minutes = math.floor((timeDifference % 3600) / 60)\n seconds = math.floor(timeDifference % 3600 % 60)\n\n output = \"\\n\" * 50\n output += \"Time started: %s\\n\" % time.ctime(self.time_created)\n output += \"Time now: %s\\n\" % time.ctime()\n output += \"Time elapsed: %02d:%02d:%02d\\n\" % (hours, minutes, seconds)\n output += (\"=\" * 80) + \"\\n\"\n output += \"Health potions used: %d\\n\" % self.hp_pots_used\n output += \"Health potions per hour: %d\\n\" % (self.hp_pots_used / (\n timeDifference / 3600))\n output += \"Mana potions used: %d\\n\" % self.mana_pots_used\n output += \"Mana potions per hour: %d\\n\" % (self.mana_pots_used / (\n timeDifference / 3600))\n return output",
"def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text",
"def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm",
"def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr",
"def __str__(self):\n title_str = 'Audit\\n-----\\n'\n alpha_str = 'Alpha: {}\\n'.format(self.alpha)\n beta_str = 'Beta: {}\\n'.format(self.beta)\n max_frac_str = 'Maximum Fraction to Draw: {}\\n'.format(self.max_fraction_to_draw)\n replacement_str = 'Replacement: {}\\n\\n'.format(self.replacement)\n return title_str + alpha_str + beta_str + max_frac_str + replacement_str + str(self.contest)",
"def create_analysis(self):\n text = self.input_main.get(\"1.0\", \"end-1c\")\n if not text:\n return \"\"\n if self.ignore_case_value.get():\n text = text.lower()\n\n char_map = calc.char_mapping(text)\n unique_chars = len(char_map)\n entropy = calc.entropy(text)\n metric_entropy = calc.metric_entropy(text)\n optimal = calc.optimal_bits(text)\n\n info = \"\"\"Length: {}\nUnique chars: {}\nEntropy: {}\nMetric entropy: {}\nOptimal bit usage: {}\"\"\".format(\n len(text),\n unique_chars,\n entropy,\n metric_entropy,\n optimal\n )\n\n table_head = \" Char | Probability | Bits | Occurrences \"\n table_body = \"\\n\".join(\n [\n \" {:<4} | {:>11.7f} | {:>11.7f} | {:>11}\".format(\n char,\n prob, calc.prob_to_info(prob),\n text.count(char)\n )\n for char, prob in char_map\n ]\n )\n table = \"\\n\".join([table_head, table_body])\n\n return \"\\n\\n\".join([info, table])",
"def summary_string(self) -> str:",
"def label(self):\n msg = ''\n # timing is in between T flags\n if self.tref is not None:\n if isinstance(self.tref, float):\n stref = '{:.2f}'.format(self.tref)\n elif isinstance(self.tref, int) or isinstance(self.tref, str):\n stref = '{}'.format(self.tref)\n else:\n stref = ''\n msg += 'T' + self.timing + stref\n # mode is in between M flags\n msg += 'M' + self.mode\n msg += 'J' + '{}'.format(self.join_points)\n msg += '_'\n if self.differentiate:\n msg += 'dot_'\n if self.local_fit:\n msg += 'W{:.2f}_'.format(self.time_window)\n if self.scale == 'log':\n msg += 'log_'\n msg += self.raw\n return msg",
"def _get_delta_text_string(self):\n textstring = \"\"\n if (\n self.is_commit_test is True\n ): # include commits if this is an analysis of commit history\n # Write SHA1 commits under examination\n if len(self.delta_fp_string_dict.delta_dict[\"commits\"]) > 0:\n textstring += (\n os.linesep + \"Commit history SHA1 for this analysis:\" + os.linesep\n )\n for sha1_commit in self.delta_fp_string_dict.delta_dict[\"commits\"]:\n textstring += \" \" + sha1_commit + os.linesep\n textstring += os.linesep\n elif (\n self.is_branch_test is True\n ): # include branches if this is a branch v branch analysis\n if len(self.delta_fp_string_dict.delta_dict[\"branches\"]) > 0:\n textstring += os.linesep + \"Branches under analysis:\" + os.linesep\n for branch in self.delta_fp_string_dict.delta_dict[\"branches\"]:\n textstring += \" \" + branch + os.linesep\n textstring += os.linesep\n\n # include added files\n if len(self.delta_fp_string_dict.delta_dict[\"added\"]) > 0:\n for added_file in self.delta_fp_string_dict.delta_dict[\"added\"]:\n add_append_string = \"[A]:\" + added_file + os.linesep\n textstring += add_append_string\n # include deleted files\n if len(self.delta_fp_string_dict.delta_dict[\"deleted\"]) > 0:\n for deleted_file in self.delta_fp_string_dict.delta_dict[\"deleted\"]:\n del_append_string = \"[D]:\" + deleted_file + os.linesep\n textstring += del_append_string\n # include modified files\n if len(self.delta_fp_string_dict.delta_dict[\"modified\"]) > 0:\n for modified_file in self.delta_fp_string_dict.delta_dict[\"modified\"]:\n mod_append_string = \"[M]:\" + modified_file + os.linesep\n textstring += mod_append_string\n\n return textstring",
"def get_text_advice(avg_temp: int, rain=False) -> str:\n text: str\n if avg_temp < 0:\n text = \"Одевайтесь теплее, на улице мороз\"\n elif avg_temp < 10:\n text = \"Прохладно на улице - будьте осторожны, без куртки не выходите\"\n elif avg_temp < 20:\n text = \"Накиньте плащ - не май месяц, все-таки\"\n else:\n text = \"Тепло и приятно - давайте гулять в футболках\"\n\n text += f\" (средняя температура: {avg_temp}C)\"\n\n if rain:\n text += \"\\nИ не забудьте взять зонтик - ожидается дождь\"\n\n return text",
"def format_as_text(self, summary_data):\n output = \"\"\n if self._n_in_scheme_no_move_trials > 0:\n output += (\"Null moves for \"\n + str(self._n_in_scheme_no_move_trials)\n + \" cycles. Excluding null moves:\\n\")\n for line in summary_data:\n output += self._line_as_text(line)\n return output",
"def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet",
"def anova_text_summarize(aov_res, variable='prevchoice', pos_word='Stay', \n neg_word='Switch'):\n s = pos_word if aov_res['fit']['fit_' + variable] > 0 else neg_word\n s += ' %0.2f' % aov_res['ess']['ess_' + variable]\n s += pval_to_star(aov_res['pvals']['p_' + variable])\n return s",
"def get_str_metadata(self):\n return \"\\n\".join([\"Guessed by {}\".format(self.guessed_by), \"{} metaphors used\".format(self.metaphors_used)])",
"def generateText(self, numWords):\n #Generate the first word by sampling from totalTable (using totalWords as the total count)\n sample = self.sampleWord(self.totalTable.getKeyValuePairs(), self.totalWords)\n myText = '...' + sample \n acc = 1\n #Until you have generated enough words:\\\n #Generate the next word by sampling from the hash map in countTable indexed by\n #the previous word (using the number in totalTable indexed by the previous word as the total count)\n #Return the text with '...' added to the beginning and end\n \n while acc != numWords:\n sample = self.sampleWord(self.countTable[sample].getKeyValuePairs(), self.totalTable[sample])\n acc = acc+1\n myText += ' ' + sample\n return myText + '...'",
"def generateText(self, numWords):\n #Generate a string of the first order words by using sampleWord to sample from totalTable.\n sample = self.sampleWord(self.totalTable.getKeyValuePairs(), self.totalWords)\n myText = '...' + sample \n acc = 1\n #Until you have generated enough words:\\\n #Generate the next word by sampling from the hash map in countTable indexed by\n #the previous word (using the number in totalTable indexed by the previous word as the total count)\n #Return the text with '...' added to the beginning and end\n \n while acc != numWords:\n sample = self.sampleWord(self.countTable[sample].getKeyValuePairs(), self.totalTable[sample])\n acc = acc+1\n myText += ' ' + sample\n newList = myText.split()\n newWord = newList[-self.order]\n for i in range(-self.order + 1 , 0):\n newWord += ' '+ newList[i]\n sample = newWord\n return myText + '...'",
"def text_report(self):\n\n word_count = self.word_count()\n\n print(\"\\nThere are {} words in the text.\".format(word_count))\n mean, median, mode = self.average_word_length()\n\n print(\"\\nMean, median and mode word length is {}, {}, {}.\".format(mean, median, mode))\n\n if word_count < 10:\n print(\"\\nLongest words:\")\n else:\n print(\"\\n10 longest words:\")\n for s in self.longest_words():\n print(s)\n\n print(\"\\nMost common words:\")\n for s in self.common_words():\n print(\"{} x {}\".format(s[1], s[0]))\n\n longest_grams = []\n\n # find n_longest n-grams\n n_longest = 10\n # strongly doubt that there will be n-grams longer than 50\n for i in range(min(50, word_count), 1, -1):\n if len(longest_grams) >= n_longest:\n break\n grams = self.find_ngrams(i)\n grams_list = sorted(grams, key=grams.get, reverse=True)\n\n for g in grams_list:\n if grams[g] > 4:\n # do not want to include n-grams which are substrings of longer n-grams\n substring = False\n for s in longest_grams:\n if g in s[1]:\n substring = True\n break\n if not substring:\n longest_grams.append([grams[g], g])\n\n print(\"\\nLongest n-grams:\")\n for g in longest_grams:\n print(\"{} x {}\".format(g[0], g[1]))\n print('\\n')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the correct ref.dat file for each pfile. | def _GetRefdat(self):
        rtime = {}
        for rfile in self.refdats.keys():
# Get times for ref.dat files with a time-stamp.
words = rfile.replace('.','_').split('_')
            if len(words) == 6 and words[-2].count(':') == 2:
# This file was time-stamped by the sequence. Get the
# date and time. file name format:
# ref_Sep_9_2007_11:28:32.dat
rtime[rfile] = hms_to_secs(words[-2])
for pfile in self.pfiles:
min_difftime = 1.e20
self.info[pfile]['refdat'] = None
for rfile in self.refdats.keys():
if rfile[:3] == 'ref' and 'dat' in rfile:
# This is a reference data file. First see if the orientation is
# appended. If the file has neither a time-stamp nor a plane and
# there is more than one ref.dat, the epi reconstruction will
# be aborted.
ref_file = None
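                # Prefer a ref.dat whose name encodes the same scan plane as this pfile.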
if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal':
# self.info[pfile]['refdat'] = rfile
ref_file = rfile
break
elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal':
# self.info[pfile]['refdat'] = rfile
ref_file = rfile
break
elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial':
# self.info[pfile]['refdat'] = rfile
ref_file = rfile
break
elif len(self.refdats.keys()) == 1:
# Use the only one if that is all there is.
ref_file = rfile
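                # Fall back on acquisition times for ref.dat files that carry a time-stamp.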
epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2])
                if rfile in rtime and \
                                epi_time - rtime[rfile] < min_difftime and \
                                rtime[rfile] < epi_time:
# Use the reference file that acquired nearest to the EPI
# but before it.
min_difftime = epi_time - rtime[rfile]
# self.info[pfile]['refdat'] = rfile
ref_file = rfile
if ref_file:
# Found a candidate.
if not self.info[pfile]['refdat']:
# Haven't found one yet, use it.
self.info[pfile]['refdat'] = ref_file
else:
# Found two. Choose one in the same directory.
oldpath = os.path.dirname(self.info[pfile]['refdat'])
newpath = os.path.dirname(ref_file)
pfile_path = os.path.dirname(pfile)
if oldpath == newpath:
                        # Both candidates are in the same directory; take the newer one.
self.info[pfile]['refdat'] = ref_file
elif newpath == pfile_path:
self.info[pfile]['refdat'] = ref_file
# else Do nothing, use existing choice.
elif not os.path.exists(rfile):
self.info[pfile]['refdat'] = None
elif os.stat(rfile).st_size > 0:
# This path is taken if no info is encoded in the file name.
# Don't use empty ref.dat files.
self.info[pfile]['refdat'] = rfile | [
"def _find_references(self, input_files):\n reference_regex = re.compile(r'(?:(?:src=|href=|importScripts\\(|url\\()(?:\"([^\"]+)\"|\\'([^\\']+)\\')|url\\(([^\\)\\'\"]+)\\))')\n references = {}\n for input_file in input_files:\n matches = reference_regex.findall(self._filesystem.read_binary_file(input_file))\n if matches:\n references[input_file] = [filter(None, match)[0] for match in matches]\n return references",
"def get_file_references():\n ref_nodes = pm.ls(rf=True)\n file_refs = [r.referenceFile() for r in ref_nodes]\n return file_refs",
"def find(self):\n print(\"Looking for pacnew and pacsave files…\")\n paths = ('/bin', '/etc', '/opt', '/usr')\n for dir_path, _, files in chain.from_iterable(os.walk(path) for path in paths):\n for f in files:\n pacnew = os.path.join(dir_path, f)\n if self.re_pacfiles.search(pacnew):\n self.pacfiles.append(pacnew)\n self.pacfiles.sort()\n print(\"%d file(s) found.\" % len(self.pacfiles))",
"def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE', 'MDRIZTAB'])\n print(\"Looking for REF_FILES: {}\".format(ref_files))\n\n for ref_file in ref_files:\n if ref_file.strip() == '':\n continue\n if refsep not in ref_file: # Local file\n refname = self.get_data('customRef', ref_file)\n else: # Download from FTP, if applicable\n refname = os.path.join(ref_file)\n if self.use_ftp_crds:\n download_crds(refname, self.timeout)\n return filename",
"def setRelativeReferencePaths(self):\r\n files = os.listdir(self.binDir)\r\n for fileName in files:\r\n\r\n # install_name_tool can't handle zip files or directories\r\n filePath = os.path.join(self.binDir, fileName)\r\n if fileName.endswith('.zip') or os.path.isdir(filePath):\r\n continue\r\n\r\n # ensure write permissions\r\n mode = os.stat(filePath).st_mode\r\n if not (mode & stat.S_IWUSR):\r\n os.chmod(filePath, mode | stat.S_IWUSR)\r\n\r\n # let the file itself know its place\r\n subprocess.call(('install_name_tool', '-id',\r\n '@executable_path/' + fileName, filePath))\r\n\r\n # find the references: call otool -L on the file\r\n otool = subprocess.Popen(('otool', '-L', filePath),\r\n stdout = subprocess.PIPE)\r\n references = otool.stdout.readlines()[1:]\r\n\r\n for reference in references:\r\n\r\n # find the actual referenced file name\r\n referencedFile = reference.decode().strip().split()[0]\r\n\r\n if referencedFile.startswith('@'):\r\n # the referencedFile is already a relative path\r\n continue\r\n\r\n path, name = os.path.split(referencedFile)\r\n\r\n # see if we provide the referenced file;\r\n # if so, change the reference\r\n if name in files:\r\n newReference = '@executable_path/' + name\r\n subprocess.call(('install_name_tool', '-change',\r\n referencedFile, newReference, filePath))",
"def __match_files(self):\r\n\r\n\t\t# Categories to return\r\n\t\tno_pairs = []\r\n\t\tpaired = {}\r\n\t\trandom = []\r\n\r\n\t\t# Temporary list to separate out each type of file\r\n\t\tpcr_file_list = []\r\n\t\tlis_file_list = []\r\n\t\trandom_file_list = []\r\n\r\n\t\t# ____ This section categorizes the files into PCR, LIS, or Unknown\r\n\t\tfor files in self.file_list:\r\n\t\t\tif files.find(\"@DI\") > -1:\r\n\t\t\t\tpcr_file_list.append(files)\r\n\t\t\telif files.find(\"@Pt2\") > -1:\r\n\t\t\t\tlis_file_list.append(files)\r\n\t\t\telse:\r\n\t\t\t\trandom_file_list.append(files)\r\n\r\n\t\t# ____ This section does the mapping to find pairs\r\n\t\tfor pcr_file in pcr_file_list:\r\n\t\t\tget_only_pcr_fname = pcr_file[pcr_file.find(\"@DI\"):]\r\n\t\t\tpartition_pcr_file = get_only_pcr_fname.split(\"-\")\r\n\t\t\tpcr_unique_id = partition_pcr_file[0].replace(\"@DI\",\"\") \\\r\n\t\t\t+ partition_pcr_file[3] + \"_\" \\\r\n\t\t\t+ partition_pcr_file[4] + \"_\" \\\r\n\t\t\t+ partition_pcr_file[5].replace(\".csv\",\"\")\r\n\r\n\t\t\tpaired[pcr_unique_id] = [pcr_file]\r\n\r\n\t\tfor lis_file in lis_file_list:\r\n\t\t\tget_only_lis_fname = lis_file[lis_file.find(\"@Pt2\"):]\r\n\t\t\tpartition_lis_file = get_only_lis_fname.split(\"-\")\r\n\t\t\tlis_unique_id = partition_lis_file[0].replace(\"@Pt2\",\"\") \\\r\n\t\t\t+ partition_lis_file[3] + \"_\" \\\r\n\t\t\t+ partition_lis_file[4] + \"_\" \\\r\n\t\t\t+ partition_lis_file[5].replace(\".lis\",\"\")\r\n\r\n\t\t\thas_pair = paired.get(lis_unique_id, None)\r\n\t\t\tif has_pair:\r\n\t\t\t\tpaired[lis_unique_id].append(lis_file)\r\n\t\t\telse:\r\n\t\t\t\tno_pairs.append(lis_file)\r\n\r\n\t\t# ____ PCR Keys to remove that have no pair\r\n\t\tpcr_keys_to_remove = []\r\n\r\n\t\tfor uniqueID, pairs in paired.items():\r\n\t\t\tif (len(pairs) < 2):\r\n\t\t\t\tpcr_keys_to_remove.append(uniqueID)\r\n\t\t\t\tno_pairs.append(pairs)\r\n\r\n\t\tfor pcr_keys in pcr_keys_to_remove:\r\n\t\t\tdel paired[pcr_keys]\r\n\r\n\t\tfinal_result = {\"paired\":paired, \"no_pairs\":no_pairs, \"random\":random}\r\n\r\n\t\treturn final_result",
"def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs",
"def _find_ref_fname(fname, ref_fname):\n curr_dir = \"\"\n next_dir = os.path.dirname(os.path.abspath(fname))\n while next_dir != curr_dir:\n curr_dir = next_dir\n rcfile = os.path.join(curr_dir, ref_fname)\n if os.path.exists(rcfile):\n return rcfile\n next_dir = os.path.dirname(curr_dir)\n return \"\"",
"def find_distortion_file(inst, aper):",
"def readdata(self, reflist , comment = '#' , regexp = None , substr = None, filename = True):\n self.kpunten = []\n datalist = []\n prefixlist = []\n if os.path.isfile(str(reflist)):\n reflist = [reflist] #if we work with only one file this wraps it automatically in right format\n for ref in reflist:\n print('start with the collection of data from file %s' %ref)\n plotf = open(ref, 'r')\n if not filename:\n prefixlist.append( os.path.dirname(ref) + '/')\n else:\n prefixlist.append(re.sub('\\.dat$' , '' , ref))\n try:\n if regexp != None:\n raise ValueError\n dataf = np.loadtxt(plotf,comments = comment)\n print 'we readed data in with np.loadtxt'\n except:\n print('reading in data with numpy loadtxt failed or use reg exp to extract information')\n dataf = np.array([])\n kpuntenf = []\n plotf.seek(0) #go back to beginning of file\n for line in plotf:\n if regexp is not None:\n analyse = re.search(regexp,line)\n if analyse:\n kpuntenf.append((analyse.group(1), len(dataf)-1 ))\n print 'we found the following matches: %s' % analyse.group(0)\n if substr != None: \n line = re.sub(substr, '' , line)\n if line[0] != comment:\n #print line\n pline = np.array(map(float,line.split()))\n if len(dataf) <= 1:\n dataf = pline\n else:\n try:\n dataf = np.vstack((dataf,pline))\n except:\n continue\n self.kpunten.append(kpuntenf)\n datalist.append(dataf)\n\n plotf.close()\n self.datarg = datalist\n self.prefix = prefixlist\n self.reader = dr.ReaderOutput(reflist[0]) #Some plotting functions need a bit more information this info is extracted from the header of the files\n self.reader.depvar['depvar'] += ' (a.u.)'",
"def find_duplicate(self, pyfile):\n for pinfo in self.pyload.api.getQueue():\n #: Check if package-folder equals pyfile's package folder\n if pinfo.folder != pyfile.package().folder:\n continue\n\n #: Now get packaged data w/ files/links\n pdata = self.pyload.api.getPackageData(pinfo.pid)\n for link in pdata.links:\n #: Check if link == \"skipped\"\n if link.status != 4:\n continue\n\n #: Check if link name collides with pdata's name\n #: and at last check if it is not pyfile itself\n if link.name == pyfile.name and link.fid != pyfile.id:\n return link",
"def _findRefFile(sdict):\n min_shift = 99999.\n reffile = None\n for img in sdict.keys():\n offx = sdict[img][0]\n offy = sdict[img][1]\n shift = numpy.sqrt(pow(offx,2) + pow(offy,2))\n \n if shift < min_shift: \n min_shift = shift\n reffile = img\n \n return reffile",
"def merge_tables():\r\n filename = \"ppxf_results_best.dat\"\r\n s1 = np.genfromtxt(filename, usecols=(0,), dtype=None).tolist()\r\n sref = s1[:]\r\n sref.sort()\r\n x, y = get_positions(sref).T\r\n r = np.sqrt(x * x + y * y)\r\n pa = np.rad2deg(np.arctan2(x, y))\r\n pa[pa < 0.] += 360.\r\n data1 = np.loadtxt(filename, usecols=np.arange(1, 11))\r\n ##########################################################################\r\n # Account for difference in resolution\r\n # Not used anymore because the resolution is now matched in pPXF\r\n # fwhm_dif = (2.5 - 2.1) * c / 5500. / 2.3548\r\n # data1[:,2] = np.sqrt(data1[:,2]**2 - fwhm_dif**2)\r\n ##########################################################################\r\n data1 = match_data(s1, sref, data1)\r\n results = np.column_stack((sref, x, y, r, pa, data1))\r\n header = ['FILE', \"X[kpc]\", \"Y[kpc]\",\r\n \"R[kpc]\", \"PA\",\r\n 'V', 'dV', 'S', 'dS', 'h3', 'dh3',\r\n 'h4', 'dh4', 'chi/DOF', 'S/N']\r\n with open(outtable, \"w\") as f:\r\n for i, field in enumerate(header):\r\n print \"# {0} : {1}\\n\".format(i, field)\r\n f.write(\"# {0} : {1}\\n\".format(i, field))\r\n np.savetxt(f, results, fmt=\"%s\")\r\n return",
"def _get_refpaths(data_dict, reference_file_types, observatory):\n if not reference_file_types: # [] interpreted as *all types*.\n return {}\n with crds_cache_locking.get_cache_lock():\n bestrefs = crds.getreferences(\n data_dict, reftypes=reference_file_types, observatory=observatory)\n refpaths = {filetype: filepath if \"N/A\" not in filepath.upper() else \"N/A\"\n for (filetype, filepath) in bestrefs.items()}\n return refpaths",
"def match(p_file, s_file, matched_p_file, matched_s_file):\n\n log.info('Matching p and s arrivals')\n\n p_arr = pd.read_csv(p_file, header=None, names=column_names, sep=' ')\n s_arr = pd.read_csv(s_file, header=None, names=column_names, sep=' ')\n\n blocks = pd.merge(p_arr[['source_block', 'station_block']],\n s_arr[['source_block', 'station_block']],\n how='inner',\n on=['source_block', 'station_block'])\n matched_P = pd.merge(p_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_S = pd.merge(s_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_P.to_csv(matched_p_file, index=False, header=False, sep=' ')\n matched_S.to_csv(matched_s_file, index=False, header=False, sep=' ')",
"def get_existing_reference_paths(self, mapping):\n references = []\n for ref in mapping.reference_names():\n path = None\n with self.error_on_exception(\"Can't locate reference file\", repr(ref)):\n path = get_existing_path(ref, mapping.observatory)\n if path:\n log.verbose(\"Reference\", repr(ref), \"exists at\", repr(path))\n references.append(path)\n return references",
"def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)",
"def get_nsite_DMRfind(inputf,output,samples,path_to_allc=\"\",mc_type=[\"C\"],num_procs=1,use_mc_status=True,min_cov=0):\n #dictionary of sample_name -> file handle\n allc_files = {}\n allc_lines = {}\n allc_fields = {}\n allc_prevbyte = {} #sample_name -> prevbyte (started from) in the file\n with open(inputf,'r') as f, open(output,'w') as g:\n line = f.readline()\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n prefix_len = len(fields) #number of fields in original file\n mc_type = expand_nucleotide_code(mc_type)\n g.write(\"\\t\".join(fields[:prefix_len])+\"\\t\"+\"\\t\".join([\"nsite_\"+sample for sample in samples])+\"\\n\")\n prev_chrom = \"\"\n prev_end = \"\"\n dmr_lines=[]\n methylation_levels = {}\n for line in f:\n line = line.rstrip(\"\\n\")\n dmr_lines.append(line)\n if num_procs == 1:\n for sample in samples:\n methylation_levels[sample]=get_nsite_DMRfind_worker(dmr_lines,mc_type,sample,path_to_allc,output,min_cov,use_mc_status=False)\n else:\n pool = Pool(num_procs)\n results = {}\n for sample in samples:\n results[sample]=pool.apply_async(get_nsite_DMRfind_worker,(dmr_lines,mc_type,sample,path_to_allc,output,min_cov),{\"use_mc_status\":False})\n pool.close()\n pool.join()\n for sample in results:\n methylation_levels[sample]=results[sample].get()\n temp_files = {}\n for sample in samples:\n temp_files[sample]=open(output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\",'r')\n\n for index,line in enumerate(dmr_lines):\n g.write(line)\n for sample in samples:\n #g.write(\"\\t\"+methylation_levels[sample][index])\n g.write(\"\\t\"+temp_files[sample].readline().rstrip(\"\\n\"))\n g.write(\"\\n\")\n for sample in samples:\n temp_files[sample].close()\n subprocess.check_call(shlex.split(\"rm \"+output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\"))",
"def _ref_exists(self):\n self._collect_soft_chain()\n\n found_ref = False\n for node in self.nodes:\n if node.id == self.ref:\n self.file = os.path.join(Constants.ALTER_DIR, node.filename)\n found_ref = True\n break\n\n return found_ref"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign names to each epi file based on information in the template. | def AssignEpiNames(self):
# Sort each run in the series by its acquisition time.
epi_sort = self.epi_times.keys()
epi_sort.sort()
# Rewrite pfiles as an ordered list of p-files to be reconstructed.
for idx in xrange(len(epi_sort)):
entry = self.epi_times[epi_sort[idx]]
info = self.info[entry]
if info['data_filetype'] == 'ge_data':
self.pfiles_recon.append(entry)
info['run'] = '%0d' % (self.n_epi)
self.n_epi = self.n_epi + 1
plane = info['plane']
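            # Fall back to the generic 'any' entry if this plane has no template names of its own.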
if not self.epinames.has_key(plane):
plane = 'any'
n_epi = self.epinames[plane]['n_epi']
if n_epi > len(self.epinames[plane]['names'])-1:
                if self.epinames.has_key('any') and \
                                    n_epi < len(self.epinames['any']['names']):
plane = 'any'
n_epi = self.epinames[plane]['n_epi']
else:
self.DumpInfo()
errstr = 'Not enough EPI names in template file'
raise RuntimeError(errstr)
# epiname = self.epinames[plane]['names'][n_epi]
filebase = os.path.basename(self.epinames[plane]['names'][n_epi])
epi_mf_outdir = os.path.dirname(\
self.epinames[plane]['names'][n_epi])
epi_base = self.epinames[plane]['subdir'][n_epi]
tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)
# Get output directory for raw epis.
if self.no_motcorr:
epi_r_outdir = epi_mf_outdir
elif self.keep_epi_raw:
epi_r_outdir = self.epi_scratch_space
else:
epi_r_outdir = tmp_outdir
# Get output directory for motion-corrected epis.
if self.keep_epi_mot:
epi_m_outdir = self.epi_scratch_space
else:
epi_m_outdir = tmp_outdir
info['outdir'] = epi_mf_outdir
if n_epi < len(self.epinames[plane]['names']):
info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)
else:
info['imgfile'] = '%s/s%0d_epi_run%0d' % \
(epi_r_outdir, n_epi, idx+1)
self.epinames[plane]['n_epi'] += 1
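            # Derived outputs: motion-parameter file, censor prefix, and intermediate '_t' image.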
info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)
info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)
info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)
if self.no_motcorr:
info['imgfile_m'] = None
info['imgfile_mf'] = None
info['imgfile_final'] = info['imgfile']
else:
info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)
if self.no_fmapcorr or info['fmap_entry'] is None:
info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)
info['imgfile_mf'] = None
info['imgfile_final'] = info['imgfile_m']
else:
info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)
info['imgfile_final'] = info['imgfile_mf']
info['skip'] = self.skip
info['motion_ref_frame'] = self.tmplt['motion_ref_frame']
info['motion_interp'] = self.tmplt['epi_motion_interp']
if not info['motion_interp'].startswith('-'):
info['motion_interp'] = '-%s' % info['motion_interp']
info['filetype'] = self.tmplt['epi_file_format']
info['valid'] = True
self.info[entry] = info
if not self.no_motcorr:
epi_base = os.path.basename(info['imgfile_m'])
info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)
info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base) | [
"def set_templates(self):\n templates = glob(os.path.join(self.tempdir, '*m*tau*.fits'))\n for template in templates:\n temp = os.path.splitext(os.path.basename(template))[0]\n nly = os.path.splitext(os.path.basename(template))[0] + '_nly'\n setattr(self, temp, fits.getdata(template))\n\n # read in *.3color for \n data = np.genfromtxt(template.replace('.fits','.3color'),\n usecols=(0,5))\n setattr(self, nly, data)",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ",
"def _get_output_files_postprocess(self):\n name_pattern = self.name_pattern.format(action=\"postprocess\")\n for infix in (\"targets\", \"targets_segmented\", \"segments\", \"gene_call\", \"gene_log2\"):\n for key, ext in ((\"txt\", \".txt\"), (\"md5\", \".txt.md5\")):\n name = \"{}_{}\".format(infix, key)\n yield name, os.path.join(\n \"work\", name_pattern, \"out\", name_pattern + \"_\" + infix + ext\n )",
"def fill_output_files_info(self):\n def get_output_file_info(filename):\n for output_module in self.job_report_output.values():\n for output_file_info in output_module:\n ifile = get_file_index(filename, [output_file_info])\n if ifile is not None:\n return output_file_info\n return None\n\n with open('taskinformation.pkl', 'rb') as fd:\n task = pickle.load(fd)\n\n ## Loop over the output files that have to be collected.\n for filename in self.output_files_names:\n ## Search for the output file info in the job report.\n output_file_info = get_output_file_info(filename)\n ## Get the original file name, without the job id.\n left_piece, jobid_fileext = filename.rsplit(\"_\", 1)\n orig_file_name = left_piece\n if \".\" in jobid_fileext:\n fileext = jobid_fileext.rsplit(\".\", 1)[-1]\n orig_file_name = left_piece + \".\" + fileext\n ## If there is an output file info, parse it.\n if output_file_info:\n msg = \"Output file info for %s: %s\" % (orig_file_name, output_file_info)\n self.logger.debug(msg)\n file_info = {}\n self.output_files_info.append(file_info)\n # assign filetype based on the classification made by CRAB Client\n file_info['pfn'] = orig_file_name\n if file_info['pfn'] in task['tm_edm_outfiles']:\n file_info['filetype'] = 'EDM'\n elif file_info['pfn'] in task['tm_tfile_outfiles']:\n file_info['filetype'] = 'TFILE'\n elif file_info['pfn'] in task['tm_outfiles']:\n file_info['filetype'] = 'FAKE'\n else:\n file_info['filetype'] = 'UNKNOWN'\n file_info['module_label'] = output_file_info.get('module_label', 'unknown')\n file_info['inparentlfns'] = [str(i) for i in output_file_info.get('input', [])]\n file_info['events'] = output_file_info.get('events', 0)\n file_info['checksums'] = output_file_info.get('checksums', {'cksum': '0', 'adler32': '0'})\n file_info['outsize'] = output_file_info.get('size', 0)\n if 'pset_hash' in output_file_info:\n file_info['pset_hash'] = output_file_info['pset_hash']\n else:\n file_info['pset_hash'] = 32*'0'\n if 'pfn' in output_file_info:\n file_info['pfn'] = str(output_file_info['pfn'])\n file_info['local_stageout'] = output_file_info.get('local_stageout', False)\n file_info['direct_stageout'] = output_file_info.get('direct_stageout', False)\n file_info['outlocation'] = self.dest_site\n if output_file_info.get('temp_storage_site', 'unknown') != 'unknown':\n file_info['outtmplocation'] = output_file_info.get('temp_storage_site')\n elif self.job_report.get('temp_storage_site', 'unknown') != 'unknown':\n file_info['outtmplocation'] = self.job_report.get('temp_storage_site')\n else:\n file_info['outtmplocation'] = self.executed_site\n file_info['outlfn'] = os.path.join(self.dest_dir, filename)\n file_info['outtmplfn'] = os.path.join(self.source_dir, filename)\n if 'runs' not in output_file_info:\n continue\n file_info['outfileruns'] = []\n file_info['outfilelumis'] = []\n for run, lumis in output_file_info['runs'].items():\n file_info['outfileruns'].append(str(run))\n # Creating a string like '100:20,101:21,105:20...'\n # where the lumi is followed by a colon and number of events in that lumi.\n # Note that the events per lumi information is provided by WMCore version >=1.1.2 when parsing FWJR.\n lumisAndEvents = ','.join(['{0}:{1}'.format(str(lumi), str(numEvents))\n for lumi, numEvents in lumis.items()])\n file_info['outfilelumis'].append(lumisAndEvents)\n else:\n msg = \"Output file info for %s not found in job report.\" % (orig_file_name)\n self.logger.error(msg)\n # now that we have collected info from all files, we can define\n # output dataset name 
for each.\n # First find if there are multiple datasets in this job\n # since in that case the output dataset name is different. See:\n # https://twiki.cern.ch/twiki/bin/view/CMSPublic/Crab3DataHandling#Output_dataset_names_in_DBS\n edm_file_count = 0\n for file_info in self.output_files_info:\n if file_info['filetype'] == 'EDM':\n edm_file_count += 1\n multiple_edm = edm_file_count > 1\n # now compute the output dataset name for each file and add it to file_info\n outdataset = None\n for file_info in self.output_files_info:\n # use common function with ASOServerJob to ensure uniform dataset name\n if file_info['filetype'] in ['EDM', 'DQM']:\n # group name overrides username when present:\n username = self.job_ad['CRAB_UserHN']\n if self.dest_dir.startswith('/store/group/') and self.dest_dir.split('/')[3]:\n username = self.dest_dir.split('/')[3]\n module_label = file_info.get('module_label') if multiple_edm else None\n outdataset = compute_outputdataset_name(primaryDS=self.job_ad['CRAB_PrimaryDataset'],\n username=username,\n publish_name=self.publish_name,\n pset_hash=file_info['pset_hash'],\n module_label=module_label)\n else:\n outdataset = G_FAKE_OUTDATASET\n file_info['output_dataset'] = outdataset\n\n self.output_dataset = outdataset # we do not support multiple output datasets anymore",
"def make_output_names(self):\n yaml_names = []\n fits_names = []\n\n if self.use_nonstsci_names:\n for i in range(len(self.info['Module'])):\n act = str(self.info['act_id'][i]).zfill(2)\n if self.info['Instrument'][i].lower() == 'niriss':\n det = 'NIS'\n elif self.info['Instrument'][i].lower() == 'fgs':\n det = 'FGS'\n else:\n det = self.info['detector'][i]\n mode = self.info['Mode'][i]\n dither = str(self.info['dither'][i]).zfill(2)\n\n yaml_names.append(os.path.abspath(os.path.join(self.output_dir, 'Act{}_{}_{}_Dither{}.yaml'\n .format(act, det, mode, dither))))\n fits_names.append('Act{}_{}_{}_Dither{}_uncal.fits'.format(act, det, mode, dither))\n\n else:\n for i in range(len(self.info['Module'])):\n if self.info['Instrument'][i].upper() == 'NIRCAM':\n fulldetector = 'nrc{}'.format(self.info['detector'][i].lower())\n else:\n fulldetector = self.info['detector'][i].lower()\n outfilebase = self.create_output_name(self.info, index=i)\n outfile = \"{}{}{}\".format(outfilebase, fulldetector, '_uncal.fits')\n yamlout = \"{}{}{}\".format(outfilebase, fulldetector, '.yaml')\n\n yaml_names.append(yamlout)\n fits_names.append(outfile)\n\n self.info['yamlfile'] = yaml_names\n self.info['outputfits'] = fits_names\n # Table([self.info['yamlfile']]).pprint()",
"def _add_filename_metadata(self, extra_metadata):\n\n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n\n file_name = os.path.basename(self.fname)\n\n # Add file/scan name\n extra_metadata['product_info']['Name'] = file_name.split('_')[0]",
"def _create_info_files(self):\n self._create_info_general_file()\n self._create_info_annotation_file()\n shutil.copyfile(self.rconfig.pipeline_config_file, self.info_file_config)\n shutil.copyfile(self.file_list, self.info_file_filelist)",
"def use_intermediate_files(self):\n # just do this once\n if self.renamed:\n return\n self.renamed = True\n \n for f in self.fields:\n ofn = mbuild.join(self.intermediate_dir,self.file_name[f])\n self.file_name[f] = ofn # update file name-- only call once!",
"def modify_namelist(self):\n orig = self.config['template']\n splitted_orig = os.path.split(orig)\n copied = os.path.join(\n splitted_orig[0],\n 'temp_{0:s}'.format(splitted_orig[1])\n )\n logger.debug('Copied template path: {0:s}'.format(copied))\n with open(orig, mode='r') as orig_file:\n template = orig_file.read()\n for placeholder, value in self.replacement_dict.items():\n template = template.replace(placeholder, str(value))\n with open(copied, mode='w') as copied_file:\n copied_file.write(template)\n return copied",
"def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])",
"def fill_output_files_info(self):\n for output_module in self.job_report_output.values():\n for output_file_info in output_module:\n msg = \"Output file info for %s: %s\"\n msg = msg % (output_file_info.get(u'pfn', \"(unknown 'pfn')\").split('/')[-1], output_file_info)\n logger.debug(msg)\n file_info = {}\n self.output_files_info.append(file_info)\n ## Note incorrect spelling of 'output module' in current WMCore\n if (output_file_info.get(u'output_module_class', '') == u'PoolOutputModule' or \\\n output_file_info.get(u'ouput_module_class', '') == u'PoolOutputModule'):\n file_info['filetype'] = 'EDM'\n elif output_file_info.get(u'Source', '') == u'TFileService':\n file_info['filetype'] = 'TFILE'\n else:\n file_info['filetype'] = 'FAKE'\n file_info['module_label'] = output_file_info.get(u'module_label', 'unknown')\n file_info['inparentlfns'] = [str(i) for i in output_file_info.get(u'input', [])]\n file_info['events'] = output_file_info.get(u'events', 0)\n file_info['checksums'] = output_file_info.get(u'checksums', {'cksum': '0', 'adler32': '0'})\n file_info['outsize'] = output_file_info.get(u'size', 0)\n if u'pset_hash' in output_file_info:\n file_info['pset_hash'] = output_file_info[u'pset_hash']\n if u'pfn' in output_file_info:\n file_info['pfn'] = str(output_file_info[u'pfn'])\n file_info['local_stageout'] = output_file_info.get(u'local_stageout', False)\n file_info['direct_stageout'] = output_file_info.get(u'direct_stageout', False)\n file_info['outlocation'] = self.dest_site\n if output_file_info.get(u'temp_storage_site', 'unknown') != 'unknown':\n file_info['outtmplocation'] = output_file_info.get(u'temp_storage_site')\n elif self.job_report.get(u'temp_storage_site', 'unknown') != 'unknown':\n file_info['outtmplocation'] = self.job_report.get(u'temp_storage_site')\n else:\n file_info['outtmplocation'] = self.executed_site\n if u'runs' not in output_file_info:\n continue\n file_info['outfileruns'] = []\n file_info['outfilelumis'] = []\n for run, lumis in output_file_info[u'runs'].items():\n file_info['outfileruns'].append(str(run))\n file_info['outfilelumis'].append(','.join(map(str, lumis)))\n for filename in self.output_files_names:\n ifile = get_file_index(filename, self.output_files_info)\n if ifile is None:\n continue\n self.output_files_info[ifile]['outlfn'] = os.path.join(self.dest_dir, filename)\n self.output_files_info[ifile]['outtmplfn'] = os.path.join(self.source_dir, filename)",
"def process_templates(templates, folder, project_dir, project_name, project_description,\n author_name, author_email, require_web):\n\n for afile, template, capitalize, web_component in templates:\n actual_file = afile.format(project_name=project_name, project_description=project_description)\n path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n if not require_web and web_component:\n continue\n\n with open(os.path.join(path, \"templates\", folder, template)) as f:\n contents = f.read()\n name = project_name.capitalize() if capitalize else project_name\n actual_contents = contents.format(project_name=name, project_description=project_description,\n author_name=author_name, author_email=author_email,\n version=pkg_resources.get_distribution(\"surround\").version)\n if require_web and afile == \"pyproject.toml\":\n actual_contents = actual_contents.replace(\n \"\\n[build-system]\",\n \"fastapi = \\\"0.79.0\\\"\\nuvicorn = \\\"0.18.2\\\"\\n\\n[build-system]\")\n\n file_path = os.path.join(project_dir, actual_file)\n with open(file_path, 'w') as f:\n f.write(actual_contents)",
"def __parse_exp_files(self, files):\n for json_file_path in files:\n print(\"Reading \", json_file_path)\n exp_json = read_json(json_file_path)\n # This is a global exps json\n if \"sim_files\" in exp_json:\n # For results collections we want to know which jobs\n # where obtained through global descriptions\n exps = self.__parse_exp_files(exp_json[\"sim_files\"])\n if exps:\n self.global_desc.append(GlobDesc(exp_json, exps))\n else:\n self.__configure_experiment(exp_json)",
"def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)",
"def _init_output_files(self, pras_file):\n if not self._pras_file.endswith('.h5'):\n self._output_file += '.h5'\n self._output_file = pras_file.replace('.h5', '_updated.h5')\n msg = (\"pras_file={} does not exist. This file must come from a \"\n \"previous ReEDS2PRAS run\").format(self._pras_file)\n assert os.path.exists(self._pras_file), msg\n\n if not os.path.exists(self._output_file):\n shutil.copy(self._pras_file, self._output_file)\n\n self._qa_file = self._output_file.replace(\n '.h5', '_qa_{}_{}.csv'.format(self._tech_type, self._res_class))\n\n self._prof_file = self._output_file.replace(\n '.h5', '_{}_{}.csv'.format(self._tech_type, self._res_class))",
"def process_files(files, project_dir, project_name, project_description, require_web):\n\n for afile, content in files:\n actual_file = afile.format(project_name=project_name, project_description=project_description)\n actual_content = content.format(project_name=project_name, project_description=project_description, version=pkg_resources.get_distribution(\"surround\").version)\n file_path = os.path.join(project_dir, actual_file)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n with open(file_path, 'w') as f:\n f.write(actual_content)",
"def outputFileNames(self, pathFile):\n pathTree = ET.parse(pathFile)\n pathRoot = pathTree.getroot()\n self.outputFileNameDict = {}\n xmlNodes = [\n 'reactions', 'atoms_plot', 'atoms_csv', 'decay_heat', 'bu_power',\n 'flux', 'repository'\n ]\n for xmlNodeNumber in range(0, len(xmlNodes)):\n for xmlNode in pathRoot.iter(xmlNodes[xmlNodeNumber]):\n self.outputFileNameDict[xmlNodes[xmlNodeNumber]] = xmlNode.text",
"def get_region_template_names(self, region, postfix):\n region_template_name = \"%s\" % region + '_%s' % postfix if postfix else region\n region_template_file = \"%s.%s\" % (region_template_name, self.get_template_format())\n path_elements = self.get_path_elements()\n elements_count = len(path_elements)\n for cut in range(elements_count):\n path_prefix = os.path.join(*path_elements[:elements_count - cut])\n yield os.path.join(path_prefix, region_template_file)\n yield region_template_file"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump the info object to a yaml file. | def DumpInfo(self):
if self.logdir is None:
return
self.dumpfile = '%s/preprocess_info.yaml' % (self.logdir)
try:
f = open(self.dumpfile,'w')
f.write(yaml.dump(self.info,default_flow_style=False, indent=4))
f.close()
except IOError:
self.errors = True
errstr = 'Error accessing %s' % self.dumpfile
            self.LogErrors(errstr)
            raise IOError(errstr)
"def dump(self):\n print(yaml.dump(self.toYaml(), width=float(\"inf\")))",
"def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))",
"def write(self):\n print yaml.dump(self._config, default_flow_style=False),",
"def save(self, filename): \n stream = file(filename, 'w')\n yaml.dump(self, stream)",
"def to_yaml(obj: ConfiguredBaseModel, file: str):\n\n fh = open(file, \"w\") if file else sys.stdout\n\n if isinstance(obj, Entity):\n yaml.dump(obj.dict(), fh, indent=4)\n elif isinstance(obj, Results) or isinstance(obj, HistoPheno) or isinstance(obj, AssociationCountList):\n yaml.dump([item.dict() for item in obj.items], fh, indent=4)\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n if file:\n console.print(f\"\\nOutput written to {file}\\n\")\n fh.close()\n\n return",
"def dump(info_path, extended=False): \n info = json.load(open(info_path + '.json'))\n print 'prefix:', info['parentDir']\n print 'title:', info['title']\n print 'labels:', '%d=%s'%(info['bestAttr'][1], info['fields'][info['bestAttr'][1]][0])\n print 'fields:', ', '.join('%d=%s'%(i,t[0]) for i,t in enumerate(info['fields']))\n print 'shapes:', len(info['shapes'])\n if extended:\n pprint.pprint(info['shapes'])\n \n print",
"def dump(data, file_path):\n write_yaml(data, file_path)",
"def to_yaml(cls,dumper,self):\n #self.__modelData['ids'] = self.__mapObj.ids\n self.__modelData['ids'] = ','.join(map(str,self.__mapObj.ids))\n\n ##GENERATE Overview\n old_size = self.__size\n self.__mapObj.size = PREVIEW_SIZE\n typ,dat,width,height = processOverview(self.__mapObj.png)\n self.__modelData['overview_typ'] = typ\n self.__modelData['overview_dat'] = dat\n self.__modelData['overview_width'] = width\n self.__modelData['overview_height'] = height\n self.__mapObj.size = old_size\n #END Overview\n\n node = dumper.represent_mapping(cls.yaml_tag,self.__modelData)\n self.SetModified(False)\n return node",
"def dump_record(self, output_file):\n info = {\n \"best_model\": {\n \"best_model_file\": self.best_model_file,\n \"training_loss\": self.train_loss,\n \"validation_loss\": self.validation_loss,\n },\n \"optimizer_setting\": self.optimizer_settings,\n \"logs\": self._logs\n }\n\n with open(str(output_file), \"w\") as f:\n yaml.safe_dump(info, f, default_flow_style=False)",
"def yaml_dump(self, filepath, data):\n with open(filepath, \"w\") as file_descriptor:\n yaml.dump(data, file_descriptor, default_flow_style=False)",
"def dump_yaml(self, data, output):\n yaml.indent(mapping=MAPPING, sequence=SEQUENCE, offset=OFFSET)\n yaml.dump(data, output)",
"def save(self, config_path, instance):\n logging.debug('Saving to config_path = {0!r}'.format(config_path))\n with open(config_path, 'wb') as save_f:# Write data to file.\n yaml.dump(\n data=vars(instance),\n stream=save_f,\n explicit_start=True,# Begin with '---'\n explicit_end=True,# End with '...'\n default_flow_style=False# Output as multiple lines\n )\n return",
"def write_yaml(self, filename=\"modulation.yaml\"):\n self._write_yaml(filename=filename)",
"def to_file(self, output_file_path: str) -> None:\n with open(output_file_path, \"w+\") as f:\n yaml.dump(\n self.as_dict(),\n f,\n sort_keys=False,\n default_flow_style=False\n )",
"def dump(self, obj, fp):\n pass",
"def to_yaml(self):\n\t\treturn yaml.dump(self.data,default_flow_style=False)",
"def test_to_yaml(self) -> None:\n entry = Entry(\"Cao_2019\", self.EXAMPLE_ENTRY_DICT)\n yaml_str = YAMLParser().dump(entry)\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as file:\n assert yaml_str == file.read()",
"def save_to_yaml(self, path=None):\n\n if not path:\n path = \".\".join([self.name.value, \"yaml\"])\n\n planet_dict = {}\n for a in sorted(self.attributes):\n exo_param = getattr(self, a)\n param_dict = exo_param.__dict__\n param_dict = {k: str(v)\n for k, v in param_dict.items()\n if v and len(str(v)) > 0}\n planet_dict[a] = param_dict\n\n with open(path, 'w') as yamlfile:\n yaml.dump(planet_dict, yamlfile, default_flow_style=False)",
"def as_yaml(self):\n return yaml.dump(mapping([\n (\"slug\", quoted(self.slug)),\n (\"name\", quoted(self.name)),\n (\"text\", literal(self.text)),\n (\"check\", literal(self.check)),\n (\"solution\", literal(self.solution)),\n ]), indent=4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert anatomical images from dicom or ifiles to briks or niftis. | def ConvertAnat(self):
if self.verbose:
print 'Convert T1 and T2 images...'
for entry in self.info:
info = self.info[entry]
if self.info[entry]['imgfile'] is None:
continue
if self.info[entry]['type'] in self.anat_types:
imgfile = self.info[entry]['imgfile']
cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \
imgfile, self.info[entry]['filetype'])
checkfile = '%s%s' % (imgfile, self.info[entry]['suffix'])
self.CheckExec(cmd, [checkfile])
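                # Skull-strip the normalization source image with AFNI's 3dSkullStrip.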
if self.info[entry]['norm_src'] and self.skull_strip:
cmd = "3dSkullStrip -input %s -prefix %s" % \
(checkfile, self.info[entry]['imgfile_skstrip'])
checkfile = '%s+orig.BRIK' % \
(self.info[entry]['imgfile_skstrip'])
self.CheckExec(cmd, [checkfile]) | [
"def _get_medical_image_blob(roidb):\n num_images = len(roidb)\n processed_ims = []\n pre_ims = []\n post_ims = []\n abdo_masks = []\n for i in range(num_images):\n im = raw_reader(roidb[i][\"image\"], cfg.MET_TYPE, [roidb[i][\"height\"], roidb[i][\"width\"]])\n if roidb[i]['flipped']:\n im = im[:, ::-1]\n processed_ims.append(im)\n\n mask = abdominal_mask(im.copy())\n abdo_masks.append(mask)\n \n if cfg.THREE_SLICES:\n # get pre-image\n basename = osp.basename(roidb[i][\"image\"])\n names = basename[:-4].split(\"_\")\n slice_num = int(names[-1])\n if slice_num == 0:\n pre_im = im\n else:\n slice_num -= 1\n names[-1] = str(slice_num)\n basename = \"_\".join(names) + \".raw\"\n pre_path = osp.join(osp.dirname(roidb[i][\"image\"]), basename)\n pre_im = raw_reader(pre_path, cfg.MET_TYPE, [roidb[i][\"height\"], roidb[i][\"width\"]])\n if roidb[i]['flipped']:\n pre_im = pre_im[:, ::-1]\n pre_ims.append(pre_im)\n\n # get post-image\n basename = osp.basename(roidb[i][\"image\"])\n names = basename[:-4].split(\"_\")\n names[-1] = str(int(names[-1]) + 1)\n basename = \"_\".join(names) + \".raw\"\n post_path = osp.join(osp.dirname(roidb[i][\"image\"]), basename)\n try:\n post_im = raw_reader(post_path, cfg.MET_TYPE, [roidb[i][\"height\"], roidb[i][\"width\"]])\n if roidb[i]['flipped']:\n post_im = post_im[:, ::-1]\n except FileNotFoundError:\n post_im = im\n post_ims.append(post_im)\n\n num_images = len(processed_ims)\n blob = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE, 3), dtype=np.float32)\n abdo_mask = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE), dtype=np.bool)\n if cfg.THREE_SLICES:\n for i in range(num_images):\n blob[i,:,:,0] = pre_ims[i]\n blob[i,:,:,1] = processed_ims[i]\n blob[i,:,:,2] = post_ims[i]\n abdo_mask[i,:,:] = abdo_masks[i]\n else:\n for i in range(num_images):\n blob[i,:,:,0] = processed_ims[i]\n blob[i,:,:,1] = processed_ims[i]\n blob[i,:,:,2] = processed_ims[i]\n abdo_mask[i,:,:] = abdo_masks[i]\n\n if cfg.USE_WIDTH_LEVEL:\n win, wind2, lev = cfg.WIDTH, cfg.WIDTH / 2, cfg.LEVEL\n blob = (np.clip(blob, lev - wind2, lev + wind2) - (lev - wind2)) / 2**16 * win\n else:\n blob /= cfg.MED_IMG_UPPER\n blob = np.clip(blob, -1., 1.)\n\n return blob, abdo_mask",
"def convert_nifti_2_img(path_img_in, path_img_out):\n if not os.path.isfile(path_img_in):\n raise FileNotFoundError('missing input: %s' % path_img_in)\n if not os.path.isdir(os.path.dirname(path_img_out)):\n raise FileNotFoundError('missing output: %s' % os.path.dirname(path_img_out))\n\n nim = nibabel.load(path_img_in)\n\n if len(nim.get_data().shape) > 2: # colour\n img = np.swapaxes(np.swapaxes(nim.get_data(), 0, 3), 1, 4)\n dims = img.shape\n img = img.reshape([dims[0], dims[1], dims[3]])\n else: # gray\n # img = nim.get_data()\n img = np.swapaxes(nim.get_data(), 1, 0)\n\n if img.max() > 1:\n img = img / 255.\n\n io_imsave(path_img_out, img)\n return path_img_out",
"def _get_image_blob(roidb):\n num_images = len(roidb)\n\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = io.imread(roidb[i]['image'], plugin='tifffile')\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n im, im_scale = blob_utils.prep_im_for_blob(im, roidb[i], 'train')\n im_scales.append(im_scale[0])\n processed_ims.append(im[0])\n\n # Create a blob to hold the input images [n, c, s, h, w]\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales",
"def _get_image_blobs(self, roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n im, im_scale = self._get_image_blob(im, scale_inds[i], False)\n im_scales.append(im_scale)\n processed_ims.append(im)\n \n # Create a blob to hold the input images\n blob = self.im_list_to_blob(processed_ims)\n \n return blob, im_scales",
"def nifti2dicom(seg_nifti, bk_nifti, ref_dicom_dir, save_dir, description, mode_RGB=False, zoom_num=4, watermarks=True): \n #Load nifti, here is segmentation and background\n seg_image = sitk.ReadImage(seg_nifti)\n seg_image = sitk.GetArrayFromImage(seg_image)\n seg_image = seg_image.astype(np.uint8)\n \n # print(nifti_image.shape)\n bk_image = sitk.ReadImage(bk_nifti)\n bk_image = sitk.GetArrayFromImage(bk_image)\n\n #Get Volume report from the seg_image, cubic ml, and the 95% CI:\n v_nonenhancing = round(seg_image[seg_image==1].sum()/1000,1)\n ci_nonenhancing = round(v_nonenhancing*0.2,1)\n v_enhancing = round(seg_image[seg_image==4].sum()/1000,1)\n ci_enhancing = round(v_enhancing*0.3,1)\n v_edema = round(seg_image[seg_image==2].sum()/1000,1)\n ci_edema = round(v_edema*0.1,1)\n\n #Loading the reference dicom, in order to get the headers of each slice. \n series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(ref_dicom_dir)\n if not series_IDs:\n print(\"ERROR: given directory \\\"\"+data_directory+\"\\\" does not contain a DICOM series.\")\n sys.exit(1)\n\n series_file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(ref_dicom_dir, series_IDs[0])\n\n series_reader = sitk.ImageSeriesReader()\n series_reader.SetFileNames(series_file_names)\n\n # Configure the reader to load all of the DICOM tags (public+private):\n # By default tags are not loaded (saves time).\n # By default if tags are loaded, the private tags are not loaded.\n # We explicitly configure the reader to load tags, including the private ones.\n series_reader.MetaDataDictionaryArrayUpdateOn()\n series_reader.LoadPrivateTagsOn()\n ref_image = series_reader.Execute()\n \n #set reader for slice \n reader = sitk.ImageFileReader()\n reader.LoadPrivateTagsOn()\n \n writer = sitk.ImageFileWriter()\n # Use the study/series/frame of reference information given in the meta-data\n # dictionary and not the automatically generated information from the file IO\n writer.KeepOriginalImageUIDOn()\n\n # Copy some of the tags and add the relevant tags indicating the change.\n # For the series instance UID (0020|000e), each of the components is a number, cannot start\n # with zero, and separated by a '.' We create a unique series ID using the date and time. 
tags of interest:\n \n castFilter = sitk.CastImageFilter()\n castFilter.SetOutputPixelType(sitk.sitkInt16)\n ORG_ROOT=\"1.3.12.2\"\n #create SeriesInstanceUID and StudyInstanceUID\n SeriesInstanceUID = generateUID(org_root=ORG_ROOT)\n StudyInstanceUID = generateUID(org_root=ORG_ROOT)\n #create a prefix for the accession number\n acc='BTS'+series_reader.GetMetaData(0,\"0008|0050\")\n #changing spacing\n reader.SetFileName(series_file_names[0])\n reader.ReadImageInformation()\n\n if mode_RGB:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID),\n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID), \n (\"0028|0004\", 'RGB'),\n (\"0028|0002\", \"3\")]\n else:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID), \n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID)] \n\n os.makedirs(save_dir, exist_ok = True)\n\n #for nifti, the main axis is the first one, while for dicoms it is the last one\n for i in range(ref_image.GetDepth()):\n #zoom 2 times, todo need to figure out which axis to zoom, post is the 3rd\n #pre assume the first axis is the slice numbers\n bk_slice = ndimage.zoom(bk_image[i,:,:], zoom_num, order=0)\n seg_slice = ndimage.zoom(seg_image[i,:,:], zoom_num, order=0)\n \n #Due to the DICOM saving coordinate system is different with nifti,i.e mirrored, it is easier to flip array\n bk_slice = np.flip(bk_slice, (0, 1)) \n seg_slice = np.flip(seg_slice, (0, 1)) \n\n #get contours\n seg_idx = get_contours(seg_slice)\n \n #add watermarks\n if watermarks:\n canvas_tmp = np.zeros(list(bk_slice.shape), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(canvas_tmp,'FOR RESEARCH ONLY;REFER TO OFFICIAL REPORT FOR DETAILS',(10,30), \n font,2,255,1)\n cv2.putText(canvas_tmp,'(This tool is intended for evaluation of gliomas, and results may be unreliable for other pathologies)',(90,50), \n font,1,255,1) \n #add Legend and volumes \n cv2.putText(canvas_tmp, 'Legend Volume(+/-95% CI)',(10,900), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Edema {v_edema}+/-{ci_edema} mL',(30,920), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Enhancing {v_enhancing}+/-{ci_enhancing} mL',(30,940), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Non- {v_nonenhancing}+/-{ci_nonenhancing} mL',(30,960), font,0.8,255,1)\n cv2.putText(canvas_tmp,'Enhancing', (30,975), font,0.8,255,1)\n cv2.putText(canvas_tmp,'(The error is based on testing of algorithm performance vs. 
manual segmentation)', (150,1000), font,1,255,1)\n\n \n \n #burning segmentation contour into slices\n cv2.line(seg_idx, (10,915), (20,915), 2, 2)\n cv2.line(seg_idx, (10,935), (20,935), 4, 2)\n cv2.line(seg_idx, (10,955), (20,955), 1, 2)\n \n if mode_RGB:\n #burning the watermarks\n bk_slice[canvas_tmp==255]=bk_slice.max()\n #convert dicom from nomogram to RGB\n bk_slice = toRGB(bk_slice)\n #colorize the bk_slice according to seg_idx\n bk_slice[0,:,:,0][seg_idx==1] = 255\n bk_slice[0,:,:,1][seg_idx==4] = 255\n bk_slice[0,:,:,2][seg_idx==2] = 255 \n else:\n #grey the ori_image_slice according to seg_idx\n bk_slice[canvas_tmp==255]=bk_slice.max()//2\n bk_slice[seg_idx==1] = bk_slice.max()*2//50\n bk_slice[seg_idx==2] = bk_slice.max()*1//50\n bk_slice[seg_idx==4] = bk_slice.max()*3//50\n\n converted_slice = sitk.GetImageFromArray(bk_slice)\n reader.SetFileName(series_file_names[i])\n reader.ReadImageInformation()\n spacing_new = [i/zoom_num for i in reader.GetSpacing()[:-1]] + [reader.GetSpacing()[-1]]\n \n #generate SOPInstanceUID\n SOPInstanceUID = generateUID(org_root=ORG_ROOT)\n series_tag_values = [(k, reader.GetMetaData(k)) for k in reader.GetMetaDataKeys()] + customized_tag_values + [(\"0008|0018\", SOPInstanceUID)]\n# print(series_tag_values)\n if '_seg_' in description:\n converted_slice = converted_slice \n \n # Tags shared by the series.\n for tag, value in series_tag_values:\n converted_slice.SetMetaData(tag, value)\n \n # especially set spacing tags\n # Image Position (Patient)\n converted_slice.SetMetaData(\"0020|0013\", str(i)) # Instance Number\n converted_slice.SetSpacing(spacing_new)\n \n # Write to the output directory and add the extension dcm, to force writing in DICOM format \n writer.SetFileName(os.path.join(save_dir, str(i)+'.dcm'))\n writer.Execute(converted_slice)",
"def get_iband_mags():\n os.chdir(unicorn.GRISM_HOME+'ANALYSIS/')\n \n unicorn.catalogs.read_catalogs()\n from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp\n \n ids = zout.id[0::3]\n fields = phot.field[phot.idx]\n \n iflux = zout.z_peak[0::3]*0.-1\n imod = iflux*1.\n lc_i = iflux*1.\n hflux = iflux*1\n hmod = iflux*1.\n lc_h = iflux*1\n \n count = 0\n for id, field in zip(ids, fields):\n path = unicorn.GRISM_HOME+'ANALYSIS/REDSHIFT_FITS_v1.6/ASCII/%s/%s_obs_sed.dat' %(field, id)\n if os.path.exists(path):\n print unicorn.noNewLine+id\n obs = catIO.Readfile(path)\n dlam_spec = obs.lc[-1]-obs.lc[-2]\n is_spec = np.append(np.abs(1-np.abs(obs.lc[1:]-obs.lc[0:-1])/dlam_spec) < 0.05,True)\n dl_i = np.abs(obs.lc-7688.1)\n dl_h = np.abs(obs.lc[~is_spec]-1.6315e4)\n ix_i = np.where(dl_i == dl_i.min())[0][0]\n ix_h = np.where(dl_h == dl_h.min())[0][0]\n iflux[count] = obs.fnu[ix_i]\n imod[count] = obs.obs_sed[ix_i]\n lc_i[count] = obs.lc[ix_i]\n hflux[count] = obs.fnu[ix_h]\n hmod[count] = obs.obs_sed[ix_h]\n lc_h[count] = obs.lc[ix_h]\n # \n count = count+1\n \n fp = open('full_imag_hmag.dat','w')\n fp.write('# id iflux imodel lc_i hflux hmodel lc_h\\n')\n for i in range(len(ids)):\n fp.write('%s %.5e %.5e %.1f %.5e %.5e %.1f\\n' %(ids[i], iflux[i], imod[i], lc_i[i], hflux[i], hmod[i], lc_h[i]))\n fp.close()",
"def dicom_dat(self, pt):\n if self.verbose: print('Converting DaT scan DICOM images to png from {}'.format(pt))\n try:\n conv = Converter(dicom_root_path=pt, run=True, log_dir='/tmp', verbose=self.verbose, cleanup=True,\n same_name=True, yes_to_all=self.yes_to_all)\n self.yes_to_all = conv.yes_to_all # in case yes_to_all was changed\n conv_results = conv.get_results()\n print('{:<40} {}'.format('DICOM images converted to png:', conv_results[0]))\n if conv_results[1]: print('{:<40} {}'.format('DICOM images failed to convert:', conv_results[1]))\n except ValueError:\n print('No DICOM files in {}'.format(pt))",
"def get_imgs(self, key):\n img = self.hf[key]['image'][()]\n img = Image.open(io.BytesIO(img)).convert('RGB')\n\n bbox = None\n if \"bounding_box\" in self.hf[key].keys():\n bbox = self.hf[key][\"bounding_box\"][()]\n\n width, height = img.size\n # For the 'Birds' dataset we crop the image to the\n # corresponding bounding box in order for it to have\n # greater-than-0.75 object-image size ratio.\n if bbox is not None:\n R = int(np.maximum(bbox[2], bbox[3]) * 0.75)\n center_x = int((2 * bbox[0] + bbox[2]) / 2)\n center_y = int((2 * bbox[1] + bbox[3]) / 2)\n y1 = np.maximum(0, center_y - R)\n y2 = np.minimum(height, center_y + R)\n x1 = np.maximum(0, center_x - R)\n x2 = np.minimum(width, center_x + R)\n img = img.crop([x1, y1, x2, y2])\n\n if self.transform is not None:\n img = self.transform(img)\n\n normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n br_images = []\n for i in range(self.branch_num):\n if self.img_sizes[i] == img.size:\n br_images.append(normalize(img))\n else:\n scaled_image = transforms.Resize(\n (self.img_sizes[i], self.img_sizes[i])\n )(img)\n br_images.append(normalize(scaled_image))\n\n return br_images",
"def convert(self):\n \n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n if len(vrtlist)!=0:\n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d mosaicked images were successfully converted to %d %s files.' % (dataCount, len(vrtlist), tifCount, str(self.outformat)))\n \n \n if len(vrtlist)==0: \n \n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(len(hdflist)):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n\n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n \n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/full*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d HDF files were successfully converted to %d %s files.' % (dataCount, len(hdflist), tifCount, str(self.outformat)))",
"def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break",
"def d2n_lossless(dcm_folder, nifti, meta_folder):\n\n dcm_files = sorted(glob.glob(os.path.join(dcm_folder, \"*.dcm\")))\n for(i, file) in enumerate(dcm_files):\n fileName = os.path.split(file)[1]\n ds = pydicom.dcmread(file)\n sliceThickness = ds.SliceThickness # for nifti's Affine\n PixelSpacing = ds.PixelSpacing # for nifti's Affine\n empty = bytes(0)\n ds.PixelData = empty\n ds.Rows = 0\n ds.Columns = 0\n if not os.path.exists(meta_folder):\n os.makedirs(meta_folder)\n path = os.path.join(meta_folder, fileName)\n ds.save_as(path)\n\n data = ReadData(dcm_folder)\n output = nifti\n affine = np.eye(4)\n affine[0][0] = sliceThickness\n affine[1][1] = PixelSpacing[0]\n affine[2][2] = PixelSpacing[1]\n img = nib.Nifti1Image(data, affine)\n nib.save(img, output)",
"def preprocess_images(self, images):\n raise NotImplementedError",
"def get_images(self, dtype='http://kgbench.info/dt#base64Image'):\n from PIL import Image\n\n res = []\n # Take in base64 string and return cv image\n num_noparse = 0\n for b64 in self.get_strings(dtype):\n try:\n imgdata = base64.urlsafe_b64decode(b64)\n except:\n print(f'Could not decode b64 string {b64}')\n sys.exit()\n\n try:\n res.append(Image.open(io.BytesIO(imgdata)))\n except:\n num_noparse += 1\n res.append(Image.new('RGB', (1, 1)))\n # -- If the image can't be parsed, we insert a 1x1 black image\n\n if num_noparse > 0:\n warnings.warn(f'There were {num_noparse} images that couldn\\'t be parsed. These have been replaced by black images.')\n\n # print(num_noparse, 'unparseable', len([r for r in res if r is not None]), 'parseable')\n\n return res",
"def cast(obj: 'itkLightObject') -> \"itkImageNBNIF22 *\":\n return _itkSparseFieldFourthOrderLevelSetImageFilterPython.itkImageNBNIF22_cast(obj)",
"def dicom_to_nrrd(self, dicom_root_dir, nrrd_files_dir):\n TEMP_FILE = '/Users/chunwei/Downloads/_TEMP'\n SYSTEM_COMMAND = 'gdcmconv -w {0} {1}'\n\n for i, subject_folder in enumerate(glob.glob(dicom_root_dir + '/*')):\n nrrd_file = nrrd_files_dir + '/'\\\n + re.search(self.KEY_WORD_FLODER, subject_folder).group()\\\n + '_%02d.nrrd' % (i + 1)\n print 'Processing ' + nrrd_file\n\n if not os.path.exists(nrrd_files_dir):\n os.makedirs(nrrd_files_dir)\n\n data_3d = None\n\n dicom_files = glob.glob(subject_folder + '/*')\n for j, dicom_file in enumerate(dicom_files):\n # prompt\n ratio = 100 * float(j)/float(len(dicom_files))\n sys.stdout.write('\\r%d%%' % ratio)\n sys.stdout.flush()\n\n # uncompress the dicom image\n command = SYSTEM_COMMAND.format(dicom_file, TEMP_FILE)\n call(command.split(), shell=False)\n\n # concatenate dicom image layer by layer\n ds = dicom.read_file(TEMP_FILE)\n data = ds.pixel_array\n data_3d = self.concatenate_layers(data_3d, data) # bottom up\n\n # get nrrd options\n options = self.load_dicom_options(TEMP_FILE, len(dicom_file))\n\n # transpose the data\n data_3d = numpy.swapaxes(data_3d, 0, 1)\n data_3d = data_3d[:, :, ::-1]\n\n # write the stack files in nrrd format\n nrrd.write(nrrd_file, data_3d, options)\n\n print",
"def extract_imgs_from_dicom(directory, out_directory):\n allfiles = os.listdir(directory)\n for filename in allfiles[:]:\n# if not \"file\" in filename:\n if not \"results\" in filename:\n if not \"ipynb\" in filename:\n ds = pydicom.read_file(os.path.join(directory, filename),force=True)\n if (\"NumberOfFrames\" in dir(ds)) and (ds.NumberOfFrames>1): #if cine\n outrawfilename = filename + \"_raw\"\n out_directory_path = out_directory + '/' + outrawfilename\n ds.save_as(out_directory_path)\n counter = 0\n while counter < 5:\n counter = read_dicom(out_directory, filename, counter)\n counter = counter + 1\n elif (ds[0x8,0x8][3] == \"0001\"): # if still with measurements\n outrawfilename = filename + \"_raw\"\n out_directory_path = out_directory + '/' + outrawfilename\n ds.save_as(out_directory_path)\n counter = 0\n while counter < 5:\n counter = read_dicom_still(out_directory, filename, counter)\n counter = counter + 1 \n else: # else pulse wave or M mode or Doppler?\n print(filename + \" not cine or still\")\n return 1",
"def itkImageNBNIF22_cast(*args):\n return _itkSparseFieldFourthOrderLevelSetImageFilterPython.itkImageNBNIF22_cast(*args)",
"def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])",
"def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
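The negatives above mostly revolve around medical-image format conversion (DICOM, NIfTI, NRRD) with SimpleITK, pydicom, and nibabel. For orientation only, a minimal sketch of the common pattern they share, reading one DICOM series and writing it out as a single NIfTI volume (assuming SimpleITK is available; the function name and paths are placeholders, not taken from any of the samples):

import SimpleITK as sitk

def dicom_series_to_nifti(dicom_dir, nifti_path):
    # Collect the sorted file names of the series, read them as one 3D volume,
    # and write that volume in NIfTI format.
    file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(dicom_dir)
    reader = sitk.ImageSeriesReader()
    reader.SetFileNames(file_names)
    image = reader.Execute()
    sitk.WriteImage(image, nifti_path)

# Hypothetical usage:
# dicom_series_to_nifti('/path/to/dicom_dir', 'volume.nii.gz')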
Create the fieldmap(s) and the corresponding magnitude images. | def MakeFieldmaps(self):
if self.verbose:
print 'Compute fieldmaps.'
for entry in self.info:
if self.info[entry]['type'] == 'fmap':
                if self.info[entry]['imgfile'] is None:
# Fieldmap data not found.
return
# Make a magnitude image for use in checking registration.
cmd = 'convert_file -f0 -m0 %s %s nii' % \
(entry, self.info[entry]['magfile'])
self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])
# Make fieldmap. Use separate loop in case make_fmap aborts.
for entry in self.info:
if self.info[entry]['type'] == 'fmap':
fmapname = self.info[entry]['imgfile']
if not os.path.exists('%s.nii' % fmapname) or self.redo:
                    # No existing fmap was found (or a redo was requested), so compute a new one.
if self.verbose:
extra_args = '-v'
else:
extra_args = ''
if self.info[entry]['correct_fmap_phase'] == 'force':
extra_args += ' --force-slicecorr'
elif self.info[entry]['correct_fmap_phase'] == 'omit':
extra_args += ' --omit-slicecorr'
cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)
# error = self.ExecCmd(cmd, halt_on_error=False)
if self.no_fmapcorr:
halt_on_error = False
else:
halt_on_error = True
error = self.CheckExec(cmd, ['%s.nii' % fmapname], \
halt_on_error=halt_on_error)
if error:
self.info[entry]['valid'] = False
del self.fmaps[entry] | [
"def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP",
"def _get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v",
"def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break",
"def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = 
self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map",
"def generateImage(self):\n print(\"Convert final structure to image...\")\n \n self.img = Image.new('RGB', (constants.xImageSize+1, constants.yImageSize+1))\n print(\" Image size: x/y \", constants.xImageSize, constants.yImageSize)\n xMax = int(float(constants.xStorageSize) / self.xStorageToImageRatio)\n yMax = int(float(constants.yStorageSize)/self.yStorageToImageRatio)\n\n # Loop over map struct writing pixels\n # this does mean pixels get rewritten given resizing which isnt \n # optimal\n for x in range(constants.xStorageSize):\n for y in range(constants.yStorageSize):\n c = self.xy[x][y].getCount()\n \n rgbCol = self.getPostcodeColour(self.xy[x][y].getPostcode())\n \n xImg = int(float(x) / self.xStorageToImageRatio)\n yImg = constants.yImageSize - int(float(y)/self.yStorageToImageRatio)\n if c > 0:\n self.img.putpixel((xImg,yImg),(rgbCol.getRed(),rgbCol.getGreen(),rgbCol.getBlue()))\n else:\n self.img.putpixel((xImg,yImg),(255,255,255))\n self.img.show()\n self.img.save(constants.outPngFilename)",
"def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None",
"def generate_map(self) -> None:\n min, max = self.get_aabb_for_link()\n self.height = int((max[0] - min[0]) // self.resolution)\n self.width = int((max[1] - min[1]) // self.resolution)\n self.map = np.ones((self.height, self.width))",
"def get_field_data(self):\n if not self._coverage_computed:\n self.compute_coverage()\n \n # Find the total number of fields\n num_fields = 0\n for m in xrange(self.num_maps):\n if self.fields[m] is not None:\n num_fields += self.fields[m].shape[0]\n \n # Initialize data fields\n field_id = N.arange(num_fields)\n unit_id = N.empty(num_fields, 'h')\n area = N.empty(num_fields, 'd')\n diameter = N.empty(num_fields, 'd')\n radius = N.empty(num_fields, 'd')\n maximum = N.empty(num_fields, 'd')\n average = N.empty(num_fields, 'd')\n center_x = N.empty(num_fields, 'd')\n center_y = N.empty(num_fields, 'd')\n \n # Quantify place field characteristics\n f_id = 0\n for m in xrange(self.num_maps): \n if self.fields[m] is None:\n continue\n \n for field in self.fields[m]:\n \n # Single field-masked ratemap and sum\n rates = field * self.Map[m]\n rates_sum = float(rates.sum())\n \n # Place unit identification\n unit_id[f_id] = m\n \n # Coverage geometry\n area[f_id] = field.sum()\n diameter[f_id] = 2*N.sqrt(area[f_id]/N.pi)\n radius[f_id] = diameter[f_id] / 2\n \n # Rate-dependent quantities\n maximum[f_id] = rates.max()\n average[f_id] = rates_sum / area[f_id]\n center_x[f_id] = (self._xrange[N.newaxis,:] * rates).sum() \\\n / rates_sum\n center_y[f_id] = (self._yrange[:,N.newaxis] * rates).sum() \\\n / rates_sum\n \n f_id += 1\n \n # Create records array\n field_data = N.rec.fromarrays(\n [field_id, unit_id, area, diameter, radius, maximum, average,\n center_x, center_y],\n names='id, unit, area, diameter, radius, peak, average, x, y',\n formats='l, l, l, d, d, d, d, d, d')\n \n return field_data",
"def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255",
"def process_building_map(\n rmf_building_map: RmfBuildingMap,\n static_files: StaticFilesRepository,\n) -> BuildingMap:\n processed_map = message_to_ordereddict(rmf_building_map)\n\n for i, level in enumerate(rmf_building_map.levels):\n level: RmfLevel\n for j, image in enumerate(level.images):\n image: RmfAffineImage\n # look at non-crypto hashes if we need more performance\n sha1_hash = hashlib.sha1()\n sha1_hash.update(image.data)\n fingerprint = base64.b32encode(sha1_hash.digest()).lower().decode()\n relpath = f\"{rmf_building_map.name}/{level.name}-{image.name}.{fingerprint}.{image.encoding}\" # pylint: disable=line-too-long\n urlpath = static_files.add_file(image.data, relpath)\n processed_map[\"levels\"][i][\"images\"][j][\"data\"] = urlpath\n return BuildingMap(**processed_map)",
"def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)",
"def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)",
"def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n 
self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": 
\"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))",
"def add_field(self, img_dict):\n for k in img_dict.keys():\n assert k in self.bands, \"Celeste model doesn't support band %s\"%k\n self.field_list.append(Field(img_dict))",
"def get_maps(self):\n if self.lite:\n raise ValueError(\"Input maps unavailable for lightweight fields. \"\n \"To use this function, create an `NmtFieldFlat` \"\n \"object with `lite=False`.\")\n maps = np.zeros([self.fl.nmaps, self.fl.npix])\n for imap in range(self.fl.nmaps):\n maps[imap, :] = lib.get_map_flat(self.fl, imap, int(self.fl.npix))\n mps = maps.reshape([self.fl.nmaps, self.ny, self.nx])\n\n return mps",
"def __initArrays(self):\n maps = self.renderable.getArrays()\n for map in maps:\n self.addMap( map)",
"def _make_amp_images(self):\n self.amp_images = OrderedDict()\n sensor_props = self.fp_props.get_sensor(self.sensor_id)\n for amp_name in sensor_props.amp_names:\n self._make_amp_image(amp_name)\n self._apply_crosstalk()\n for amp_name in sensor_props.amp_names:\n self._add_read_noise_and_bias(amp_name)",
"def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps",
"def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
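MakeFieldmaps above delegates the actual computation to the external make_fmap tool, whose internals are not shown in this row. For orientation only, the standard dual-echo relation such tools are typically built on, field offset in Hz from the phase difference of two echoes, as a small numpy sketch (variable names are illustrative and not the tool's API):

import numpy as np

def b0_fieldmap_hz(phase_echo1, phase_echo2, delta_te):
    # Phase difference wrapped back into [-pi, pi), then converted to Hz:
    # delta_f = delta_phi / (2 * pi * delta_TE), with delta_TE in seconds.
    dphi = np.angle(np.exp(1j * (phase_echo2 - phase_echo1)))
    return dphi / (2.0 * np.pi * delta_te)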
Create links to BRIK, HEAD, and .nii files. | def LinkFiles(self, srcdir, target):
if '+orig' in target:
tgt_prefix = target.replace('.BRIK','')
tgt_prefix = tgt_prefix.replace('.HEAD','')
linkfiles = ['%s.HEAD'%tgt_prefix, '%s.BRIK' %tgt_prefix]
else:
linkfiles = [target]
for linkfile in linkfiles:
linkname = '%s/%s' % (srcdir, os.path.basename(linkfile))
rel_linkdir = abspath_to_relpath(os.path.dirname(target), srcdir)
rel_linkfile = '%s/%s' % (rel_linkdir, os.path.basename(linkfile))
if not os.path.exists(linkname) and not os.path.islink(linkname):
cmd = 'cd %s && ln -s %s %s' % (srcdir, rel_linkfile, linkname)
self.ExecCmd(cmd) | [
"def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))",
"def _create_links(output_dir: str) -> None:\n os.symlink(os.devnull, output_dir + \"/categories_transcriptome.fa\")\n os.symlink(os.devnull, output_dir + \"/categories_noMatch.fa\")\n os.symlink(os.devnull, output_dir + \"/categories_multiMatch.fa\")\n os.symlink(os.devnull, output_dir + \"/categories_summary.tsv\")",
"def fixLinks():",
"def bagURLLinks(request, identifier):\n # assign the proxy url\n proxyRoot = request.build_absolute_uri('/')\n # attempt to grab a bag,\n get_object_or_404(Bag, name__exact=identifier)\n try:\n transList = generateBagFiles(identifier, proxyRoot, settings.CODA_PROXY_MODE)\n except FileHandleError:\n raise Http404\n return render(request, 'mdstore/bag_files_download.html',\n {'links': sorted(transList)})",
"def buildRollLinks(self):\n\t\n\tprint \"Building Roll Links\"\n\trollLocation = self.dist.getRollsPath()\n\tsubprocess.call('mkdir -p %s' % rollLocation, shell=True)\n\n\trolls = []\n\tfor mirror in self.dist.getMirrors():\n\t\trolldir = mirror.getRollsPath()\n\t\tif not os.path.exists(rolldir):\n\t\t\tcontinue\n\t\tfor d in os.listdir(rolldir):\n\t\t\trollpath = os.path.join(rolldir,d)\n\t\t\tif os.path.isdir(rollpath):\n\t\t\t\trolls.append(rollpath)\n\n\there = os.getcwd()\n\tos.chdir(rollLocation)\n\tfor r in rolls:\n\t\tsubprocess.call('ln -sf %s .' % (r), shell=True)\n\tos.chdir(here)",
"def build(link_file_names, index_files):\n os.mkdir(\"./build\")\n for linkf, index in zip(link_file_names, index_files):\n os.mkdir(f\"./build/{linkf}\")\n with open(f\"./build/{linkf}/index.html\", \"w+\") as f:\n f.write(index)",
"def _create_readme(upstream, relative_path, product):\n content = Path(upstream.first).read_text()\n url = ('https://github.com/ploomber/projects/'\n f'blob/master/{relative_path}/README.ipynb')\n link_to_ipynb = ('\\nFor a notebook version (with outputs) of this '\n f'file, [click here]({url})')\n\n lines = content.splitlines()\n\n # add link to readme.ipynb at the header\n try:\n end_header = lines.index('<!-- end header -->')\n except ValueError:\n pass\n else:\n lines.insert(end_header, link_to_ipynb)\n content = '\\n'.join(lines)\n\n content = delete_metadata(content)\n Path(product).write_text(content)",
"def create_reference_files(cxn, log):\n log.info('Preparing reference gene files for exonerate')\n for ref in db.select_reference_genes(cxn):\n with open(ref['ref_file'], 'w') as ref_file:\n util.write_fasta_record(ref_file, ref['ref_name'], ref['ref_seq'])",
"def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo -S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return",
"def def_links(mobj):\n fdict = json_load(os.path.join(\"data\", \"requirements.json\"))\n sdeps = sorted(fdict.keys())\n olines = []\n for item in sdeps:\n olines.append(\n \".. _{name}: {url}\\n\".format(\n name=fdict[item][\"name\"], url=fdict[item][\"url\"]\n )\n )\n ret = []\n for line in olines:\n wobj = textwrap.wrap(line, width=LINE_WIDTH, subsequent_indent=\" \")\n ret.append(\"\\n\".join([item for item in wobj]))\n mobj.out(\"\\n\".join(ret))",
"def run():\n onlyfiles = [f for f in listdir(\".\") if isfile(join(\".\", f))]\n links = {} # Save the links found in each file\n broken_links = {} # Save the links that are not valid in each of the files\n print(\"Testing links...\")\n for file in onlyfiles:\n f = open(file, \"r\", encoding=\"utf8\")\n md = f.read()\n md_links = find_md_links(md) \n if file in links.keys():\n links[file] += md_links\n else:\n links[file] = md_links\n\n for key in links:\n for link in links[key]:\n time.sleep(1) #Avoid GitHub detecting us a robot\n encodedUrl = urllib.parse.quote(link, safe=':/')\n try: \n webUrl = urllib.request.urlopen(encodedUrl)\n data = webUrl.read().decode(\"utf-8\")\n if \"Create New Page\" in data:\n if key in broken_links.keys():\n broken_links[key].append(link)\n else:\n broken_links[key] = [link]\n except:\n if key in broken_links.keys():\n broken_links[key].append(link)\n else:\n broken_links[key] = [link]\n return broken_links",
"def makeBackrefLink(self, info, g_links, i):\n atts, content, infoid, link = '', '', '', ''\n if 'def' in info:\n link = info['def']['link']\n backlink_type = link or g_links\n i_ = self.encode_high(i)\n allow_inc = i not in self.syms\n i_ = int(i_)\n\n if backlink_type == \"!\":\n return ''\n elif backlink_type == '^':\n return \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n info['refids'][0], i\n )\n else:\n result = []\n for refid in info['refids']:\n i_entity = self.decode_high(i_)\n sup = \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n refid, i_entity\n )\n if allow_inc:\n i_ += 1\n result.append(sup)\n result = ' '.join(result)\n return result",
"def get_doc_links():\n doc_links = {}\n for path in iter_docs():\n order, file_name = path.with_suffix('').name.split('_', 1)\n if file_name == 'index':\n continue\n if file_name in doc_links:\n raise ValueError(f\"Two documentation pages have the file name '{file_name}'. Change the file name for one of them.\")\n doc_links[file_name] = path_to_url(path.relative_to(docs_path))\n return doc_links",
"def link_artist(artist, tags):\n created_links = 0\n for tag in tags:\n print \">>> %s -\" % tag,\n source_path = artist.path\n\n tag_dir = os.path.join(CFG.sort_dir, tag)\n if not os.path.isdir(tag_dir):\n if CFG.debug:\n print \"makedirs: '%s'\" % tag_dir\n os.makedirs(tag_dir)\n\n dst_dir = os.path.join(CFG.sort_dir, tag, artist.name)\n if CFG.debug:\n print source_path, dst_dir\n\n if os.path.isdir(dst_dir):\n print \"symbolic link exist, skip.\"\n continue\n\n os.symlink(source_path, dst_dir)\n print \"symbolic link created.\"\n created_links += 1\n \n return created_links",
"def _setup_links(self):\n pass",
"def create_symlinks(self):\n\n # create in reverse order, because of chained symlinks\n for exporter in self.exporters[::-1]:\n exporter.create_symlinks()",
"def create_home_directory_symbolic_links():\n file_paths = (\n path\n for path in repo_home.rglob(\"*\")\n if path.is_file() and not path.is_symlink()\n )\n\n for file_path in file_paths:\n sym_link_path = translate_home_path(file_path)\n\n if sym_link_path.is_file() and not sym_link_path.is_symlink():\n backup_file(sym_link_path)\n sym_link_path.unlink()\n\n if sym_link_path.is_symlink():\n sym_link_path.unlink()\n\n print(f\"Creating Symlink: {sym_link_path} -> {file_path}\")\n sym_link_path.symlink_to(file_path)",
"def convert_links(file):\n try:\n print(f\"Converting {file.name}...\")\n with open(file) as enex:\n soup = str(BeautifulSoup(enex, \"html.parser\"))\n soup_sub = re.sub(r'(<a.*?href=\"evernote.*?>)(.*?)(</a>?)', r\"[[\\2]]\", soup)\n soup_sub = re.sub(r\"(<h1.*?>)(.*?)(</h1>?)\", r\"\\2\", soup_sub)\n with open(f\"{os.path.dirname(file)}/bear/{file.name}\", \"x\") as new_enex:\n new_enex.write(soup_sub)\n print(\"Done. New file available in the bear subdirectory.\")\n except Exception as e:\n print(f\"An error occurred:\\n{e}\\nPlease try again.\")",
"def symlink_hidden(files):\n hidden = get_hidden_files(files)\n\n # TODO: change to home directory just to be safe\n\n # TODO: Remove $HOME/hidden directory \n\n # TODO: Create $HOME/hidden directory using \n\n for f in hidden:\n # TODO: build target link name without leading . in link name\n print(f\"Creating Symbolic Link for file {f} at {os.path.join('hidden',link_name)}\")\n\n # TODO: create symlink use os.symlink "
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
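LinkFiles above builds relative symlinks by shelling out to ln -s, with abspath_to_relpath as an in-house helper. The same effect can be had in pure Python; a minimal sketch under that assumption, with os.path.relpath standing in for the helper and the function name being illustrative:

import os

def link_relative(srcdir, target):
    # Create srcdir/<basename(target)> as a symlink that points at target
    # via a path relative to srcdir, skipping links that already exist.
    linkname = os.path.join(srcdir, os.path.basename(target))
    rel_target = os.path.relpath(target, start=srcdir)
    if not os.path.lexists(linkname):
        os.symlink(rel_target, linkname)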
Extract the initial EPIs stored in dicom format. | def ExtractFirstEpi(self):
for entry in self.info:
if self.info[entry]['type'] == 'first_epi':
epiname = self.info[entry]['imgfile']
cmd = 'convert_file %s -f0 %s %s %s' % \
                    (self.flip_opts, entry, epiname, self.info[entry]['filetype'])
fname = '%s%s' % (epiname, self.info[entry]['suffix'])
self.CheckExec(cmd, [fname])
self.info[entry]['imgfile'] = fname | [
"def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK",
"def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)",
"def get_emep_dat():\n # EMEP all data\n emep_dep_dir_in = r'd:\\doren_src_data\\EMEP'\n emep_dep_name_in = 'QNdepAll_hdr.txt'\n return pd.read_csv(os.path.join(emep_dep_dir_in, emep_dep_name_in), sep=';')",
"def main_isoforms(self, genome):\n rng = self.id_mapper['OMA'].genome_range(genome)\n prot_tab = self.get_hdf5_handle().get_node('/Protein/Entries')\n return prot_tab.read_where(\n '(EntryNr >= {}) & (EntryNr <= {}) & ((AltSpliceVariant == EntryNr) | (AltSpliceVariant == 0))'\n .format(rng[0], rng[1]))",
"def cisExtract(cis_file):\n cis = open(cis_file)#read cis-file\n cis_info = dict()#empty dictionary to store cis-element information\n line = cis.readline()#read the first line of cis-file\n while line:#while loop to keep line will not be empty\n if line[0].startswith('>'):#the first element of each line that startswith '>' is the name of cis-element\n cis_id = line.split()[0]#get the name of of cis-element\n line = cis.readline()#read next line followed by the name of cis-element\n cis_seq = line.split()[0]#the sequence is always the first element.\n cis_info[cis_id]=cis_seq#append the sequences of cis-element into cis_info dictionary indexed by its name\n else:\n line = cis.readline()\n print('Extraction of cis-element names and sequences are completed!')\n return(cis_info)",
"def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])",
"def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps",
"def get_abstract_info(root):\n tempGenSpe = dict() # {'genus':genus_name,''species':species_name}\n tempGenSpe['genus'] = ''\n tempGenSpe['species'] = ''\n df_nonEmpty = pd.DataFrame(columns=['genus', 'species'])\n\n abStructure = './front/article-meta/abstract/p/italic/{http://www.plazi.org/taxpub}taxon-name/{http://www.plazi.org/taxpub}taxon-name-part'\n\n for item in root.iterfind(abStructure):\n if item.attrib['taxon-name-part-type'] == 'genus':\n tempGenSpe['genus'] = item.attrib['reg']\n if item.attrib['taxon-name-part-type'] == 'species':\n tempGenSpe['species'] = item.attrib['reg']\n\n if tempGenSpe['species'] != '':\n insertRow = pd.Series([tempGenSpe['genus'], tempGenSpe['species']], index=['genus', 'species'])\n df_nonEmpty = df_nonEmpty.append(insertRow, ignore_index=True)\n tempGenSpe['genus'] = ''\n tempGenSpe['species'] = ''\n return df_nonEmpty",
"def read_icd(self):\n wiki = wikipediaapi.Wikipedia('en') # may as well declare this here so I don't need to call it every query\n supplemental_articles = []\n with open(ICD10_DESC_PATH, 'r') as f:\n current_family = [] # list of lists of descriptions within the current family (3 letter code = family)\n current_parent = None # Most recent 3 letter code seen\n for line in tqdm(f.readlines(), desc=\"ICD10 Lines Processed\"):\n\n code = line[6:14].strip().lower()\n description = simple_clean(line[77:])\n self.code2desc[code] = description.split()\n\n if len(code) == PARENT_CODE_LENGTH: # found a parent\n # query web if set params to True\n wiki_result = self.query_wikipedia(wiki, description) if self.query else []\n pubmed_result = self.query_pubmed(description) if self.query else []\n\n # store results\n if wiki_result:\n supplemental_articles.extend(wiki_result)\n if pubmed_result:\n supplemental_articles.extend(pubmed_result)\n\n # update metrics using current family\n self.process_family_frequencies(current_parent, current_family)\n current_family = []\n current_parent = code\n current_family.append(description.split())\n self.n_desc += 1\n\n # process the last family\n self.process_family_frequencies(current_parent, current_family)\n # go through all the articles we found, preprocess, and add to self.data\n self.data.extend(self.process_articles(supplemental_articles))\n\n # lastly calculate tf and idf over all descriptions (not including articles here) for use in weighting later\n self.n_words = log10(self.n_words)\n self.n_desc = log10(self.n_words)\n self.word2tf = {word: log10(count) - self.n_words for word, count in self.word2tf.items()}\n self.word2df = {word: count - self.n_desc for word, count in self.word2df.items()}\n self.dump()",
"def get_virus_epitopes(self):\n\t\tprint(\"Return all epitopes of virus taxid={}\".format(self.current_virus_taxon_id))\n\t\tepitopes = []\n\t\tfor epi in self.current_virus_epitopes:\n\t\t\tepi_attributes = epi.get_all_attributes()\n\t\t\tif epi_attributes[\"is_linear\"]:\n\t\t\t\tepi_seq = epi_attributes[\"region_seq\"]\n\t\t\telse:\n\t\t\t\tepi_seq = None\n\t\t\tif not epi_seq:\n\t\t\t\tepi_seq = ''\n\t\t\tif not epi_attributes[\"mhc_class\"]:\n\t\t\t\tepi_attributes[\"mhc_class\"] = ''\n\t\t\tif not epi_attributes[\"mhc_allele\"]:\n\t\t\t\tepi_attributes[\"mhc_allele\"] = ''\n\n\t\t\tepitope = tuple([int(epi_attributes[\"epitope_id\"]),\n\t\t\t\t\t\t\t int(epi_attributes[\"virus_taxid\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"host_iri\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes['host_name']),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes['host_ncbi_id']),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"protein_ncbi_id\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"cell_type\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"mhc_class\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"mhc_allele\"]),\n\t\t\t\t\t\t\t parse_to_float_or_none(epi_attributes[\"response_frequency_positive\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"assay_types\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_seq),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes[\"region_start\"]),\n\t\t\t\t\t\t\t parse_to_int_or_none(epi_attributes[\"region_stop\"]),\n\t\t\t\t\t\t\t \",\".join(epi_attributes[\"external_links\"]),\n\t\t\t\t\t\t\t parse_to_string_or_none(epi_attributes[\"prediction_process\"]),\n\t\t\t\t\t\t\t epi_attributes[\"is_linear\"],\n\t\t\t\t\t\t\t epi_attributes[\"epitope_iri\"],\n\t\t\t\t\t\t\t epi_attributes[\"iedb_epitope_id\"]])\n\t\t\tepitopes.append(epitope)\n\t\treturn epitopes",
"def _get_all_psfex_objects(self, meds):\n desdata=os.environ['DESDATA']\n meds_desdata=meds._meta['DESDATA'][0]\n\n psfex_list=[]\n info=meds.get_image_info()\n nimage=info.size\n\n for i in xrange(nimage):\n impath=info['image_path'][i].strip()\n psfpath=impath.replace('.fits.fz','_psfcat.psf')\n\n if desdata not in psfpath:\n psfpath=psfpath.replace(meds_desdata,desdata)\n\n pex=psfex.PSFEx(psfpath)\n psfex_list.append(pex)\n\n return psfex_list",
"def _read_elp_sidecar(fname):\n fname_elp = fname.parent / (fname.stem + \".elp\")\n if not fname_elp.exists():\n logger.info(f\"No {fname_elp} file present containing electrode \" \"information.\")\n return None\n\n logger.info(f\"Reading electrode names and types from {fname_elp}\")\n ch_types = OrderedDict()\n with open(fname_elp) as f:\n lines = f.readlines()\n if len(lines[0].split()) > 3:\n # Channel types present\n for line in lines:\n ch_type, ch_name = line.split()[:2]\n ch_types[ch_name] = ch_type.lower()\n else:\n # No channel types present\n logger.info(\n \"No channel types present in .elp file. Marking all \" \"channels as EEG.\"\n )\n for line in lines:\n ch_name = line.split()[:1]\n ch_types[ch_name] = \"eeg\"\n return ch_types",
"def extract_dta(path, EIS_name):\n dummy_col = [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"J\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"O\",\n \"P\",\n ]\n init = pd.read_csv(path + EIS_name, encoding=\"latin1\", sep=\"\\t\", names=dummy_col)\n ZC = pd.Index(init.A)\n header_loc = ZC.get_loc(\"ZCURVE\") + 1 ##ZC.get_loc('ZCURVE')+3\n\n header_names_raw = pd.read_csv(\n path + EIS_name, sep=\"\\t\", skiprows=header_loc, encoding=\"latin1\"\n ) # locates number of skiplines\n header_names = []\n for j in range(len(header_names_raw.columns)):\n header_names.append(\n correct_text_EIS(header_names_raw.columns[j])\n ) # reads coloumn text\n data = pd.read_csv(\n path + EIS_name,\n sep=\"\\t\",\n skiprows=ZC.get_loc(\"ZCURVE\") + 3,\n names=header_names,\n encoding=\"latin1\",\n )\n data.update({\"im\": np.abs(data.im)})\n data = data.assign(cycle_number=1.0)\n return data",
"def extract(args):\n prism.extract.run(\n input_fp=args.input,\n output_fp=args.output,\n depth_cutoff=args.depth_cutoff,\n num_cpg_cutoff=args.num_cpg_cutoff,\n prepend_chr=args.prepend_chr,\n paired=args.paired,\n verbose=args.verbose,\n )",
"def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes",
"def parseAbstract():\n # my code here\n with open(data_path) as f:\n lines = f.readlines()\n abstracts = []\n for line in lines[1:]:\n try:\n temp_abstracts = line.split('\\t')[7]\n # temp_abstracts = line.split('\\t')[7][:-3]\n abstracts.append(temp_abstracts)\n except IndexError:\n continue\n return abstracts",
"def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)",
"def create_dipeptide_composition(data):\r\n \r\n data=data\r\n \r\n aminoacids=[\"A\",\"R\",\"N\",\"D\",\"C\",\"Q\",\"E\",\"G\",\"H\",\"I\",\"L\",\"K\",\"M\",\"F\",\"P\",\"S\",\"T\",\"W\",\"Y\",\"V\"]\r\n Dipeptides=[]\r\n for item in aminoacids:\r\n pep1=item\r\n for item in aminoacids:\r\n pep2=str(pep1)+str(item)\r\n Dipeptides.append(pep2)\r\n Dipeptidestuple=tuple(Dipeptides)\r\n \r\n FEATURES=np.array(Dipeptidestuple)\r\n \r\n index = range(len(data))\r\n for row in index:\r\n print(\"Creating Dipeptide composition. Row:%s\" % row)\r\n sequence=data.values[row][3]\r\n feature=np.array(dipeptide_composition(sequence))\r\n FEATURES=np.vstack((FEATURES,feature))\r\n return FEATURES",
"def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reconstruct the EPIs from pfiles. | def ReconEpis(self):
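        # run[] tallies reconstructed runs per series number; zeros() is presumably
        # imported from numpy at module level.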
run = zeros(100)
if self.verbose:
print 'Reconstruct EPIs'
for pfile in self.pfiles_recon:
if self.info[pfile]['refdat'] is None:
# Find the ref.dat file later.
continue
if self.info[pfile]['compression'] is not None:
# Data are compressed, copy to tmp.
compression = self.info[pfile]['compression']
pfile_decomp = '%s/%s' % (self.tmpdir, \
os.path.basename(self.info[pfile]['pfile_decomp']))
if os.path.exists(pfile_decomp):
                errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \
                         ' in ReconEpis'
                self.error_log = self.error_log + errstr
cmd = '%s %s > %s' % \
(decompress_cmds[compression], pfile, pfile_decomp)
self.ExecCmd(cmd)
else:
# Create a link on /tmp to the pfile so the link to ref.dat will also
# be on /tmp, (which is always writeable.)
pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))
if not os.path.exists(pfile_decomp):
os.symlink(pfile, pfile_decomp)
refname, refcmpress = self.CheckCompression( \
self.info[pfile]['refdat'])
if refcmpress is not None:
refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))
cmd = '%s %s > %s' % \
(decompress_cmds[refcmpress], \
self.info[pfile]['refdat'], refdat_decomp)
self.ExecCmd(cmd)
else:
refdat_decomp = self.info[pfile]['refdat']
if refdat_decomp is not None:
if refdat_decomp != 'ref.dat':
# Create link bearing the file name epirecon_ex expects.
refdat_link = '%s/ref.dat' % self.tmpdir
if not os.path.exists(refdat_link):
if self.verbose:
print 'ln -s %s %s' % (refdat_decomp, refdat_link)
if os.path.islink(refdat_link):
# ref.dat is a broken symbolic link.
if self.verbose:
                        print 'rm %s' % refdat_link
os.remove(refdat_link)
try:
os.symlink(refdat_decomp, refdat_link)
except OSError:
self.errors = True
pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))
os.symlink(pfile_decomp, pfile_link)
os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)
series = int(self.info[pfile]['series'])
run[series] = run[series] + 1
epiname = self.info[pfile]['imgfile']
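            # epirecon_ex is expected to write <epiname>+orig.BRIK; CheckExec
            # presumably runs the command and verifies that the output file exists.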
cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \
(pfile_decomp, epiname, self.skip)
fname = '%s+orig.BRIK' % epiname
self.CheckExec(cmd, [fname])
# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']
else:
errstr = '*******************************************\n' + \
'No ref.dat file exists for %s\n' % pfile + \
'*******************************************\n'
self.error_log = self.error_log + errstr
self.f_crash.write(errstr) | [
"def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)",
"def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname",
"def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK",
"def from_file(epub_file):\n self = Epub()\n\n # TODO: zipfile.ZipFile accepts a file or a fileobject.\n # That seems ambiguous. We should probably create a\n # separate method to create an EPUB from a file object to be more\n # clear.\n\n if (isinstance(epub_file, file)):\n self.filename = file.name\n\n if (isinstance(epub_file, str)):\n self.filename = epub_file\n\n try:\n archive = zipfile.ZipFile(epub_file)\n except Exception as e:\n print 'Could not open zipfile \"%s\" \\n' % self.filename\n print e\n\n # parse container.xml for full path to content.opf file\n container_xml = archive.read(PATH_TO_CONTAINER_XML)\n container_xml_tree = etree.fromstring(container_xml)\n fullpath = container_xml_tree.xpath('n:rootfiles/n:rootfile/@full-path',\n namespaces=XML_NAMESPACES)[0]\n\n # Each major XML element in the content.opf file is mapped to its own class.\n # This dict maps those classes to the XPaths that point to the corresponding XML\n # element.\n #\n # for example: the XPath \"opf:package\" points to the '<package>' XML element\n # which is mapped to the Package class\n element_map = [{'name': 'package',\n 'class': Package,\n 'element_xpath': '/opf:package'},\n {'name': 'metadata',\n 'class': MetaData,\n 'element_xpath': '/opf:package/opf:metadata',\n 'sub_element_class': Element,\n 'sub_element_xpath': \"./*\"},\n {'name': 'manifest',\n 'class': Manifest,\n 'element_xpath': '/opf:package/opf:manifest',\n 'sub_element_class': ManifestElement,\n 'sub_element_xpath': 'opf:item'},\n {'name': 'spine',\n 'class': Spine,\n 'element_xpath': '/opf:package/opf:spine',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:itemref'},\n {'name': 'guide',\n 'class': Guide,\n 'element_xpath': '/opf:package/opf:guide',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:reference',\n 'optional': True}]\n\n tree = etree.fromstring(archive.read(fullpath))\n\n for element in element_map:\n try:\n element_tree = tree.xpath(element['element_xpath'], namespaces=XML_NAMESPACES)[0]\n except IndexError as e:\n # If the element is marked as optional, just keep going if we don't find it.\n if element['optional']:\n continue\n else:\n print element\n element_class = element['class']()\n element_class.as_xhtml = etree.tostring(element_tree)\n # Step through the attrib dict and replace each key with its localname version\n # i.e. 
if the key is '{namespace}event', replace it with 'event'.\n # There *shouldn't* be any collisions.\n element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n element_tree.attrib.iteritems()}\n element_class.tag.localname = etree.QName(element_tree).localname\n element_class.tag.namespace = etree.QName(element_tree).namespace\n element_class.text = element_tree.text\n\n if 'sub_element_class' in element:\n sub_element_tree = element_tree.xpath(element['sub_element_xpath'], namespaces=XML_NAMESPACES)\n for k in sub_element_tree:\n sub_element_class = element['sub_element_class']()\n sub_element_class.as_xhtml = etree.tostring(k)\n sub_element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n k.attrib.iteritems()}\n sub_element_class.tag.localname = etree.QName(k.tag).localname\n sub_element_class.tag.namespace = etree.QName(k.tag).namespace\n sub_element_class.tag.text = k.text\n element_class.append(sub_element_class)\n\n # if we just created a ManifestElement, we need to additionally\n # pass it a reference to the epub archive and the dirname\n # contained in the fullpath in order for it to access the file\n # it points to\n\n if type(sub_element_class) == ManifestElement:\n # fullpath is the path to the content.opf file.\n # This should also be the path to the manifest item files.\n sub_element_class.basedir = os.path.dirname(fullpath)\n sub_element_class.archive = archive\n\n # Assigns the class we just created as an attribute of the Epub object.\n # The attr name is taken from the 'name' value in the element_map above.\n setattr(self, element['name'], element_class)\n\n # If we just created the spine element, we need to pass it\n # a reference to the manifest. This will enable the spine element to access\n # manifeset elements directly\n # note: this assumes the manifest element has alreay been created\n if element['name'] == 'spine':\n self.spine.manifest = self.manifest\n\n # read in the items from the manifest\n for element in self.manifest:\n if element.isDocument():\n pass\n if element.isImage():\n self.images.append(element)\n if element.isCSS():\n self.css.append(element)\n if element.isTOC():\n pass\n\n # create an array called parts that references elements\n # listed in the spine\n\n for itemref in self.spine.list:\n self.parts.append(self.manifest.getElementById(itemref.tag.attributes['idref']))\n\n return self",
"def reconstruct_evp(self):\n new_evp = \"e%s:%s;%s;%s;%s\\n\" % (self.id, self.min_max_points[\"min_x\"], self.min_max_points[\"min_y\"],\n self.min_max_points[\"max_x\"], self.min_max_points[\"max_y\"])\n for path_id in self.paths_order:\n path = self.paths[path_id]\n new_evp += \"p%s%s:\" % (path.id, path.type)\n for point_id in path.points_order:\n coords = path.points[point_id]\n new_evp += \"t%s~%s,%s;%s,%s;%s,%s;\" % (point_id, coords[0][0], coords[0][1], coords[1][0],\n coords[1][1], coords[2][0], coords[2][1])\n\n if path.type in [\"r\", \"l\"]: # skip if the path is not closed\n new_evp += \":r,%s,%s,%s,%s,%s\" % tuple(path.radial)\n new_evp += \":l,%s,%s,%s,%s:\" % tuple(path.linear)\n\n for stop in path.stops:\n params = stop[\"params\"]\n new_evp += \"o%s~%s,%s,%s;\" % (stop[\"stop_id\"], params[0], params[1], params[2])\n\n new_evp += \":s%s,%s,%s\\n\" % tuple(path.stroke)\n self.evp = new_evp\n return",
"def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps",
"def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])",
"def process_epub(self):\n self.extract_to_temp()\n self.make_changes_to_temp()\n self.temp_to_epub()",
"def load_extended_primers(primerf, verbose=False):\n exact = []\n degenerate = []\n names = {}\n\n with open(primerf, 'r') as f:\n for l in f:\n p = l.strip().split(\"\\t\")\n if (len(p) != 3):\n sys.stderr.write(f\"{colours.RED}ERROR:{colours.ENDC} Can't find the primers in {l}\\n\")\n sys.exit(2)\n p[1] = p[1].upper()\n p[2] = p[2].upper()\n if is_degenerate(p[1]) or is_degenerate(p[2]):\n dp1 = replace_degenerate(p[1])\n degenerate.append([dp1, replace_degenerate(p[2])])\n names[dp1] = p[0]\n else:\n exact.append([p[1],p[2]])\n names[p[1]]=p[0]\n\n return exact, degenerate, names",
"def makeproteins(self, filename, fastatype): #remember in protein collection which proteins generated here?\r\n self.readfile(filename, fastatype)\r\n fastalist = self.readentry()\r\n for ele in fastalist:\r\n prot = Protein(ele[1], ele[0], self.get_fastatype()) # header, AAseq, fastatype\r\n an = prot.get_an()\r\n self.prot_col_dict[an] = prot #dict: key=an, value=protein_object \r",
"def loadEPRFits(fileName):# {{{\n fileHandle = open(fileName,'r')\n lines = fileHandle.readlines()\n# find out how many data lists I need.\n numSets = len(lines[0].split('\\r\\n')[0].split('\\t'))\n# the structure is C0 - field, C1 - data, C2 - fit result, C3 - weights, C4 - component 1, C5 - component 2, C6 and on are more components. \n numComps = numSets - 4\n toStore = zeros((len(lines), numSets))\n for count, line in enumerate(lines):\n line = line.split('\\r\\n')[0].split('\\t')\n for count1, item in enumerate(line):\n toStore[count, count1] = item\n rawData = pys.nddata(toStore[:,1]).rename('value','field').labels('field',toStore[:,0])\n fit = pys.nddata(toStore[:,2]).rename('value','field').labels('field',toStore[:,0])\n components = {}\n for i in range(numComps):\n components.update({'%i'%i: pys.nddata(toStore[:,i+4]).rename('value','field').labels('field',toStore[:,0])})\n return rawData, fit, components# }}}",
"def load_ent_pintpsr(self,epp):\n # Most NANOGrav releases (except 5 yr; -be) include -f flags for all TOAs.\n try:\n flags = epp.flags['f']\n except:\n flags = epp.flags['be']\n \n isort = ut.argsortTOAs(epp._toas, flags)\n self.ephem = epp.model.EPHEM.value\n self.F0 = epp.model.F0.value\n self.P0 = 1.0 / self.F0\n epp.to_pickle() # write pickle object and delete pint_toas/model.\n self.pname = epp.name\n epp._isort = isort\n self.psr = epp",
"def PruneEpiEntries(self):\n pruned = {}\n basefiles = []\n baseentries = {}\n for entry in self.entry_map['epi']:\n if baseentries.has_key(self.info[entry]['basefile']):\n baseentries[self.info[entry]['basefile']].append(entry)\n else:\n baseentries[self.info[entry]['basefile']] = [entry]\n for entry in self.entry_map['epi']:\n targets = []\n if self.no_motcorr:\n target = self.info[entry]['imgfile']\n elif self.info[entry]['fmapname'] is None or self.no_fmapcorr:\n target = self.info[entry]['imgfile_m']\n else:\n target = self.info[entry]['imgfile_mf']\n targets.append(target + self.info[entry]['suffix'])\n targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D'))\n pruned[entry] = [True, baseentries[self.info[entry]['basefile']]]\n for target in targets:\n pruned[entry] = \\\n [False, baseentries[self.info[entry]['basefile']]]\n for key in pruned.keys():\n if not pruned[key][0]:\n for entry in pruned[key][1]:\n pruned[entry][0] = False\n tmp = new_map = []\n for entry in self.entry_map['epi']:\n if pruned[entry][0]:\n if self.verbose:\n print 'Skipping %s: Already reconstructed.' % targets[0]\n if entry in self.pfiles_recon:\n self.pfiles_recon.remove(entry)\n else:\n new_map.append(entry)\n self.entry_map['epi'] = new_map",
"def _init_from_files(self) -> None:\n file_paths = _find_aov_files()\n\n readers = [AOVFile(file_path) for file_path in file_paths]\n\n self._merge_readers(readers)\n\n self._build_intrinsic_groups()",
"def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles",
"def preprocess_pseudopotential_input_files(element_list, template_path):\n for elem in element_list:\n template_file = os.path.join(template_path, elem+'.in.template')\n new_input_file = elem+'.in'\n subprocess.check_call(['dprepro', 'params', template_file, new_input_file])",
"def virus_epi_fragments2tsv(self):\n\t\tprint(\"Save current virus epitope fragments to csv\")\n\t\tepitopes_name = \"imported_iedb_epitope_fragment.tsv\"\n\t\tif not exists(join(self.output_path, epitopes_name)):\n\t\t\tprint(\"Create file: {}\".format(epitopes_name))\n\t\t\twith open(join(self.output_path, epitopes_name), \"w\") as epitopes_out:\n\t\t\t\tepitopes_out.write(\n\t\t\t\t\t\"epi_fragment_id\\tparent_epitope_id\\tepi_fragment_sequence\\tepi_fragment_start\\tepi_fragment_stop\\n\")\n\t\twith open(join(self.output_path, epitopes_name), \"a\") as epitopes_out:\n\t\t\tprint(\"Update file: {}\".format(epitopes_name))\n\t\t\tfor epi_fragment in self.current_virus_epi_fragments:\n\t\t\t\tprint(\"Write IEDB imported epitope fragment\")\n\t\t\t\tepi_frag_attributes = epi_fragment.get_all_attributes()\n\t\t\t\tepi_frag_row = \"\\t\".join(\n\t\t\t\t\t[str(epi_frag_attributes[\"fragment_id\"]), str(epi_frag_attributes[\"parent_epi_id\"]),\n\t\t\t\t\t epi_frag_attributes[\"fragment_seq\"],\n\t\t\t\t\t str(epi_frag_attributes[\"fragment_start\"]),\n\t\t\t\t\t str(epi_frag_attributes[\"fragment_stop\"])])\n\t\t\t\tepitopes_out.write(epi_frag_row + \"\\n\")\n\t\tprint(\"====\")",
"def __load(self, pkgrels):\n # keep track of which parts are already loaded\n part_dict = {}\n\n # discard any previously loaded relationships\n self.__relationships = _RelationshipCollection()\n\n # add model-side rel for each pkg-side one, and load target parts\n for pkgrel in pkgrels:\n # unpack working values for part to be loaded\n reltype = pkgrel.reltype\n pkgpart = pkgrel.target\n partname = pkgpart.partname\n content_type = pkgpart.content_type\n # log.debug(\"%s -- %s\", reltype, partname)\n\n # create target part\n part = Part(reltype, content_type)\n part_dict[partname] = part\n part._load(pkgpart, part_dict)\n\n # create model-side package relationship\n model_rel = _Relationship(pkgrel.rId, reltype, part)\n self.__relationships._additem(model_rel)\n\n # gather references to image parts into __images\n self.__images = ImageCollection()\n image_parts = [part for part in self._parts\n if part.__class__.__name__ == 'Image']\n for image in image_parts:\n self.__images._loadpart(image)",
"def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = [val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Eliminate entries in epi recon table that have already been reconstructed. I don't remember why this is here but I know that at one time it was important. | def PruneEpiEntries(self):
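        # pruned[entry] = [already_reconstructed_flag, list of sibling entries
        # sharing the same basefile].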
pruned = {}
basefiles = []
baseentries = {}
for entry in self.entry_map['epi']:
if baseentries.has_key(self.info[entry]['basefile']):
baseentries[self.info[entry]['basefile']].append(entry)
else:
baseentries[self.info[entry]['basefile']] = [entry]
for entry in self.entry_map['epi']:
targets = []
if self.no_motcorr:
target = self.info[entry]['imgfile']
elif self.info[entry]['fmapname'] is None or self.no_fmapcorr:
target = self.info[entry]['imgfile_m']
else:
target = self.info[entry]['imgfile_mf']
targets.append(target + self.info[entry]['suffix'])
targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D'))
pruned[entry] = [True, baseentries[self.info[entry]['basefile']]]
for target in targets:
                if not os.path.exists(target):
                    # A missing expected output means this entry still needs reconstruction.
                    pruned[entry] = [False, baseentries[self.info[entry]['basefile']]]
for key in pruned.keys():
if not pruned[key][0]:
for entry in pruned[key][1]:
pruned[entry][0] = False
tmp = new_map = []
for entry in self.entry_map['epi']:
if pruned[entry][0]:
if self.verbose:
                        print 'Skipping %s: Already reconstructed.' % entry
if entry in self.pfiles_recon:
self.pfiles_recon.remove(entry)
else:
new_map.append(entry)
self.entry_map['epi'] = new_map | [
"def purgeTable(self):\n todelete = set()\n for implicant in self.implicant_list:\n todelete.add(implicant)\n for f in self.functions_to_simplify:\n if (implicant.funmask >> f.number) & 1 == 1:\n s = {i for i in f.to_be_covered if i in implicant.coverset}\n if s != set():\n todelete.remove(implicant)\n break\n self.implicant_list = [i for i in self.implicant_list if i not in todelete]",
"def remove_implied_extroneous(self):\n rels = self.tuple_relations()\n for lhs, rhs in rels[:]:\n y = lhs[:]\n for attr in lhs:\n y_ = y[:]\n y_.remove(attr)\n if rhs in find_closure(rels, y_):\n y.remove(attr)\n rels.remove((lhs, rhs))\n rels.append((y, rhs))\n self._data[rhs].remove(lhs)\n self._data[rhs].append(y)\n\n # remove dups\n for rhs in self._data:\n\n i = 0\n while i < len(self._data[rhs]):\n if self._data[rhs][i] in self._data[rhs][i + 1:]:\n self._data[rhs].pop(i)\n else:\n i += 1",
"def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data",
"def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated",
"def _preprocess(self, db):\n db = [set(row)\n for row in db] # Ensure each item occurs at most once in each transaction\n counts = dict()\n for row in db: # Count the appearances of each item in the db\n for item in row:\n if counts.get(item, None) == None:\n counts[item] = 0\n counts[item] += 1\n to_remove = set()\n for item, count in counts.items(): # Collect items with not enough support\n if not self._eval_support(count):\n to_remove.add(item)\n else:\n self.item_counts[item] = count\n new_db = []\n for row in db: # Remove low support items from each transaction in db\n new_row = row.difference(to_remove)\n if new_row:\n new_db.append(\n list(sorted(new_row, key=lambda x: counts[x], reverse=True)))\n return new_db",
"def remove_not_in_table(self, miRormRNA, table_genes):\r\n if miRormRNA == 'mRNA':\r\n exp_dist = self.mRNA2dist\r\n total = self.totalmRNA\r\n elif miRormRNA == 'miR':\r\n exp_dist = self.miR2dist\r\n total = self.totalmiR\r\n table_pre_mir = {}\r\n for mir in table_genes:\r\n if mir.endswith(\"p\"):\r\n pre_mir = mir[:-3]\r\n if pre_mir in table_pre_mir:\r\n table_pre_mir[pre_mir].add(mir)\r\n else:\r\n table_pre_mir[pre_mir] = {mir}\r\n methods.logINFO(\"len miRDist: \" + str(len(exp_dist)) + \" total: \" + str(total))\r\n in_table_dist = {}\r\n not_in_table = {}\r\n total_prob = 0\r\n not_in_table_total_prob = 0\r\n for gene in exp_dist:\r\n pre_mir = gene\r\n if miRormRNA == \"miR\" and gene.endswith(\"p\"):\r\n pre_mir = gene[:-3]\r\n if gene in table_genes:\r\n in_table_dist[gene] = exp_dist[gene]\r\n total_prob += exp_dist[gene]\r\n elif pre_mir in table_genes:\r\n if pre_mir in in_table_dist:\r\n in_table_dist[pre_mir] += exp_dist[gene]\r\n print(pre_mir, \"already found\")\r\n else:\r\n in_table_dist[pre_mir] = exp_dist[gene]\r\n total_prob += exp_dist[gene]\r\n elif miRormRNA == \"miR\" and gene in table_pre_mir:\r\n print(gene, \"\\t\", table_pre_mir[gene], \"pre mir in expression, mature in table\")\r\n elif miRormRNA == \"miR\" and pre_mir in table_pre_mir:\r\n print(gene, \"\\t\", pre_mir, table_pre_mir[pre_mir], \"\\tpre mir in expression, mature in table\")\r\n else:\r\n not_in_table[gene.split('_')[0]] = exp_dist[gene]\r\n not_in_table_total_prob += exp_dist[gene]\r\n methods.logINFO(miRormRNA + \": not_in_table_total_prob: \" + str(not_in_table_total_prob))\r\n if miRormRNA == \"miR\":\r\n print(\"-------miRNA not in table---------------\")\r\n for mir in not_in_table:\r\n print(mir)\r\n print(\"----------------------------------------\")\r\n zero_count = 0 # count the number of genes that have counter lower than1\r\n total_prob_corrected = 0\r\n genes2delete = set()\r\n for gene in in_table_dist.keys():\r\n # Removing genes that their counter is lower than 1:\r\n if in_table_dist[gene]*total/total_prob < 1:\r\n genes2delete.add(gene)\r\n zero_count += 1\r\n else:\r\n in_table_dist[gene] /= total_prob\r\n total_prob_corrected += in_table_dist[gene]\r\n for gene in genes2delete:\r\n del in_table_dist[gene]\r\n for gene in in_table_dist:\r\n in_table_dist[gene] /= total_prob_corrected\r\n total = math.floor(total*total_prob_corrected)\r\n methods.logINFO(miRormRNA + \": number of genes below threshold: \" + str(zero_count))\r\n methods.logINFO(miRormRNA + \": number of passed threshold: \" + str(len(in_table_dist)))\r\n # updating the current object:\r\n if miRormRNA == 'mRNA':\r\n self.mRNA2dist = in_table_dist\r\n self.totalmRNA = total\r\n elif miRormRNA == 'miR':\r\n self.miR2dist = in_table_dist\r\n self.totalmiR = total\r\n return [in_table_dist, zero_count, not_in_table]",
"def prune(self):\n\n rem_list = []\n for i, cp in enumerate(self.cov_pairs):\n if FILTER_SLOW and cp.td > self.td_max:\n rem_list.append(i)\n for i in reversed(rem_list):\n self.cov_pairs.pop(i)\n\n rem_list = []\n for i, sp in enumerate(self.stretch_pairs):\n if FILTER_SLOW and sp.td > self.td_max:\n rem_list.append(i)\n for i in reversed(rem_list):\n self.stretch_pairs.pop(i)",
"def _remove_lines(self):\n single = set()\n seen = []\n to_remove = []\n # Find lines with only one vertex and mark duplicates for removal\n for i, c in enumerate(self.end):\n if len(c) == 1:\n single |= c\n if c not in seen:\n seen.append(c)\n else:\n to_remove.append(i)\n\n # Remove all duplicate lines that lead to a single vertex\n for i in reversed(to_remove):\n del self.end[i]\n\n # Remove vertices from lines that already exist in other single lines\n for c in self.end:\n if len(c) > 1:\n c -= single\n\n # Clean up any empty columns\n for i in reversed(range(len(self.end))):\n if not self.end[i]:\n del self.end[i]",
"def remove_redundant_entries(self):\n all_index_list=[]\n tmp_sheet = self.wks.get_all_values()\n for i in self.emails:\n email_re= re.compile(i, re.IGNORECASE)\n index_list=[]\n # pdb.set_trace()\n for index,j in enumerate(self.emails):\n if bool(re.search(email_re,j)):\n index_list.append(index)\n if len(index_list)>1:\n print(i)\n double_emails=[]\n for email_row in index_list:\n double_emails.append(tmp_sheet[email_row])\n #print(index_list)\n ind=max(enumerate(double_emails), key=lambda x: len(x[1]))[0]\n index_list.pop(ind)\n #print(index_list)\n all_index_list.append(index_list)\n flat_all_index_list=[item for sublist in all_index_list for item in sublist]\n s=set(flat_all_index_list)\n flat_all_index_list=list(s)\n flat_all_index_list.sort()\n print(flat_all_index_list)\n for i,val in enumerate(flat_all_index_list):\n self.wks.delete_row(val-i+1)",
"def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)",
"def red(self):\n self.simplify()\n reds = self._hypergraph.redundant_hyperedges()\n for hyperedge, supersets in reds.items():\n absorbing_factor = self.some_factor(supersets.pop())\n absorbing_factor *= self.factor(hyperedge)\n self.remove(hyperedge)\n reds = reds.keys()\n if emptyset in self._hypergraph:\n reds.append(emptyset)\n self.remove_hyperedge(emptyset)\n return reds",
"def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)",
"def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']",
"def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values",
"def _remove_dangling_bonds(self) -> None:\n for residue in self.residues:\n bonds, impropers, cross_maps, ics = [], [], [], []\n for bond in residue.bonds:\n for atom_id in bond:\n if atom_id not in self._id_to_index:\n break\n else:\n bonds.append(bond)\n for improper in residue.impropers:\n for atom_id in improper:\n if atom_id not in self._id_to_index:\n break\n else:\n impropers.append(improper)\n for cross_map in residue.cross_maps:\n for atom_id in cross_map:\n if atom_id not in self._id_to_index:\n break\n else:\n cross_maps.append(cross_map)\n for ic in residue.ics:\n for res_index, atom_name in ic[:4]:\n if atom_name.replace(\"*\", \"\") not in self._id_to_index:\n break\n else:\n ics.append(ic)\n residue.bonds = bonds\n residue.impropers = impropers\n residue.cross_maps = cross_maps\n residue.ics = ics",
"def vacuum(self):\n\t\t\n\t\tstart_resnr = 1\n\t\t#---check for remark 999 to see if we have a starting residue not 1\n\t\twith open(self.rootdir+'system-input.pdb','r') as fp: lines = fp.readlines()\n\t\tregex = 'REMARK 999 starting residue = ([0-9]+)'\n\t\ttrawl = [re.findall(regex,line) for line in lines if re.match(regex,line)]\n\t\tif trawl != []: start_resnr = int(trawl[0][0])\n\n\t\t#---fix histidine naming according to the convention set by the force field\n\t\tif self.settings['force_field'] == 'charmm27':\n\t\t\tif self.settings['histype'] == 'd':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISD\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'e':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISE\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'p':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISP\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tcall(hisfix,cwd=self.rootdir)\n\t\telse: copy(self.rootdir+'system-input.pdb',self.rootdir+'prep-protein-start.pdb')\n\t\t\t\n\t\tprint \"stripping non-protein molecules\"\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein',cwd=self.rootdir,inpipe=\"q\\n\")\n\t\tprotgrp = int(checkout([\"awk\",\"'/[ ,\\t]+Protein[ ,\\t]+:/ {print $1}'\",\n\t\t\t\"log-make-ndx-prep-protein\"],cwd=self.rootdir).strip())\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein-only.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein-only',cwd=self.rootdir,\n\t\t\tinpipe=\"keep \"+str(protgrp)+\"\\nq\\n\")\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-protein-start-stripped.pdb',\n\t\t\t'-n prep-index-protein-only.ndx',\n\t\t\t'-resnr '+str(start_resnr)]\n\t\tcall(cmd,logfile='log-editconf-prep-protein-strip',cwd=self.rootdir)\n\n\t\tprint \"running pdb2gmx\"\n\t\tcmd = [gmxpaths['pdb2gmx'],\n\t\t\t'-f prep-protein-start-stripped.pdb',\n\t\t\t'-o vacuum-alone-number1.gro',\n\t\t\t'-p vacuum-standard.top',\n\t\t\t'-ignh',\n\t\t\t'-i system-posre.itp',\n\t\t\t'-ff '+self.settings['force_field'],\n\t\t\t'-water '+self.settings['water_model']]\n\t\tcall(cmd,logfile='log-pdb2gmx',cwd=self.rootdir)\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone-number1.gro',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-renumber',cwd=self.rootdir)\n\t\t\n\t\t#---intervening step will isolate the ITP data from the TOP file to use standardized TOP\n\t\twith open(self.rootdir+'vacuum-standard.top','r') as f: topfile = f.read()\n\t\t#---extract protein chain names here if necessary\n\t\tchains = []\n\t\tstartline = [ii for ii,i in enumerate(topfile.split('\\n')) \n\t\t\tif re.match('^(\\s+)?\\[(\\s+)?system(\\s+)?\\]',i)][0]\n\t\tfor line in topfile.split('\\n')[startline:]:\n\t\t\tif re.match('^Protein',line):\n\t\t\t\tchains.append(line.split(' ')[0])\n\t\tif len(chains) > 1:\n\t\t\t#---assume one domain per chain\n\t\t\tself.nprots = [1 for i in chains]\n\t\t\tself.protname = chains\n\t\telse:\t\n\t\t\tself.protname = chains[0]\n\t\t\tself.nprots = 1\n\t\tfp = open(self.rootdir+'protein.itp','w') \n\t\tfor line in topfile.split('\\n'):\n\t\t\t#---skip any part of the top that follows the water topology and/or system composition\n\t\t\tif re.match('; Include water topology',line): break\n\t\t\tif re.match('; Include topology for ions',line): 
break\n\t\t\tif re.match('\\[ system \\]',line): break\n\t\t\t#---you must extract forcefield.itp from the file to prevent redundant includes\n\t\t\tif not re.match(\".+forcefield\\.itp\",line) and not \\\n\t\t\t\tre.match(\"; Include forcefield parameters\",line): \n\t\t\t\tfp.write(line+'\\n')\n\t\tfp.close()\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t('-princ' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else ''),\n\t\t\t'-o vacuum.gro']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir,\n\t\t\tinpipe=('0\\n' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else None))\t\t\n\t\tself.minimization_method('vacuum')",
"def iseliminated(self):\n return self.eliminated",
"def clean_up(self, emphasis):\n redundant = Queue(field=self.field, color=\"redundant\")\n for cell in self[::-1]:\n block = Block(self.field, cell)\n if not block.unknown_neighbors:\n self.remove(cell)\n redundant.append(cell)\n elif self.field[cell].bg != self.color:\n self.field[cell].bg = self.color\n\n if redundant and emphasis.is_checked:\n [self.field[cell].update() for cell in redundant]\n pause(emphasis.pause_time)\n while redundant:\n redundant.remove(redundant[-1])",
"def del_unwanted_cols_fact(data):\r\n # del data['do_plu']\r\n del data['dorder_receiveon']\r\n # del data['dorder_receiveon_time']\r\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert epis reconstructed on the scanner. | def ConvertRtEpis(self):
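        # Only scanner-reconstructed DICOM EPIs (psdname 'epirt*', 'epi', or
        # '*epfid2d1_64') are converted to BRIK here.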
if self.verbose:
print 'Convert EPIs to brik'
for entry in self.entry_map['epi']:
if ('epirt' in self.info[entry]['psdname'] or \
self.info[entry]['psdname'] == 'epi' or \
self.info[entry]['psdname'] == '*epfid2d1_64') and \
self.info[entry]['data_filetype'] == 'dicom':
series = self.info[entry]['series']
if self.info[entry]['skip'] > 0:
skip = '--skip=%s' % self.info[entry]['skip']
else:
skip = ''
cmd = 'convert_file %s %s %s brik' % \
(skip, entry, self.info[entry]['imgfile'])
checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])
self.CheckExec(cmd, [checkname]) | [
"def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)",
"def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname",
"def _convert():",
"def convertEG(self, expression):\n for i in range(len(expression)):\n if expression[i:i+2] == list('EG'):\n expression = self.convertGeneral(expression, list('AF'), i)\n break\n\n return expression",
"def convert_to_evoros_input(self, enki_input):\n # ramp inclines\n ramp_inclines = [\n enki_input['incline1'],\n enki_input['incline2'],\n enki_input['incline3'],\n enki_input['incline4'],\n enki_input['incline5']\n ]\n\n # convert to Evo-ROS input\n evoros_input = {\n 'genome': PID_SETTINGS,\n 'enki_genome': ramp_inclines\n }\n return evoros_input",
"def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps",
"def reconstruct_evp(self):\n new_evp = \"e%s:%s;%s;%s;%s\\n\" % (self.id, self.min_max_points[\"min_x\"], self.min_max_points[\"min_y\"],\n self.min_max_points[\"max_x\"], self.min_max_points[\"max_y\"])\n for path_id in self.paths_order:\n path = self.paths[path_id]\n new_evp += \"p%s%s:\" % (path.id, path.type)\n for point_id in path.points_order:\n coords = path.points[point_id]\n new_evp += \"t%s~%s,%s;%s,%s;%s,%s;\" % (point_id, coords[0][0], coords[0][1], coords[1][0],\n coords[1][1], coords[2][0], coords[2][1])\n\n if path.type in [\"r\", \"l\"]: # skip if the path is not closed\n new_evp += \":r,%s,%s,%s,%s,%s\" % tuple(path.radial)\n new_evp += \":l,%s,%s,%s,%s:\" % tuple(path.linear)\n\n for stop in path.stops:\n params = stop[\"params\"]\n new_evp += \"o%s~%s,%s,%s;\" % (stop[\"stop_id\"], params[0], params[1], params[2])\n\n new_evp += \":s%s,%s,%s\\n\" % tuple(path.stroke)\n self.evp = new_evp\n return",
"def virus_epitopes2tsv(self):\n\t\tprint(\"Save current virus epitopes to csv\")\n\n\t\tepitopes_name = \"imported_iedb_epitopes.tsv\"\n\t\tif not exists(join(self.output_path, epitopes_name)):\n\t\t\tprint(\"Create file: {}\".format(epitopes_name))\n\t\t\twith open(join(self.output_path, epitopes_name), \"w\") as epitopes_out:\n\t\t\t\tepitopes_out.write(\n\t\t\t\t\t\"epitope_id\\tvirus_taxid\\tsource_host_iri\\tsource_host_name\\thost_ncbi_id\\tprotein_ncbi_id\\tcell_type\\tmhc_class\\tmhc_restriction\\tresponse_frequency_pos\\tassay_types\\tepitope_sequence\\tepitope_start\\tepitope_stop\\texternal_links\\tprediction_process\\tis_linear\\n\")\n\n\t\twith open(join(self.output_path, epitopes_name), \"a\") as epitopes_out:\n\t\t\tprint(\"Update file: {}\".format(epitopes_name))\n\t\t\tfor epitope in self.current_virus_epitopes:\n\t\t\t\tprint(\"Write IEDB imported epitope\")\n\t\t\t\tepi_attributes = epitope.get_all_attributes()\n\n\t\t\t\tif epi_attributes[\"is_linear\"]:\n\t\t\t\t\tepi_seq = epi_attributes[\"region_seq\"]\n\t\t\t\telse:\n\t\t\t\t\tepi_seq = None\n\t\t\t\tif not epi_seq:\n\t\t\t\t\tepi_seq = ''\n\t\t\t\tif not epi_attributes[\"mhc_class\"]:\n\t\t\t\t\tepi_attributes[\"mhc_class\"] = ''\n\t\t\t\tif not epi_attributes[\"mhc_allele\"]:\n\t\t\t\t\tepi_attributes[\"mhc_allele\"] = ''\n\n\t\t\t\tepitope_row = \"\\t\".join(\n\t\t\t\t\t[str(epi_attributes[\"epitope_id\"]), epi_attributes[\"virus_taxid\"],\n\t\t\t\t\t epi_attributes[\"host_iri\"], epi_attributes['host_name'], epi_attributes['host_ncbi_id'],\n\t\t\t\t\t epi_attributes[\"protein_ncbi_id\"], epi_attributes[\"cell_type\"],\n\t\t\t\t\t epi_attributes[\"mhc_class\"], epi_attributes[\"mhc_allele\"],\n\t\t\t\t\t str(epi_attributes[\"response_frequency_positive\"]),\n\t\t\t\t\t str(epi_attributes[\"assay_types\"]), epi_seq,\n\t\t\t\t\t str(epi_attributes[\"region_start\"]), str(epi_attributes[\"region_stop\"]),\n\t\t\t\t\t \",\".join(epi_attributes[\"external_links\"]),\n\t\t\t\t\t str(epi_attributes[\"prediction_process\"]), str(epi_attributes[\"is_linear\"]),\n\t\t\t\t\t epi_attributes[\"epitope_iri\"], epi_attributes[\"iedb_epitope_id\"]])\n\t\t\t\tepitopes_out.write(epitope_row + \"\\n\")\n\t\tprint(\"====\")",
"def decode_pes(self, pes: bytes)-> PES.PES:\n pesdk = PES.PES()\n try:\n pesdk.stream_id, PES_packet_length = struct.unpack('>BH', pes[0:3])\n if pesdk.stream_id not in [33, 188, 190, 191, 240, 241, 242, 248, 255]:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 190 (0xBE) - padding_stream\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n if pesdk.stream_id >> 4 == 14:\n pesdk.stream_type = 'video-stream'\n pesdk.stream_number = (pesdk.stream_id & 15)\n elif pesdk.stream_id >> 5 == 6:\n pesdk.stream_type = 'audio-stream'\n pesdk.stream_number = (pesdk.stream_id & 31)\n b1, b2, PES_header_data_length = struct.unpack('>BBB', pes[3:6])\n pesdk.PES_scrambling_control = (b1 & 16) >> 4\n # PES_priority = bool((b1 & 8) >> 3)\n # data_alignment_indicator = bool((b1 & 4) >> 2)\n pesdk.copyright = bool((b1 & 2) >> 1)\n pesdk.original_or_copy = bool(b1 & 1)\n pesdk.PTS_DTS_flags = (b2 & 192) >> 6\n pesdk.ESCR_flag = bool((b2 & 32) >> 5)\n pesdk.ES_rate_flag = bool((b2 & 16) >> 4)\n pesdk.DSM_trick_mode_flag = bool((b2 & 8) >> 3)\n pesdk.additional_copy_info_flag = bool((b2 & 4) >> 2)\n pesdk.PES_CRC_flag = bool((b2 & 2) >> 1)\n pesdk.PES_extension_flag = bool(b2 & 1)\n pos = 6\n if pesdk.PTS_DTS_flags in [2, 3]:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos+5])\n pesdk.PTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n if pesdk.PTS_DTS_flags == 3:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos + 5])\n pesdk.DTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n elif pesdk.stream_id == 190:\n # 190 (0xBE) - padding_stream\n pass\n else:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n pass\n return pesdk\n except Exception as err:\n logging.warning('PES parsing error:' + str(err))\n return None",
"def convert_season_episode(self, se_input):\n if type(se_input) == str:\n se_input = se_input[1:]\n se_input.replace(' ', '')\n\n e_ndx = se_input.index('E')\n\n #sometimes it looks like \"S14 E10\" and sometimes it's \"S14 Ep10\"\n if \"Ep\" in se_input:\n ep_offset = 2\n else:\n ep_offset = 1\n\n # return two ints\n return int(se_input[:e_ndx]), int(se_input[e_ndx+ep_offset:])\n\n else:\n # return it as \"S14 Ep10\"\n return \"S%s Ep%s\" % (se_input[0], se_input[1])",
"def convert(self):",
"def find_es_genes(self):\n\n\t\t# get only novel edges\n\t\tif 'annotation' not in self.edge_df.columns:\n\t\t\traise Exception('Cannot find novel IR events without '\n\t\t\t\t'annotation in SwanGraph.')\n\n\t\tedge_ids = self.edge_df.loc[ \\\n\t\t\t(self.edge_df.annotation == False)& \\\n\t\t\t(self.edge_df.edge_type == 'intron'), 'edge_id']\n\t\tprint('Analyzing {} intronic edges for ES'.format(len(edge_ids)))\n\n\t\t# get subset of transcripts that are novel to look for ir edges in\n\t\tnt_df = self.t_df.loc[self.t_df.annotation == False]\n\n\t\t# for each edge, see if the subgraph between the edge vertices \n\t\t# contains an exonic edge\n\t\tes_edges = []\n\t\tes_genes = []\n\t\tes_transcripts = []\n\t\tfor eid in edge_ids:\n\t\t\t# subgraph consisting of all nodes between the candidate\n\t\t\t# exon-skipping edge coords in order and its edges\n\t\t\tsub_nodes = [i for i in range(eid[0]+1,eid[1])]\n\t\t\tsub_G = self.G.subgraph(sub_nodes)\n\t\t\tsub_edges = list(sub_G.edges())\n\t\t\tsub_edges = self.edge_df.loc[sub_edges]\n\t\t\t# find edges that are exonic; if there are none, this is not \n\t\t\t# an exon-skipping edge\n\t\t\tsub_edges = sub_edges.loc[sub_edges.edge_type == 'exon']\n\n\t\t\tif len(sub_edges.index) > 0:\n\n\t\t\t\t# transcripts that contain the candidate exon-skipping edge\n\t\t\t\tskip_t_df = nt_df[[eid in vertex_to_edge_path(x) \\\n\t\t\t\t\tfor x in nt_df.path.values.tolist()]]\n\n\t\t\t\t# circumvent the ISM bug\n\t\t\t\tif len(skip_t_df) == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# does at least one of the skipped exons belong\n\t\t\t\t# to the same gene as the skipping edge?\n\t\t\t\telse:\n\t\t\t\t\t# genes that contain the candidate exon-skipping edge\n\t\t\t\t\tskip_genes = skip_t_df.gid.values.tolist()\n\t\t\t\t\tskip_g_df = self.t_df.loc[self.t_df.gid.isin(skip_genes)]\n\n\t\t\t\t\t# check if the skipped edges are in one of the \n\t\t\t\t\t# exon-skipping genes (wow this is confusing)\n\t\t\t\t\tfor gid in skip_genes:\n\t\t\t\t\t\tif gid in es_genes: continue\n\t\t\t\t\t\tfor skip_eid in sub_edges.index:\n\t\t\t\t\t\t\t# transcripts with the exons that are skipped\n\t\t\t\t\t\t\ttemp_df = skip_g_df[[skip_eid in vertex_to_edge_path(x) \\\n\t\t\t\t\t\t\t\t\tfor x in skip_g_df.path.values.tolist()]]\n\t\t\t\t\t\t\ttids = skip_t_df.tid.tolist()\n\t\t\t\t\t\t\tif len(temp_df.index) > 0:\n\t\t\t\t\t\t\t\tes_edges.append(eid)\n\t\t\t\t\t\t\t\tes_genes.append(gid)\n\t\t\t\t\t\t\t\tes_transcripts.extend(tids)\n\n\t\tes_genes = list(set(es_genes))\n\t\tes_transcripts = list(set(es_transcripts))\n\t\tes_edges = list(set(es_edges))\n\n\t\tprint('Found {} novel es events in {} transcripts.'.format(len(es_edges),\n\t\t\tlen(es_transcripts)))\n\n\n\t\treturn es_genes, es_transcripts, es_edges",
"def prepare_for_ESR(self):\r\n _debug('Anapico: prepare ESR')\r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. \r\n print('Testing query: ', self.query('*IDN?'))\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n self.write('TRIG:SEQ:SOUR EXT') # Set the external trigger to ext\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n print('First frequency?: ', self.query('SOUR:FREQ:STAR?'))\r\n print('Last frequency?: ', self.query('SOUR:FREQ:STOP?'))\r\n \r\n # Prepare the list mode\r\n self.write('SOUR:FREQ:MODE LIST') # Set the frequency mode to list\r\n print('Frequency mode ?: ', self.query('SOUR:FREQ:MODE?'))\r\n self.write('SOUR:POW:MODE LIST') # Set the power mode to list\r\n print('Power mode ?: ', self.query('SOUR:POW:MODE?'))\r\n self.write('SOUR:LIST:MODE AUTO') # Set the list mode to auto\r\n print('List mode ?: ', self.query('SOUR:LIST:MODE?'))\r\n# self.api.write('TRIG:SEQ:TYPE GATE') # An external trigger signal repeatedly starts and stops the waveform’s playback.\r\n self.write('TRIG:SEQ:TYPE POIN')# Upon triggering, only a single point of the sweep (list) is played.\r\n print('Trig type?: ', self.query('TRIG:SEQ:TYPE?'))\r\n \r\n # Set stuff for the modulation\r\n self.write('SOUR:PULM:SOUR EXT')# Set the pulse modulation to be external\r\n print('Pulse modulation source?: ', self.query('SOUR:PULM:SOUR?'))\r\n self.write('SOUR:PULM:STAT ON') # Switch the pulse modulation ON\r\n print('State of pulse modulation? ', self.query('SOUR:PULM:STAT?'))\r\n self.write('SOUR:PULM:POL NORM') # Polarity NORMal, in case it was INVerted\r\n print('Polarity of modulation?: ', self.query('SOUR:PULM:POL?')) \r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. \r",
"def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes",
"def toPES(radial, intensity, energy_cal_factor, per_energy_scaling=True,\n photon_energy=None, Vrep=None, zoom=1):\n\n if Vrep is not None:\n energy_cal_factor *= np.abs(Vrep) / zoom**2\n\n eKE = radial**2 * energy_cal_factor\n\n if photon_energy is not None:\n # electron binding energy\n eBKE = photon_energy - eKE\n else:\n eBKE = eKE\n\n # Jacobian correction to intensity, radius has been squared\n # We have E = c1 - c2 * r**2, where c1 and c2 are constants. To get thei\n # Jacobian, we find dE/dr = 2c2r. Since the coordinates are getting\n # stretched at high E and \"squished\" at low E, we know that we need to\n # divide by this factor.\n intensity[1:] /= (2 * radial[1:]) # 1: to exclude R = 0\n if per_energy_scaling:\n # intensity per unit energy\n intensity /= energy_cal_factor\n\n # sort into ascending order\n indx = eBKE.argsort()\n\n return eBKE[indx], intensity[indx]",
"def convert_ere2eer(input_filename, output_filename):\n with codecs.open(input_filename, \"r\") as input_file:\n with codecs.open(output_filename, \"w\") as output_file:\n for line in input_file:\n line = line.strip().split('\\t')\n if len(line)<3:\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')\n continue\n\n line = [line[0],line[2],line[1]]\n # print(line)\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')",
"def _convert_mv2eps(self):\n if (self.clay is None) or (self.sand is None):\n self.eps = None\n print('WARNING: Permittivity can not be calculated due to missing soil texture!')\n if self.dc_model == 'Dobson85':\n DC = Dobson85(clay=self.clay, sand=self.sand, mv=self.mv, freq=self.f, debye=self.debye)\n else:\n assert False, 'Invalid DC model! ' + self.dc_model\n\n self.eps = DC.eps",
"def test_rewrite_epsilons(self):\n grammar = Grammar()\n grammar.add_terminals(['a', 'b', 'c'])\n grammar.add_production('X', [])\n grammar.add_production('X', ['c'])\n grammar.add_production('Y', ['X', 'a'])\n self.assertFalse(grammar.is_normal)\n self.assertEqual(1, len(grammar.productions_for_name('Y')))\n grammar.rewrite_eps_productions()\n self.assertEqual(2, len(grammar.productions_for_name('Y')))\n self.assertTrue(grammar.is_normal)",
"def _convert_emb(self):\n if self._model in ['Ope','ope','OPE']:\n dic_EmbPar = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DAM), FILE_OSE_OPE_1, self._ose_dir))\n else: #if self._model in ['Opt','opt','OPT']:\n dic_EmbPar = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DAM), FILE_OSE_OPT_1, self._ose_dir))\n\n\n dic_EmbEta = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DAM), FILE_OSE_2, self._ose_dir))\n # TODO: Replace parameters list below with right list\n # If we integrate Ameba code we can import libraries with correct names\n\n directory = os.path.join(self._ameba_dir,DIR_AMEBA_DAM)\n check_directory(directory)\n\n writer_1 = writer_csv(os.path.join(DIR_AMEBA_DAM,FILE_AMEBA_1), COLUMNS_AMEBA_1, self._ameba_dir)\n writer_max = writer_csv(os.path.join(DIR_AMEBA_DAM,FILE_AMEBA_2), COLUMNS_AMEBA_2, self._ameba_dir)\n writer_min = writer_csv(os.path.join(DIR_AMEBA_DAM,FILE_AMEBA_3), COLUMNS_AMEBA_2, self._ameba_dir)\n\n\n # TODO: Replace below correct column values\n \"\"\"escribe la primera fila en formato Ameba\"\"\"\n writer_1.writeheader()\n writer_max.writeheader()\n writer_min.writeheader()\n \"\"\"escribe el resto de las filas\"\"\"\n for emb in dic_EmbPar:\n\n writer_1.writerow({\n COLUMNS_AMEBA_1[0]:emb[COLUMNS_OSE_1[1]],\n COLUMNS_AMEBA_1[1]:self._date_emb(emb[COLUMNS_OSE_1[3]], True),\n COLUMNS_AMEBA_1[2]:self._date_emb(emb[COLUMNS_OSE_1[4]], False),\n COLUMNS_AMEBA_1[3]:'',\n COLUMNS_AMEBA_1[4]:'',\n COLUMNS_AMEBA_1[5]:'',\n COLUMNS_AMEBA_1[6]:'',\n })\n year_ini='2017'\n year_end='2030'\n\n for emb in dic_EmbEta:\n\n for j in range(self._year_ini(year_ini,emb[COLUMNS_OSE_2[2]]),\n self._year_end(self._year_ini(year_ini,emb[COLUMNS_OSE_2[2]]),\n year_end,emb[COLUMNS_OSE_2[2]])):\n if emb[COLUMNS_OSE_2[5]] == 'EmbCotMax':\n for i in range(8,len(COLUMNS_OSE_2)):\n if emb[COLUMNS_OSE_2[i]] != emb[COLUMNS_OSE_2[i-1]] or i==17:\n writer_max.writerow({\n COLUMNS_AMEBA_2[0]:remove(emb[COLUMNS_OSE_2[4]]),\n COLUMNS_AMEBA_2[1]:self._time_emb(COLUMNS_OSE_2[i],str(j)),\n COLUMNS_AMEBA_2[2]:emb[COLUMNS_OSE_2[i]],\n })\n elif emb[COLUMNS_OSE_2[5]] == 'EmbCotMin':\n for i in range(8,len(COLUMNS_OSE_2)):\n if emb[COLUMNS_OSE_2[i]] != emb[COLUMNS_OSE_2[i-1]] or i==17:\n writer_min.writerow({\n COLUMNS_AMEBA_2[0]:remove(emb[COLUMNS_OSE_2[4]]),\n COLUMNS_AMEBA_2[1]:self._time_emb(COLUMNS_OSE_2[i],str(j)),\n COLUMNS_AMEBA_2[2]:emb[COLUMNS_OSE_2[i]],\n })"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Correct for motion and call SliceTimeCorrect. | def CorrectMotion(self):
if self.verbose:
print "Correct for motion"
for entry in self.entry_map['epi']:
info = self.info[entry]
if os.path.exists(info['imgfile_m'] + info['suffix']):
return
# Always use brik for 3dDeconvolve.
suffix = '+orig'
epifile = '%s%s' % (info['imgfile'], suffix)
prefix = info['imgfile_m']
base_entry = info['base_entry']
if info['base'] == 'start':
# Use the first frame specified in template file. Defaults
# to zero.
base = info['motion_ref_frame']
else:
# Use the last frame.
base = self.info[base_entry]['tdim'] - info['skip']-1
base = ('%d' % base).replace(' ','')
# Correct for slice-timing.
self.SliceTimeCorrect(info, epifile)
plane = info['plane']
anat_tgt = info['anat_tgt']
# anat_entry = self.anat_entry[plane]
if info['catmats']:
                # Include additional transformation in motion correction such
# that final image is in register with the fieldmap, which has
# been registered to the structural image that will be used for
# spatial normalization.
self.MotcorCatenate(info, base, anat_tgt)
else:
# Assume fieldmap is in register with the structural.
self.Motcor(info, base)
if info.get('fmapname', None) is None:
# No fieldmap correction.
if self.fsl_flip:
# Flip the way fslview likes it.
self.FSLFlip(info['imgfile_m'], info['imgfile_final'])
elif info['suffix'] == '.nii':
# Copy motion-corrected images from /tmp to output directory
outfile = info['imgfile_final'] + info['suffix']
cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)
self.CheckExec(cmd, [outfile], force=True)
cmd = '/bin/rm %s+orig*' % info['imgfile_m']
self.CheckExec(cmd, [], force=True) | [
"def _drift_correct(self, loc, target_callback):\n e = \"Drift correct has not been implemented for this tracker.\"\n raise NotImplementedError(e)",
"def update(self, mAcrotime_clickEquivalentIn_second):\n self.start_tick = self.photons['timestamps'][0]\n self.end_tick = self.photons['timestamps'][-1]\n self.nb_of_tick = self.photons['timestamps'].size\n self.CPS = float(self.nb_of_tick) / (self.end_tick - self.start_tick) / mAcrotime_clickEquivalentIn_second",
"def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()",
"def drift_correction(self, pos=None, fix_triggered=False):\n\t\t\n\t\tif pos == None:\n\t\t\tpos = self.dispsize[0] / 2, self.dispsize[1] / 2\n\t\tif fix_triggered:\n\t\t\treturn self.fix_triggered_drift_correction(pos)\t\t\n\t\tself.draw_drift_correction_target(pos[0], pos[1])\n\t\tpressed = False\n\t\twhile not pressed:\n\t\t\tpressed, presstime = self.kb.get_key()\n\t\t\tif pressed:\n\t\t\t\tif pressed == 'escape' or pressed == 'q':\n\t\t\t\t\tprint(\"libeyetribe.EyeTribeTracker.drift_correction: 'q' or 'escape' pressed\")\n\t\t\t\t\treturn self.calibrate()\n\t\t\t\tgazepos = self.sample()\n\t\t\t\tif ((gazepos[0]-pos[0])**2 + (gazepos[1]-pos[1])**2)**0.5 < self.pxerrdist:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tself.errorbeep.play()\n\t\treturn False",
"def update(self, now=True):\n self.Time.update(now=now)\n self.Precess()\n self.ApparentPlace()\n self.RaC, self.DecC = self.RaA, self.DecA\n self.posviolate = False\n self.AltAziConv()\n if prefs.RefractionOn:\n dRA, dDEC = self.Refrac()\n self.RaC += dRA\n self.DecC += dDEC\n if prefs.FlexureOn:\n dRA, dDEC = self.Flex()\n self.RaC += dRA\n self.DecC += dDEC",
"def update_dead_reckoning(self):\n now = time.time()\n time_diff_s = now - self._last_observation_s\n self._last_observation_s = now\n\n self._prediction_step(time_diff_s)",
"def update_on_timestep(self):\n raise NotImplementedError",
"def non_causal_timecrop(self, length):\n assert length < self.time_length\n\n cut = (self.time_length - length) / 2\n\n _, i_start = _find_nearest(self.times, cut)\n _, i_end = _find_nearest(self.times, self.time_length - cut)\n\n h = np.fft.ifftshift(np.fft.fftshift(self.in_time)[..., i_start:i_end])\n\n new_response = self.from_time(self.fs, h)\n\n if new_response.time_length != length:\n w = f\"Could not precisely shrink to {length}s with fs = {self.fs}\"\n warnings.warn(w)\n\n return new_response",
"def reverseFlickStep(self):\n\n if self.timer >= self.duration:\n\n # reset the timer\n self.timer = 0\n\n if self.status == 1:\n # if currently flicking then change to a reverse\n self.reverseRun()\n self.status = 2\n\n elif self.status == 3:\n\n # if already extending it is time to flick\n self.flick()\n self.status = 1\n\n elif self.status == 2:\n # if currently reversing then see if reverse should be extended\n # current_conc = self.pos[0]\n current_conc = self.getConcentration()\n self.c_start = self.c_end\n self.c_end = current_conc\n\n # if it is increasing then extend the run\n if self.c_end > self.c_start:\n self.duration = alpha * self.duration\n # self.duration = alpha * self.getDuration(mean_run)\n self.status = 3\n\n # if not then flick\n else:\n self.flick()\n self.status = 1",
"def set_correct_acqiris_time(self, correct_time=True):\n self.pyda.set_correct_acqiris_time(correct_time)",
"def recalc_spd(*args):\n return _ida_frame.recalc_spd(*args)",
"def _correct_for_light_travel_time4(observer, target):\n t = observer.t\n ts = t.ts\n whole = t.whole\n tdb_fraction = t.tdb_fraction\n\n cposition = observer.position.au\n cvelocity = observer.velocity.au_per_d\n\n print('======', cposition.shape)\n # ?\n print('tdb_fraction before:', tdb_fraction.shape)\n tdb_fraction = tdb_fraction[:,None]\n print('tdb_fraction after:', tdb_fraction.shape)\n\n tposition, tvelocity, gcrs_position, message = target._at(t)\n\n distance = length_of(tposition - cposition)\n light_time0 = 0.0\n for i in range(10):\n print('i =', i)\n light_time = distance / C_AUDAY\n delta = light_time - light_time0\n if abs(max(delta)) < 1e-12:\n break\n\n # We assume a light travel time of at most a couple of days. A\n # longer light travel time would best be split into a whole and\n # fraction, for adding to the whole and fraction of TDB.\n t2 = ts.tdb_jd(whole, tdb_fraction - light_time)\n\n tposition, tvelocity, gcrs_position, message = target._at(t2)\n distance = length_of(tposition - cposition)\n light_time0 = light_time\n else:\n raise ValueError('light-travel time failed to converge')\n return tposition - cposition, tvelocity - cvelocity, t, light_time",
"def _crop_time_range(self, time_range, alter_coord=None):\n if alter_coord is None:\n t_coords = self.time_dim\n else:\n t_coords = alter_coord\n time_ind = np.logical_and(self.ds[t_coords] >= time_range[0],\n self.ds[t_coords] < time_range[1])\n if np.sum(time_ind) == 0:\n self.ds.close()\n print(\"The requested time range: {0} to {1} is out of the \\\n model output range; Ignoring crop request.\".format(time_range[0], time_range[1]))\n else:\n self.ds = self.ds.isel({self.time_dim: time_ind})",
"def __change_pitch(self) -> None:\n if self.pitch == self.pitch_target:\n return\n if self.pitch > self.pitch_target:\n self.pitch -= self.pitch_rate_of_change * self.dt\n elif self.pitch < self.pitch_target:\n self.pitch += self.pitch_rate_of_change * self.dt",
"def adjust_times(self, original_times, new_times):\n # Only include notes within start/end time of the provided times\n for instrument in self.instruments:\n valid_notes = []\n for note in instrument.notes:\n if note.start >= original_times[0] and \\\n note.end <= original_times[-1]:\n valid_notes.append(copy.deepcopy(note))\n instrument.notes = valid_notes\n # Get array of note-on locations and correct them\n note_ons = np.array([note.start for instrument in self.instruments\n for note in instrument.notes])\n aligned_note_ons = np.interp(note_ons, original_times, new_times)\n # Same for note-offs\n note_offs = np.array([note.end for instrument in self.instruments\n for note in instrument.notes])\n aligned_note_offs = np.interp(note_offs, original_times, new_times)\n # Same for pitch bends\n pitch_bends = np.array([bend.time for instrument in self.instruments\n for bend in instrument.pitch_bends])\n aligned_pitch_bends = np.interp(pitch_bends, original_times, new_times)\n ccs = np.array([cc.time for instrument in self.instruments\n for cc in instrument.control_changes])\n aligned_ccs = np.interp(ccs, original_times, new_times)\n # Correct notes\n for n, note in enumerate([note for instrument in self.instruments\n for note in instrument.notes]):\n note.start = (aligned_note_ons[n] > 0)*aligned_note_ons[n]\n note.end = (aligned_note_offs[n] > 0)*aligned_note_offs[n]\n # After performing alignment, some notes may have an end time which is\n # on or before the start time. Remove these!\n self.remove_invalid_notes()\n # Correct pitch changes\n for n, bend in enumerate([bend for instrument in self.instruments\n for bend in instrument.pitch_bends]):\n bend.time = (aligned_pitch_bends[n] > 0)*aligned_pitch_bends[n]\n for n, cc in enumerate([cc for instrument in self.instruments\n for cc in instrument.control_changes]):\n cc.time = (aligned_ccs[n] > 0)*aligned_ccs[n]",
"def accelerate(self):\n if self.is_touching_ground()==True:\n self.ay=0\n else:\n self.ay=.005",
"def update_callback(self):\n self.t = timeit.default_timer() - self.t0\n self.process_interaction('Animate', (self.t,))",
"def revolver(self):\r\n\t\tself.__revuelto=True",
"def reverseStep(self):\n\n if self.timer >= self.duration:\n\n # reset the timer\n self.timer = 0\n\n # if the run has already been extended then reverse\n if self.status == 2:\n self.reverseRun()\n self.status = 1\n\n # check if the run should be extened\n elif self.status == 1:\n\n # if not biasing movement already see if it can be extended\n # current_conc = self.pos[0]\n current_conc = self.getConcentration()\n self.c_start = self.c_end\n self.c_end = current_conc\n\n # if it is increasing then continue\n if self.c_end > self.c_start:\n self.duration = alpha * self.duration\n # self.duration = alpha * self.getDuration(mean_run)\n self.status = 2\n\n\n # if it is not increasing then reverse\n else:\n self.reverseRun()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call the jump_censor program to characterize the degree of motion. | def JumpCensor(self):
if self.verbose:
print 'Computing censor files.'
for entry in self.entry_map['epi']:
if self.censor_interleave:
input_file = '%s+orig' % self.info[entry]['imgfile']
interleave = '--interleave'
else:
interleave = ''
if os.path.exists(self.info[entry]['mot_file']):
input_file = self.info[entry]['mot_file']
else:
input_file = '%s+orig' % self.info[entry]['imgfile']
cmd = \
"jump_censor -v --prefix=%s %s --store-plot --threshold=%f %s" % \
(self.info[entry]['censor_prefix'],
interleave,
self.censor_thresh,
input_file)
try:
self.CheckExec(cmd, ['%s_censor.1D' %
self.info[entry]['censor_prefix']],
force=False)
except:
print 'Error computing censor files.' | [
"def censoring_fcn(self, q):\n return 1.0",
"def censor(text, censor_char=\"*\"):\n\n if not isinstance(text, str):\n text = str(text)\n if not isinstance(censor_char, str):\n censor_char = str(censor_char)\n\n if not CENSOR_WORDSET:\n load_censor_words()\n return hide_swear_words(text, censor_char)",
"def censor(text: str, censor_char: str='*') -> str:\n\n if not isinstance(text, str):\n text = str(text)\n if not isinstance(censor_char, str):\n censor_char = str(censor_char)\n\n if not CENSOR_WORDSET:\n load_censor_words()\n return hide_swear_words(text, censor_char)",
"def _censor_word(self, word: str) -> str:\n if self.options.censor_msg:\n return word[0] + '*' * (len(word) - 1)\n return word",
"def jump(self):\n self.vel = -10.5\n self.tick_count = 0\n self.height = self.y",
"def jump(self):\n self.vel = -10\n self.tick_count = 0\n self.height = self.y",
"def jump_cut(self):\n\t\tif self.jumping:\n\t\t\tif self.vel.y < -3:\n\t\t\t\tself.vel.y = -3",
"def on_jump_press(self) -> None:\r\n if not self.node:\r\n return\r\n if don.jumpFly:\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1]-2,self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1],self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.color = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5))\r\n self.node.highlight = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5)) \r\n self.node.jump_pressed = True\r\n self._turbo_filter_add_press('jump')",
"def handle_jump(self):\n self.avatar.jump()\n self.resources['zap'].play()\n collided_eq = self.get_collision()\n if collided_eq is not None:\n if collided_eq.is_correct():\n self.score.increment_score(1)\n self.level_score += 1\n if self.level_score >= configvalues.SCORE_PER_LEVEL:\n self.won_level = True\n self.wait_tick = configvalues.WIN_DELAY\n self.resources['boom'].play()\n collided_eq.explode()\n else:\n self.score.increment_score(-1)\n self.resources['boom'].play()\n collided_eq.explode()",
"def censor(text: str) -> str:\n\n # Split up individual words in the text\n tokens: List[str] = text.split(\" \")\n\n # Create a mapping of 0 if the word is okay, 1 if it should be censored\n censor_mask: List[int] = predict([word for word in tokens])\n\n # A list of tuples with the first element being the word and the second being 0 or 1\n censor_map: List[Tuple[str, int]] = list(zip(tokens, censor_mask))\n\n # A list of the words that make up the censored text\n censored_text: List[str] = [\n censor_word(word) if should_censor else word\n for word, should_censor in censor_map\n ]\n\n return \" \".join(censored_text)",
"def jump(self):\r\n #if (self.isJumping == False):\r\n #\tself.yVel = -15\r\n #\tself.isJumping = True\r\n self.rect.centery -= 40\r\n self.isJumping = True",
"def content_jump(self, jump: np.ndarray, data: np.ndarray) -> None:\n if len(jump) != 1:\n raise ValueError(\"`jump` must be a one dimensional vector\")\n if jump > 0.5:\n euc_dist = np.abs(self._buffer - data).sum(axis=1)\n self._head = np.argmin(euc_dist)",
"async def cabbage(ctx):\n await ctx.send(joy.spread_joy())",
"def on_r_joy_y(self):\r\n self.log()",
"def constant_current(beam, channel_width, z_part_min, ptcl_per_step):\n # top.inject = 1 must be specified in main script\n\n # fixed cathode temperature\n myInjector = injectors.injectorUserDefined(beam, 4.0, channel_width, z_part_min, ptcl_per_step)\n\n installuserinjection(myInjector.inject_electrons)\n\n # These must be set for user injection\n top.ainject = 1.0\n top.binject = 1.0",
"def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")",
"def ev_joyhatmotion(self, event: tcod.event.JoystickHat) -> T | None:",
"async def jog_axis(stdscr: Any, api: OT3API, mount: OT3Mount) -> None:\n CONTINUE_JOG = True\n z = False\n step_index = DEFAULT_STEP_INDEX\n jog_win = stdscr.derwin(0, 0)\n await _refresh_jog_window(jog_win)\n\n y, x = jog_win.getyx()\n stdscr.move(y, x)\n while CONTINUE_JOG:\n c = stdscr.getkey()\n stdscr.addstr(f\"In jog, received {c} character\\n\")\n amount = STEP_LIST[step_index]\n if c == \"z\":\n z = not z\n elif c == \"-\":\n temp_idx = step_index - 1\n step_index = min(temp_idx, MIN_STEP_INDEX)\n stdscr.addstr(f\"Reducing step size to {STEP_LIST[step_index]}\\n\")\n elif c == \"=\":\n temp_idx = step_index + 1\n step_index = min(temp_idx, MAX_STEP_INDEX)\n stdscr.addstr(f\"Increasing step size to {STEP_LIST[step_index]}\\n\")\n elif c == \"a\":\n stdscr.addstr(\"Jogging left\\n\")\n await api.move_rel(mount, Point(x=amount * -1, y=0, z=0))\n elif c == \"d\":\n stdscr.addstr(\"Jogging right\\n\")\n await api.move_rel(mount, Point(x=amount, y=0, z=0))\n elif c == \"w\" and z:\n stdscr.addstr(\"Jogging up\\n\")\n await api.move_rel(mount, Point(x=0, y=0, z=amount))\n elif c == \"s\" and z:\n stdscr.addstr(\"Jogging down\\n\")\n await api.move_rel(mount, Point(x=0, y=0, z=amount * -1))\n elif c == \"w\":\n stdscr.addstr(\"Jogging backwards\\n\")\n await api.move_rel(mount, Point(x=0, y=amount, z=0))\n elif c == \"s\":\n stdscr.addstr(\"Jogging forwards\\n\")\n await api.move_rel(mount, Point(x=0, y=amount * -1, z=0))\n elif c == \"n\":\n stdscr.addstr(\"Exiting jog.\\n\")\n CONTINUE_JOG = False\n stdscr.move(y, x)\n stdscr.refresh()",
"def acceleration(self, acc):\n self.messenger.call('kAcceleration', acc)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the temporal SNR for each epi, save in a nifti file, and store a summary in a png file. | def ComputeSNR(self):
for epi in self.entry_map['epi']:
epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix']
prefix = self.info[epi]['imgfile_final'] + '_snr'
if not os.path.exists('%s_snr.png' % prefix):
if self.verbose:
print 'TemporalSnr(epifile=%s, prefix=%s)' % \
(epifile, prefix)
try:
TemporalSnr(epifile=epifile, prefix=prefix)()
except:
print("Error computing temporal SNR") | [
"def plot_tsnr(self, participant_list = [], input_pth = None, use_atlas_rois = True,\n file_ext = {'pRF': '_cropped_dc_psc.npy', 'FA': '_cropped_confound_psc.npy'}): \n\n ## output path to save plots\n output_pth = op.join(self.outputdir, 'tSNR')\n\n ## input path, if not defined get's it from post-fmriprep dir\n if input_pth is None:\n input_pth = op.join(self.MRIObj.derivatives_pth, 'post_fmriprep', self.MRIObj.sj_space)\n\n ## empty dataframe to save mean values per run\n tsnr_df = pd.DataFrame({'sj': [], 'ses': [], 'task': [], 'ROI': [], 'mean_tsnr': []})\n \n ## if no participant list set, then run all\n if len(participant_list) == 0:\n participant_list = self.MRIObj.sj_num\n\n ## get vertices for each relevant ROI\n # from glasser atlas\n ROIs, roi_verts, color_codes = plot_utils.get_rois4plotting(self.MRIObj.params, \n sub_id = participant_list,\n pysub = self.MRIObj.params['plotting']['pycortex_sub'], \n use_atlas = use_atlas_rois, \n atlas_pth = op.join(self.MRIObj.derivatives_pth,\n 'glasser_atlas','59k_mesh'), \n space = self.MRIObj.sj_space)\n \n ## loop over participants\n\n for pp in participant_list:\n\n # and over sessions (if more than one)\n for ses in self.MRIObj.session['sub-{sj}'.format(sj=pp)]:\n \n bold_files = {}\n \n # path to post fmriprep dir\n postfmriprep_pth = op.join(input_pth, 'sub-{sj}'.format(sj=pp), ses)\n\n outdir = op.join(output_pth,'sub-{sj}'.format(sj=pp), ses)\n # if output path doesn't exist, create it\n if not op.isdir(outdir): \n os.makedirs(outdir)\n print('saving files in %s'%outdir)\n \n ## load data for both tasks\n for tsk in self.MRIObj.tasks:\n\n ## bold filenames\n bold_files[tsk] = [op.join(postfmriprep_pth, run) for run in os.listdir(postfmriprep_pth) if 'space-{sp}'.format(sp=self.MRIObj.sj_space) in run \\\n and 'acq-{a}'.format(a=self.MRIObj.acq) in run and 'task-{t}'.format(t=tsk) in run and run.endswith(file_ext[tsk])]\n\n ## calculate tSNR for each run\n tsnr_arr = []\n for ind,r in enumerate(bold_files[tsk]):\n\n ## use non-PSC file to calculate tSNR\n if 'cropped' in file_ext[tsk]:\n r = r.replace(file_ext[tsk], '_cropped.npy')\n else:\n r = r.replace(file_ext[tsk], '.npy')\n\n ## stack whole brain tsnr - will be used to weight correlations\n tsnr_arr.append(mri_utils.get_tsnr(np.load(r), return_mean = False))\n\n tsnr_df = pd.concat((tsnr_df, \n pd.DataFrame({'sj': np.tile(pp, len(ROIs['sub-{sj}'.format(sj=pp)])), \n 'ses': np.tile(ses, len(ROIs['sub-{sj}'.format(sj=pp)])), \n 'task': np.tile(tsk, len(ROIs['sub-{sj}'.format(sj=pp)])), \n 'ROI': ROIs['sub-{sj}'.format(sj=pp)], \n 'mean_tsnr': [np.nanmean(mri_utils.get_tsnr(np.load(r), return_mean = False)[roi_verts['sub-{sj}'.format(sj=pp)][roi_name]]) for roi_name in ROIs['sub-{sj}'.format(sj=pp)]]})\n ))\n \n ## plot average tSNR values on flatmap surface ##\n tSNR_flatmap = cortex.Vertex(np.mean(tsnr_arr, axis=0), \n self.MRIObj.params['plotting']['pycortex_sub'],\n vmin = 0, vmax = 150,\n cmap='hot')\n #cortex.quickshow(tSNR_flatmap, with_curvature=True, with_sulci=True)\n _ = cortex.quickflat.make_png(op.join(outdir,\n 'tSNR_flatmap_sub-{sj}_{ses}_task-{tsk}.png'.format(sj=pp, ses=ses, tsk=tsk)), \n tSNR_flatmap, \n recache = False, with_colorbar = True,\n with_curvature = True, with_sulci = True,\n curvature_brightness = 0.4, curvature_contrast = 0.1)\n\n ### plot tSNR across runs for the participant and session\n fig, ax1 = plt.subplots(1, 1, figsize=(15,5), dpi=100, facecolor='w', edgecolor='k')\n sns.set_theme(style=\"darkgrid\")\n sns.set(font_scale=1.5) \n b1 = 
sns.barplot(x = 'ROI', y = 'mean_tsnr', hue = 'task', data = tsnr_df,\n capsize = .2 ,linewidth = 1.8, ax=ax1)\n b1.set(xlabel=None)\n b1.set(ylabel=None)\n ax1.set_ylabel('mean tSNR',fontsize = 20,labelpad=18)\n ax1.set_ylim(0,150)\n \n fig.savefig(op.join(outdir,'tSNR_ROIS_sub-{sj}_{ses}.png'.format(sj=pp, ses=ses)), dpi=100,bbox_inches = 'tight')\n\n #return tsnr_df",
"def do_SEIR(self, t_max=200, dt=1.):\n dt = float(dt)\n g = Graph()\n\n for node in ['S', 'E', 'I', 'R']:\n g.add_node(node, 0)\n\n g.set_node('S', self.population)\n g.set_node('E', 0)\n g.set_node('I', self.N_init)\n g.set_node('R', 0)\n\n # cumulative time series\n S = [g.get_node_value('S')] # Susceptible\n E = [g.get_node_value('E')] # Exposed\n I = [g.get_node_value('I')] # noqa Infected\n R = [g.get_node_value('R')] # Recovered\n\n ts = [0.] # time series\n nms = ['prob', 'lag']\n\n g.add_edge('S', 'S', nms, [0.1, 2])\n g.add_edge('E', 'E', nms, [0.4, 21])\n g.add_edge('I', 'I', nms, [0.1, 2])\n\n g.add_edge('S', 'E', nms, [1.2, 1])\n g.add_edge('E', 'I', nms, [0.1, 14]) # [, tiempo de incubacion]\n g.add_edge('I', 'R', nms, [0.7, 2]) # [, tiempo de recuperacion]\n\n t, time_steps = 0., 0\n while t < t_max:\n\n time_steps = time_steps + 1\n\n t = t + dt\n ts.append(t)\n\n # (( S ))\n prob_SS = g.get_edge('S', 'S', 'prob') # beta\n\n dS = - S[-1] * (I[-1] / self.population) * prob_SS\n\n # n_S = min(S[-1] + min(dS * dt, 0), self.population)\n n_S = S[-1] + dS * dt\n\n # (( E ))\n prob_EE = g.get_edge('E', 'E', 'prob')\n dE = - dS - prob_EE * E[-1]\n\n # n_E = min(E[-1] + max(dE * dt, 0), self.population)\n n_E = E[-1] + dE * dt\n\n # (( I ))\n prob_EI = g.get_edge('E', 'I', 'prob')\n lag_EI = g.get_edge('E', 'I', 'lag')\n update_EI = E[-lag_EI] if lag_EI < len(E) else 0.\n\n prob_IR = g.get_edge('I', 'R', 'prob')\n lag_IR = g.get_edge('I', 'R', 'lag')\n update_IR = I[-lag_IR] if lag_IR < len(I) else 0.\n\n prob_II = g.get_edge('I', 'I', 'prob')\n\n dI = prob_EI * update_EI - prob_IR * update_IR\n dI = -dI # porque ????\n n_I = min(I[-1] + dI * dt, self.population)\n\n # (( R ))\n prob_II = g.get_edge('I', 'I', 'prob')\n dR = prob_II * I[-1]\n n_R = min(R[-1] + max(dR * dt, 0), self.population)\n\n S.append(n_S)\n E.append(n_E)\n I.append(n_I)\n R.append(n_R)\n\n df = pd.DataFrame(\n {'ts': ts, 'S': S, 'E': E, 'I': I, 'R': R}).set_index(\"ts\")\n\n extra = attr.asdict(self)\n extra[\"model_name\"] = \"SEIR\"\n return ModelResultFrame(df=df, extra=extra)",
"def write_seisan(filename, args):\n bf = BaikalFile(filename)\n if not bf.valid:\n print(\"Invalid file {}\".format(filename))\n return\n header = bf.MainHeader\n # datetime\n date = datetime.datetime(header[\"year\"], header[\"month\"], header[\"day\"])\n delta = datetime.timedelta(seconds=header[\"to\"])\n dt = date + delta\n _time = dt.time() # time\n # make utc datetime\n utcdatetime = UTCDateTime(date.year, date.month, date.day,\n _time.hour, _time.minute, _time.second, _time.microsecond, precision=3)\n bf.traces = bf.traces.astype(np.int32)\n bf.traces = bf.traces[:3]\n traces = []\n for channel, data in zip(CHANNELS, bf.traces):\n stats = DEFAULT_STATS.copy()\n stats.update({\n \"station\": header['station'].upper()[:3],\n 'channel': channel,\n 'sampling_rate': int( 1./header[\"dt\"] ),\n \"delta\": header[\"dt\"],\n \"npts\": data.size,#shape[0]\n 'starttime': utcdatetime,\n })\n # save coordinates\n stats['gse2'][\"lat\"] = header['latitude']\n stats['gse2'][\"lon\"] = header[\"longitude\"]\n trace = Trace(data=data, header=stats)\n traces.append(trace)\n # create Stream\n stream = Stream(traces)\n #== write seisan\n # date\n name = \"{year:04}-{month:02}-{day:02}\".format(**header)\n # time\n name += \"-{t.hour:02}-{t.minute:02}\".format(t=stats['starttime'])\n # + station name + Day_of_Year\n name += \"{0}__{1:03}\".format(stats[\"station\"], stats['starttime'].timetuple().tm_yday)\n print('Writing GSE2 file %s.' % name)\n writeGSE2(stream, os.path.join(args.outdir, name))",
"def in_situ_tair_snd(sno0, year0=2016, npr_date=-1, ascat_date=-1):\n if npr_date < 0:\n npr_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n if ascat_date < 0:\n ascat_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n snd_name = \"snow\"\n print 'the %d was processing' % sno0\n sno = str(sno0)\n tair_name = \"Air Temperature Observed (degC)\"\n if sno0 in [2065, 2081]:\n if year0 == 2016:\n tair_name = \"Air Temperature Average (degC)\"\n # read measurements\n hr_list = [5, 7, 9, 14, 18, 21]\n t_air_one_year = read_site.in_situ_series(sno, y=year0, hr=hr_list) # [:, :, 0] temperature at 7:00 (local)\n # time_above_zero_0 = data_process.zero_find(t_air_one_year[:, :, 0], w=10, th=-0.1) #\n # time_above_zero_1 = data_process.zero_find(t_air_one_year[:, :, 1], w=10, th=-0.1)\n # time_above_zero_2 = data_process.zero_find(t_air_one_year[:, :, 3], w=10, th=-0.1)\n time_above_zero_list = [data_process.zero_find(t_air_one_year[:, :, i], w=10, th=-0.1)\n for i in range(0, len(hr_list))]\n date_tuple = bxy.time_getlocaltime(time_above_zero_list, ref_time=[2000, 1, 1, 0], t_source='US/Alaska')\n t_value, t_date = read_site.read_measurements\\\n (sno, tair_name, np.arange(1, 365), year0=year0, hr=18, t_unit='sec')\n\n\n tair_zero_day2 = data_process.zero_find(np.array([t_date, -t_value]), w=7, th=0) # in unit of sec\n tair_zero_day1 = data_process.zero_find_gt(np.array([t_date, t_value]), w=7, th=1)\n air_win = 7 # check days during window shown air temperature gt 0 degC\n w, w_valid = data_process.n_convolve3(t_value, air_win)\n air0_index0 = np.where(w>5)\n for ind0 in air0_index0[0]:\n if t_date[ind0] > bxy.get_total_sec('%d0307' % year0):\n tair_zero_day = t_date[ind0] - air_win*24*3600\n break\n # check\n zero_date = bxy.time_getlocaltime([tair_zero_day,tair_zero_day2, npr_date[0], ascat_date[0]],\n ref_time=[2000, 1, 1, 0], t_source=\"US/Alaska\")[-2]\n i_zero = np.where(bxy.time_getlocaltime(t_date, ref_time=[2000, 1, 1, 0],\n t_source=\"US/Alaska\")[-2] == zero_date[0])[0][0]\n t_check = t_value[i_zero - 3: i_zero + 4]\n air_0, air00 = read_site.read_measurements(sno, tair_name, 366+np.arange(50, 70), hr=18)\n a_extend = np.array([-3600*24, 3600*24])\n period0, period1 = np.array(sorted([tair_zero_day, npr_date])) + a_extend, \\\n np.array(sorted([tair_zero_day, ascat_date])) + a_extend\n snow_value, snow_date = read_site.read_measurements\\\n (sno, snd_name, np.arange(1, 365), year0=year0, hr=0, t_unit='sec')\n # get the in situ measurements during a period\n snow2date0 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period0)\n snow2date1 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period1)\n air2date0, air2date1 = data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period0),\\\n data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period1)\n return tair_zero_day, snow2date0, snow2date1, air2date0, air2date1",
"def writeNoise(self):\n\n if (self.noise_file == None or self.noise_file == \"\"):\n return\n ofname = self.noise_file\n ofh = open(ofname,'w')\n\n # these have to be there as long as we've read the FAST file already\n ## not true: we don't store these in the dict.\n have_data = False\n if (\"TipRad\" in self.fstDict and 'TowerHt' in self.fstDict and 'Twr2Shft' in self.fstDict):\n tiprad = self.fstDict['TipRad']\n towerht = self.fstDict['TowerHt']\n twr2shft = self.fstDict['Twr2Shft']\n have_data = True\n\n for line in self.lines_noise:\n if (have_data and line.find('Observer location') >= 0):\n xdist = -1.0 * (tiprad + (towerht + twr2shft))\n ofh.write('{:.1f} 0.0 0.0'.format(xdist))\n ofh.write(' (x,y,z) Observer location in tower-base coordinate system. Use -(RotRad+HubHt)\\n')\n else:\n ofh.write(line)\n ofh.close()",
"def generate_nrrd(file):\n for root, dirs, files in os.walk(file):\n path = root.split(file)\n if path[1] != \"\":\n patient_id = int(path[1].split('/')[1][3:])\n path = file + \"_nrrd\" + path[1]\n # if path.find(\"frisk\")==-1 & path.find(\"M+\")==-1 & path.find('T2M')==-1:\n if (path.find('T2M') != -1 & path.find('frisk') ==\n -1 & path.find('+') == -1\n ) or path.find('masks') != -1: # Only T2M or mask can be found\n os.makedirs(path, exist_ok=True)\n print(path)\n Nrrd = ImageCollection(os.path.join(root, \"*.tiff\"),\n plugin='tifffile',\n load_func=convert_to_gray)\n Nrrd = np.asarray(Nrrd)\n\n if get_image_info(patient_id):\n print(patient_id)\n (spacings, thickness) = get_image_info(patient_id)\n thicknesses = [float('nan'), float('nan'), thickness]\n spacing_direction = np.eye(3)\n # Note: All header fields are specified in Fortran order,\n # per the NRRD specification, regardless of the index order. For example,\n # a C-ordered array with shape (60, 800, 600) would have a sizes field of (600, 800, 60).\n if len(Nrrd) > 0:\n header = {\n 'spacings': spacings,\n 'thicknesses': thicknesses\n \n }\n nrrd.write(os.path.join(path,\n str(patient_id) + '.nrrd'),\n Nrrd,\n header,\n index_order='C')",
"def computeSnrStack(self, filterType, est, tTh='', theory=None, name=None):\n\n print \"- compute SNR and significances for \"+filterType+\" \"+est+\" \"+tTh\n\n # replace data with theory if requested\n if tTh=='tsz':\n tTh = '_theory_tsz'\n elif tTh=='ksz':\n tTh = '_theory_ksz'\n else:\n tTh = ''\n \n if name is None:\n name = ''\n else:\n name = '_'+name\n \n if theory is None:\n sigma_cluster = 3. \n theory = self.ftheoryGaussianProfile(sigma_cluster, filterType=filterType)\n\n\n\n path = self.pathFig+\"/snr_\"+filterType+\"_\"+est+tTh+name+\".txt\"\n with open(path, 'w') as f:\n f.write(\"*** \"+est+\" SNR ***\\n\")\n\n # data and covariance\n d = self.stackedProfile[filterType+\"_\"+est+tTh].copy()\n cov = self.covBootstrap[filterType+\"_\"+est].copy()\n dof = len(d)\n\n # Compute chi^2_null\n chi2Null = d.dot( np.linalg.inv(cov).dot(d) )\n # goodness of fit for null hypothesis\n f.write(\"number of dof:\"+str(dof)+\"\\n\")\n f.write(\"null chi2Null=\"+str(chi2Null)+\"\\n\")\n pteNull = 1.- stats.chi2.cdf(chi2Null, dof)\n f.write(\"null pte=\"+str(pteNull)+\"\\n\")\n # pte as a function of sigma, for a Gaussian random variable\n fsigmaToPTE = lambda sigma: special.erfc(sigma/np.sqrt(2.)) - pteNull\n sigmaNull = optimize.brentq(fsigmaToPTE , 0., 1.e3)\n f.write(\"null pte significance=\"+str(sigmaNull)+\"sigmas\\n\\n\")\n\n # Gaussian model: find best fit amplitude\n sigma_cluster = 1.5 # arcmin\n def fdchi2(p):\n a = p[0]\n result = (d-a*theory).dot( np.linalg.inv(cov).dot(d-a*theory) )\n result -= chi2Null\n return result\n # Minimize the chi squared\n p0 = 1.\n res = optimize.minimize(fdchi2, p0)\n abest = res.x[0]\n #sbest= res.x[1]\n f.write(\"best-fit amplitude=\"+str(abest)+\"\\n\")\n f.write(\"number of dof:\"+str(dof - 1)+\"\\n\\n\")\n\n # goodness of fit for best fit\n chi2Best = fdchi2([abest])+chi2Null\n f.write(\"best-fit chi2=\"+str(chi2Best)+\"\\n\")\n pteBest = 1.- stats.chi2.cdf(chi2Best, dof-1.)\n f.write(\"best-fit pte=\"+str(pteBest)+\"\\n\")\n # pte as a function of sigma, for a Gaussian random variable\n fsigmaToPTE = lambda sigma: special.erfc(sigma/np.sqrt(2.)) - pteBest\n sigma = optimize.brentq(fsigmaToPTE , 0., 1.e3)\n f.write(\"best-fit pte significance=\"+str(sigma)+\"sigmas\\n\\n\")\n\n # favour of best fit over null\n f.write(\"best-fit sqrt(delta chi2)=\"+str(np.sqrt(abs(fdchi2([abest]))))+\"sigmas\\n\")\n fsigmaToPTE = lambda sigma: special.erfc(sigma/np.sqrt(2.))\n pte = fsigmaToPTE( np.sqrt(abs(fdchi2([abest]))) )\n f.write(\"pte (if Gaussian)=\"+str(pte)+\"\\n\")",
"def test_niriss_soss():\n\n input_model = create_input('NIRISS', 'NIS', 'NIS_SOSS',\n filter='CLEAR', pupil='GR700XD')\n save_input = input_model.copy()\n ds = photom.DataSet(input_model)\n ftab = create_photom_niriss_soss(min_r=8.0, max_r=9.0)\n ds.calc_niriss(ftab)\n\n input = save_input.spec[0].spec_table['FLUX']\n output = ds.input.spec[0].spec_table['FLUX'] # ds.input is the output\n sp_order = 1 # to agree with photom.py\n rownum = find_row_in_ftab(save_input, ftab, ['filter', 'pupil'],\n slitname=None, order=sp_order)\n photmj = ftab.phot_table['photmj'][rownum]\n nelem = ftab.phot_table['nelem'][rownum]\n wavelength = ftab.phot_table['wavelength'][rownum][0:nelem]\n relresponse = ftab.phot_table['relresponse'][rownum][0:nelem]\n test_ind = len(input) // 2\n wl = input_model.spec[0].spec_table['WAVELENGTH'][test_ind]\n rel_resp = np.interp(wl, wavelength, relresponse,\n left=np.nan, right=np.nan)\n compare = photmj * rel_resp\n # Compare the values at the center pixel.\n ratio = output[test_ind] / input[test_ind]\n assert np.allclose(ratio, compare, rtol=1.e-7)",
"def RVI(): # doesnt work with .plot but shows in ENVI \r\n # Calculate RVI\r\n rvi = (band4 / band5) \r\n # Plot and show graphic\r\n plt.imshow(rvi)\r\n plt.show()\r\n new_rvi = rasterio.open(path+'\\\\rvi.tif','w', driver='Gtiff',\r\n height=height, width=width,\r\n count=1, dtype='float64', crs=crs,\r\n transform = affine)\r\n new_rvi.write(rvi,1)\r\n new_rvi.close()",
"def _update_total_fibre_snr_plot(self, images):\n\n # Sometimes (18nov10013 vs [18nov10014 or 18nov20013]) the fibre table is the first index\n # sometimes it is not.\n\n fibre_table_extension = 2 if images[0][1].data.shape == (1, ) else 1\n fibre_table = images[0][fibre_table_extension].data\n\n # Find which fibres are sky fibres\n sky_fibres = np.where(fibre_table[\"TYPE\"] == \"S\")[0]\n\n # Find which fibres are program fibres\n program_fibres = np.where(fibre_table[\"TYPE\"] == \"P\")[0]\n\n def clear_snr_plot():\n [patch.set_visible(False) for patch in self.diagnostic_display.axes[2].patches]\n self.diagnostic_display.axes[2].patches = []\n if self.diagnostic_display.canvas is not None:\n wx.CallAfter(self.diagnostic_display.canvas.draw)\n \n\n if len(sky_fibres) == 0:\n clear_snr_plot()\n raise IOError(\"No sky fibres found in image! No estimate of fibre S/N can be made.\")\n \n if len(program_fibres) == 0:\n clear_snr_plot()\n raise IOError(\"No program fibres found in image! No estimate of fibre S/N can be made.\")\n \n extraction_widths = {\n \"blue\": 3.5,\n \"green\": 3.5,\n \"yellow\": 3.5,\n \"red\": 3.5\n } \n\n estimated_channel_snr = {}\n for channel, image in zip(extraction_widths.keys(), images):\n start_time = time()\n\n # Open a tram-line map\n tram_map_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"tram_maps/{0}.map\".format(channel))\n if not os.path.exists(tram_map_filename):\n raise IOError(\"tram map filename not found: {0}\".format(tram_map_filename))\n\n with open(tram_map_filename, \"r\") as fp:\n tram_map = pickle.load(fp)\n\n # Perform fibre extraction\n sky_fibre_fluxes = calibrate.extract_fibres(image, tram_map,\n extraction_widths[channel], sky_fibres, mode=\"median\")\n program_fibre_fluxes = calibrate.extract_fibres(image, tram_map,\n extraction_widths[channel], program_fibres, mode=\"sum\")\n\n # Calculate mean sky fibre fluxes along each column (Wavelength)\n median_sky_fibre_flux = np.mean(sky_fibre_fluxes, axis=0)\n \n # Calculate object pixel fluxes / mean sky fibre fluxes\n #estimated_pixel_snr = program_fibre_fluxes / (program_fibre_fluxes + median_sky_fibre_flux)**0.5\n estimated_pixel_counts = program_fibre_fluxes / median_sky_fibre_flux\n estimated_mean_fibre_counts = np.mean(estimated_pixel_counts, axis=1)\n\n print(\"num fibres\", len(estimated_mean_fibre_counts))\n\n\n # Draw histogram of S/N\n estimated_channel_snr[channel] = estimated_mean_fibre_counts\n print(\"Completed {0} channel in {1:.2f}\".format(channel, time() - start_time))\n\n # Clear any previous patches\n clear_snr_plot()\n\n bin_size = 2\n x_limits = sum([[np.min(channel_snr), np.max(channel_snr)] for channel_snr in estimated_channel_snr.values()], [])\n x_limits = [np.floor(np.min(x_limits)) - bin_size, np.ceil(np.max(x_limits)) + bin_size]\n\n bins = np.arange(x_limits[0], x_limits[1] + bin_size, bin_size)\n all_y_values = []\n for channel, snr_values in estimated_channel_snr.iteritems():\n y_values, returned_bins, patches = self.diagnostic_display.axes[2].hist(\n snr_values, bins=bins, color=channel, alpha=0.5)\n all_y_values.append(y_values)\n\n self.diagnostic_display.axes[2].set_ylim(0, max(map(max, all_y_values)))\n self.diagnostic_display.axes[2].set_xlim(x_limits)\n \n if self.diagnostic_display.canvas is not None:\n wx.CallAfter(self.diagnostic_display.canvas.draw)",
"def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)",
"def compute_gti(self ):\n\t\tgti_file = os.path.join( self._data_dir, \"erass.gti\")\n\n\t\tcmd = [\"ero_vis\",\n\t\t\t\t\"GTIfile=%s\" % gti_file,\n\t\t\t\t\"Simput=%s\" % self._simput,\n\t\t\t\t\"Exposure=%f\" % self._exposure,\n\t\t\t\t\"Attitude=/data40s/erosim/eRASS/eRASS_4yr_epc85_att.fits\",\n\t\t\t\t\"TSTART=%f\" % self._t_start,\n\t\t\t\t#\"RA=%s\" % self._ra_cen,\n\t\t\t\t#\"Dec=%s\" % self._dec_cen,\n\t\t\t\t\"dt=1.0\",\n\t\t\t\t\"visibility_range=1.0\",\n\t\t\t\t\"clobber=yes\"\n\t\t\t\t]\n\n\t\tprint(\" \".join(cmd))\n\t\tsubprocess.check_call(cmd)",
"def generate_filename(itile, typestat, reso, timeflag, date, mode):\n filename = '%s/%s/%s/%s/%s/%s_%s_%s_%003i.nc' % (path_to_stats, \n reso, date[0], \n mode, typestat, \n typestat, reso, \n timeflag, itile)\n return filename",
"def serval_read_snr(fil, inst='CARM_VIS', ifnofilout='empty', nrow=1):\n\n # If instrument known, get the number of orders\n ords_known = False\n if inst == 'CARM_VIS':\n nord = 61\n ords_known = True\n elif inst == 'CARM_NIR':\n nord = 28\n ords_known = True\n elif inst == 'HARPS' or inst == 'HARPN':\n nord = 72\n ords_known = True\n\n # If no instrument specified, try to get the number of orders from the filename\n else:\n if 'vis' in fil or 'VIS' in fil:\n nord = 61\n ords_known = True\n elif 'nir' in fil or 'NIR' in fil:\n nord = 28 # don't know actually\n ords_known = True\n\n # -----------------------------------------------------\n\n # If number of orders known, read into pandas dataframe directly\n if ords_known:\n ords = np.arange(0, nord, 1)\n column_names = ['bjd', 'servalsnr'] + ['servalsnro{:02d}'.format(o) for o in ords]\n data = read_file2dataframe(fil, column_names, ifnofilout=ifnofilout, nrow=nrow)\n\n # If number of orders not known, read the file and get it from there\n else:\n try:\n # Read data 1st to get number of orders\n data_raw = np.loadtxt(fil, unpack=True)\n nord = len(data_raw[5:])\n ords = np.arange(0, nord, 1)\n\n # Put data in pandas dataframe\n data_dic = {'bjd': data_raw[0], 'servalsnr': data_raw[1]}\n dic_ords = {'servalsnro{:02d}'.format(i): data_raw[i+5] for i in ords}\n data_dic.update(dic_ords) # Merge dictionaries\n data = pd.DataFrame(data_dic)\n data.set_index('bjd', inplace=True)\n # If cannot read the file, return the output of the function `read_file2dataframe` specified by `ifnofilout`\n except:\n ords = np.arange(0, 1, 1)\n column_names = ['bjd', 'servalsnr'] + ['servalsnro{:02d}'.format(o) for o in ords]\n data = read_file2dataframe(fil, column_names, ifnofilout=ifnofilout, nrow=nrow)\n\n return data",
"def datasize_vs_iou():\n\n #TODO instead of writing this manually, you can parse each baseline-XX file and compute the number of images\n #used by doing XX * 670 (for DSB), where XX is the percentage.\n #You can read the y values from the jaccard.txt files.\n x = [16, 33, 67, 167, 335, 502, 670] #number of images\n y = [0.43, 0.47, 0.66, 0.71, 0.71, 0.72, 0.73] #the mean IoU score\n\n fig, ax = plt.subplots()\n ax.scatter(x, y)\n ax.set_xticks(x)\n ax.set(xlabel=\"Number of images\", ylabel=\"mean IoU\", title=\"DSB\")\n\n fig.savefig(\"plot.png\")",
"def TICwriter(TIC, dataFile, saveDirectory):\n #Create savename from data file name:\n savefile = dataFile.split('/')[-1].split('.')[0] + '_TIC.png'\n #Create ouput directory:\n saveDirectory = os.path.join(saveDirectory, 'output/')\n os.makedirs(os.path.dirname(saveDirectory), exist_ok=True)\n #Plot figure:\n Plot = pl.figure()\n TICplot = Plot.add_subplot(111)\n TICplot.plot([d[0] for d in TIC], [d[1] for d in TIC])\n \n #Save and close plot:\n pl.savefig(saveDirectory + savefile)\n pl.close(Plot)",
"def export_nifti(self, file_path:str):\n pet_array = super().get_numpy_array()\n\n if self.export_type == 'raw' : \n pass\n\n elif self.export_type == 'suv' : \n try : \n pet_array = pet_array * self.__calculateSUVFactor() \n except Exception as err : \n print(\"Error generating result array (suv mode)\", err)\n\n elif self.export_type == 'sul' : \n try : \n pet_array = pet_array * self.__calculateSUVFactor() * self.calculateSULFactor() \n except Exception as err :\n print(\"Error generating result array (sul mode)\", err) \n\n\n sitk_img = sitk.GetImageFromArray( np.transpose(pet_array, (2,0,1) )) \n sitk_img = sitk.Cast(sitk_img, sitk.sitkFloat32) \n original_pixel_spacing = self.instance_array[0].get_pixel_spacing() \n original_direction = self.instance_array[0].get_image_orientation()\n sitk_img.SetDirection( (float(original_direction[0]), float(original_direction[1]), float(original_direction[2]), \n float(original_direction[3]), float(original_direction[4]), float(original_direction[5]), \n 0.0, 0.0, 1.0) )\n sitk_img.SetOrigin( self.instance_array[0].get_image_position() )\n sitk_img.SetSpacing( (original_pixel_spacing[0], original_pixel_spacing[1], self.get_z_spacing()) )\n sitk.WriteImage(sitk_img, file_path)",
"def image_save_nii(data,path):\n data = data.astype('float')\n data_nii = np.transpose(data)\n output = nib.Nifti1Image(data_nii, affine=np.eye(4))\n nib.save(output, path)",
"def snr_stats(\r\n t,\r\n y,\r\n period,\r\n duration,\r\n T0,\r\n transit_times,\r\n transit_duration_in_days,\r\n per_transit_count,\r\n):\r\n\r\n snr_per_transit = numpy.zeros([len(transit_times)])\r\n snr_pink_per_transit = numpy.zeros([len(transit_times)])\r\n intransit = transit_mask(t, period, 2 * duration, T0)\r\n flux_ootr = y[~intransit]\r\n\r\n try:\r\n pinknoise = pink_noise(flux_ootr, int(numpy.mean(per_transit_count)))\r\n except:\r\n pinknoise = numpy.nan\r\n\r\n # Estimate SNR and pink SNR\r\n # Second run because now the out of transit points are known\r\n if len(flux_ootr) > 0:\r\n std = numpy.std(flux_ootr)\r\n else:\r\n std = numpy.nan\r\n for i in range(len(transit_times)):\r\n mid_transit = transit_times[i]\r\n tmin = mid_transit - 0.5 * transit_duration_in_days\r\n tmax = mid_transit + 0.5 * transit_duration_in_days\r\n if numpy.isnan(tmin) or numpy.isnan(tmax):\r\n idx_intransit = []\r\n mean_flux = numpy.nan\r\n else:\r\n idx_intransit = numpy.where(numpy.logical_and(t > tmin, t < tmax))\r\n if len(y[idx_intransit]) > 0:\r\n mean_flux = numpy.mean(y[idx_intransit])\r\n else:\r\n mean_flux = numpy.nan\r\n\r\n intransit_points = numpy.size(y[idx_intransit])\r\n try:\r\n snr_pink_per_transit[i] = (1 - mean_flux) / pinknoise\r\n if intransit_points > 0 and not numpy.isnan(std):\r\n std_binned = std / intransit_points ** 0.5\r\n snr_per_transit[i] = (1 - mean_flux) / std_binned\r\n else:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n except:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n\r\n return snr_per_transit, snr_pink_per_transit"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Empties the models within the bag | def empty_bag(self):
if self.peds is not None:
for _, model in self.peds.items():
model.reset()
self.drone.reset()
self.subject.reset() | [
"def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()",
"def clear(self):\n for m in self._models:\n m.clear()\n\n if self._comm is not None:\n self._comm.stop()\n del self._comm\n self._comm = None",
"def clear(self) -> None:\n self.objects = []",
"def clear(self):\n self.beginResetModel()\n self.root_item = RootItem()\n self.requests_items = {}\n self.endResetModel()",
"def resetmodel(self):\n for key, value in self._dentsvertsdata.items():\n value.free()\n self._dentsvertsdata.clear()",
"def clear_batch(self):\n self._batch_idx = 0\n self.variant_states = None\n self.object_specs = None\n self.object_attribute_values = None",
"def __clear_models_backup__(self):\n session = self.__get_session()\n for ds in self._data_streams:\n models = ds.get_models()\n for m in models:\n try:\n self._doc.remove_root(m, setter=session)\n except Exception as e:\n self.exception(e)",
"def clear(self):\r\n\t\tself.free_objects[:] = []",
"def clear(self) -> None:\n # Creates a new, empty bag and assigns self.da to the new, empty bag.\n new_bag = Bag()\n self.da = new_bag.da",
"def clear(self):\n # to be able to be restored into the database, we need to make sure\n # no existing objects are present or there could be conflicts\n Replay.objects.all().delete()\n replayer.models.Update.objects.all().delete()",
"def clear():\n MIGRATIONS.clear()",
"def clear(self):\n self.middlewares = []\n self.processors = []\n self.reset()",
"def _clear(self):\n for priced_model in (Product, ProductVariation):\n priced_model.objects.filter(sale_id=self.id).update(sale_id=None, \n sale_from=None, sale_to=None, sale_price=None)",
"def clear (self):\n\n\t\tself.meta = {}\n\t\tself.data = {}",
"def reset_model(self):\n pass",
"def clear_models(self):\n if not os.path.isdir(self.models_dir):\n return\n model_dir = os.path.join(self.models_dir, self.run_name)\n if not os.path.isdir(model_dir):\n return\n for f in os.listdir(model_dir):\n f_path = os.path.join(model_dir, f)\n if not os.path.isfile(f_path):\n continue\n os.remove(f_path)",
"def clearItems(self):\n self.items = {}\n self.raw_items = []",
"def reset(self):\n for index in self.values():\n index.reset()\n self.objectids = self.family.IF.TreeSet()",
"def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
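The empty_bag entry walks a dict of models and resets each one before resetting the drone and subject; a small sketch of the same pattern with stand-in model objects (class and attribute names are illustrative):

    class Model:
        def __init__(self):
            self.history = [1, 2, 3]        # pretend accumulated state
        def reset(self):
            self.history = []

    peds = {"ped_1": Model(), "ped_2": Model()}
    drone, subject = Model(), Model()

    # same pattern as empty_bag: reset every pedestrian model, then drone and subject
    if peds is not None:
        for _, model in peds.items():
            model.reset()
    drone.reset()
    subject.reset()

    print(all(len(m.history) == 0 for m in [*peds.values(), drone, subject]))  # True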
Determines if an input is a float. | def is_float(self, input):
try:
float(input)
return True
except ValueError:
return False | [
"def IsFloat(param):\n if type(param) is types.FloatType:\n return True\n return False",
"def isfloat(value): \n try:\n float(value)\n return True\n\n except ValueError:\n return False",
"def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False",
"def is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def is_float(x):\n _check_is_1d_frame(x)\n return is_numeric(x) and not is_integer(x)",
"def is_float(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False",
"def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def isFloat(self):\r\n return self._wrap(type(self.obj) is float)",
"def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False",
"def isFloat(str):\n try:\n float(str)\n return True\n except ValueError:\n return False",
"def isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False",
"def is_float( str ):",
"def isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False",
"def _floatable(item):\n try:\n float(item)\n return True\n except ValueError:\n return False",
"def is_float(s: str) -> bool:\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_float(string: str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
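A subtlety of the try/float pattern used throughout these entries: float() also accepts "nan", "inf" and scientific notation, so such strings count as floats. A quick standalone check of the same logic:

    def is_float(value):
        try:
            float(value)
            return True
        except ValueError:
            return False

    print(is_float("3.14"))   # True
    print(is_float("1e-4"))   # True  (scientific notation parses)
    print(is_float("nan"))    # True  (float('nan') is valid)
    print(is_float("abc"))    # False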
c = 2 ⋅ atan2( √a, √(1−a) ) | def _calculate_c(self, a: float) -> float:
sqrt_a = cmath.sqrt(a)
sqrt_one_minus_a = cmath.sqrt(1 - a)
return 2 * math.atan2(sqrt_a.real, sqrt_one_minus_a.real) | [
"def arctan2(a, b):",
"def _atan2(y, x):\n tan = tf.atan(y / (x + 1e-8)) # this returns in -pi/2 .. pi/2\n\n one_map = tf.ones_like(tan)\n\n # correct quadrant error\n correction = tf.where(tf.less(x + 1e-8, 0.0), 3.141592653589793*one_map, 0.0*one_map)\n tan_c = tan + correction # this returns in -pi/2 .. 3pi/2\n\n # bring to positive values\n correction = tf.where(tf.less(tan_c, 0.0), 2*3.141592653589793*one_map, 0.0*one_map)\n tan_zero_2pi = tan_c + correction # this returns in 0 .. 2pi\n\n # make symmetric\n correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793), -2*3.141592653589793*one_map, 0.0*one_map)\n tan_final = tan_zero_2pi + correction # this returns in -pi .. pi\n return tan_final",
"def atan2(cls, arg1, arg2):\n if arg1 + arg2 == arg1:\n return cls.PIO2 if arg1 >= 0 else -cls.PIO2\n arg1 = cls.atan(arg1 / arg2)\n return arg1 + PI if arg1 <= 0 else arg1 - PI if arg2 < 0 else arg1",
"def atand(x):\r\n return atan(x) * 180 / pi",
"def _atan2(y, x):\n tan = tf.atan(y / (x + 1e-8)) # this returns in -pi/2 .. pi/2\n\n one_map = tf.ones_like(tan)\n\n # correct quadrant error\n correction = tf.where(tf.less(x + 1e-8, 0.0), 3.141592653589793*one_map, 0.0*one_map)\n tan_c = tan + correction # this returns in -pi/2 .. 3pi/2\n\n # bring to positive values\n correction = tf.where(tf.less(tan_c, 0.0), 2*3.141592653589793*one_map, 0.0*one_map)\n tan_zero_2pi = tan_c + correction # this returns in 0 .. 2pi\n\n # make symmetric\n correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793), -2*3.141592653589793*one_map, 0.0*one_map)\n tan_final = tan_zero_2pi + correction # this returns in -pi .. pi\n return tan_final",
"def angle(self, a):\n n1 = self.length()\n n2 = a.length()\n if n1 == 0. or n2 == 0.:\n return None\n ## Probably this should be done with arctan2 instead..\n rads = acos(fn.clip_scalar(QtGui.QVector3D.dotProduct(self, a) / (n1 * n2), -1.0, 1.0)) ### in radians\n# c = self.crossProduct(a)\n# if c > 0:\n# ang *= -1.\n return degrees(rads)",
"def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))",
"def atan_term(x, i):\n n = 2*i+1\n return alternate(i, x**n/n)",
"def tangent(angle):\r\n\r\n return math.tan(angle)",
"def angle(a, b):\n ab_vec = (b[1] - a[1], a[0] - b[0]) # vector with bottom-right origin\n res = atan2(*ab_vec) * 180 / pi # angle with yaxis in [-180, 180]\n if res < 0: # from [-180, 180] to [0, 360]\n res += 360\n return res",
"def angle(z):",
"def cart_pol(c1):\r\n r = math.sqrt(c1[0]**2 + c1[1]**2)\r\n angle = math.degrees(math.atan2(c1[1], c1[0]))\r\n return [r, angle]",
"def arctan(x):",
"def arcsinh(a):",
"def atan(x):\n getcontext().prec += 2\n i, lasts, s, num, sign = 1, 0, x, x, 1\n\n for _ in range(2000000):\n lasts = s\n i += 2\n num *= x * x\n sign *= -1\n s += (num / i) * sign\n getcontext().prec -= 2\n return +s",
"def IAngle(a, b, t):\n \n # http://www.engineersedge.com/material_science/moment-inertia-gyration-7.htm\n d = b - t \n y = b - (t*(2*d + a) + d**2)/(2*(d+a))\n I = 1/3 * (t*y**3 + a*(b-y)**3 - (a-t)*(b-y-t)**3)\n return I",
"def cotangent(a: list, b: list, c: list):\n ba = np.subtract(a, b)\n bc = np.subtract(c, b)\n return (np.dot(bc, ba) / abs(np.cross(bc, ba)))",
"def var_atan2 ( a , b = 1 , name = '' , title = '' ) :\n fa = isinstance ( a , num_types )\n fb = isinstance ( b , num_types )\n if fa and fb :\n ab = math.atan2 ( float ( a ) , float ( b ) ) \n return ROOT.RooFit.RooConst ( ab ) ## RETURN\n return Ostap.MoreRooFit.Atan2 ( a, b , name , title )",
"def angle(pt_a, pt_b):\n x1, y1 = pt_a\n x2, y2 = pt_b\n return atan2(y2-y1, x2-x1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
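The c = 2*atan2(sqrt(a), sqrt(1-a)) entry is the angular-distance step of the haversine formula; multiplying c by the sphere's radius gives the great-circle distance. A self-contained sketch (the coordinates and radius are illustrative):

    import math

    def haversine_km(lat1, lon1, lat2, lon2, radius_km=6371.0):
        phi1, phi2 = math.radians(lat1), math.radians(lat2)
        dphi = math.radians(lat2 - lat1)
        dlmb = math.radians(lon2 - lon1)
        a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))   # the entry's c term
        return radius_km * c

    # Paris to Berlin, roughly 878 km
    print(haversine_km(48.8566, 2.3522, 52.5200, 13.4050))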
gpu_model_to_scale is a dict from model string to scale. | def avail_gpu_compute(self, gpu_model_to_scale):
self._check_spy_stats_available()
l = []
for u, model in zip(self._util.gpu_compute, self._capacity.gpu_model):
found = False
for k, scale in gpu_model_to_scale.items():
if k in model:
found = True
break
if found:
l.append(scale * (1 - u))
else:
raise Exception('Unknown GPU model %s found on host %s' %
(model, self.name))
return l | [
"def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)",
"def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res",
"def petab_scale_to_amici_scale(scale_str):\n\n if scale_str == 'lin':\n return amici.ParameterScaling_none\n if scale_str == 'log':\n return amici.ParameterScaling_ln\n if scale_str == 'log10':\n return amici.ParameterScaling_log10\n raise ValueError(\"Invalid pscale \" + scale_str)",
"def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def cli_scale(ctx):\n pass",
"def scale_module(module, scale):\n for p in module.parameters():\n p.detach().mul_(scale)\n return module",
"def itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSUC2_cast(obj: 'itkLightObject') -> \"itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSUC2 *\":\n return _itkEuclideanDistancePointSetToPointSetMetricPython.itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSUC2_cast(obj)",
"def ggml_scale(ctx: ffi.CData, a: ffi.CData, b: ffi.CData) -> ffi.CData:\n ...",
"def itkRegistrationParameterScalesFromPhysicalShiftEBPSTPSMPSUC2_cast(obj: 'itkLightObject') -> \"itkRegistrationParameterScalesFromPhysicalShiftEBPSTPSMPSUC2 *\":\n return _itkExpectationBasedPointSetToPointSetMetricv4Python.itkRegistrationParameterScalesFromPhysicalShiftEBPSTPSMPSUC2_cast(obj)",
"def init_model_attr(model):\n if not hasattr(model, 'pscale'):\n model.pscale = OrderedDict()\n for k in model.param_names:\n model.pscale[k] = 1.\n else:\n for k in model.param_names:\n if k not in model.pscale:\n model.pscale[k] = 1.\n \n return model",
"def scale(cam_file, x_scale, y_scale):\n # TODO\n pass",
"def _scale_loads(case, original_loads, scaling_factor):\n for load in case.loads:\n base_mw = original_loads[load.key]['MW']\n base_mvar = original_loads[load.key]['MVAR']\n load.nominal_constant_P_mw = base_mw * (1 + scaling_factor)\n load.nominal_constant_P_mvar = base_mvar * (1 + scaling_factor)",
"def _hyperparam_to_scale(self, hyperparam):\n\n # If logscale is used, input hyperparam is log of the scale.\n if self.use_log_scale:\n scale = 10.0**hyperparam\n else:\n scale = numpy.abs(hyperparam)\n\n return scale",
"def normalize_parameters(model, config, free_GB=2, **kwargs):\n\n from snntoolbox.parsing.utils import get_inbound_layers_with_params\n\n print(\"Normalizing parameters...\")\n\n norm_dir = kwargs[str('path')] if 'path' in kwargs else \\\n os.path.join(config.get('paths', 'log_dir_of_current_run'),\n 'normalization')\n\n activ_dir = os.path.join(norm_dir, 'activations')\n if not os.path.exists(activ_dir):\n os.makedirs(activ_dir)\n # Store original weights for later plotting\n if not os.path.isfile(os.path.join(activ_dir, 'weights.npz')):\n weights = {}\n for layer in model.layers:\n w = layer.get_weights()\n if len(w) > 0:\n weights[layer.name] = w[0]\n np.savez_compressed(os.path.join(activ_dir, 'weights.npz'), **weights)\n\n batch_size = config.getint('simulation', 'batch_size')\n\n # Either load scale factors from disk, or get normalization data set to\n # calculate them.\n filepath = os.path.join(norm_dir, config.get('normalization',\n 'percentile') + '.json')\n x_norm = None\n if 'scale_facs' in kwargs:\n scale_facs = kwargs[str('scale_facs')]\n elif 'x_norm' in kwargs or 'dataflow' in kwargs:\n if 'x_norm' in kwargs:\n x_norm = kwargs[str('x_norm')]\n elif 'dataflow' in kwargs:\n x_norm = []\n dataflow = kwargs[str('dataflow')]\n num_samples_norm = config.getint('normalization', 'num_samples',\n fallback='')\n if num_samples_norm == '':\n num_samples_norm = len(dataflow) * dataflow.batch_size\n while len(x_norm) * batch_size < num_samples_norm:\n x = dataflow.next()\n if isinstance(x, tuple): # Remove class label if present.\n x = x[0]\n x_norm.append(x)\n x_norm = np.concatenate(x_norm)\n print(\"Using {} samples for normalization.\".format(len(x_norm)))\n sizes = [\n len(x_norm) * float(np.array(layer.output_shape[1:]).prod()) * float(32 /(8 * 1e9)) \\\n for layer in model.layers if len(layer.weights) > 0 ]\n size_str = ['{:.2f}'.format(s) for s in sizes]\n print('INFO: Size of layer activations: ', size_str ,'GB\\n') \n req_space = max(sizes)\n print(\"Required {:.2f} GB of free space for the largest activation. \\n\".format(req_space))\n print(\"In total, {:.2f} GB of information flow. \\n\".format(sum(sizes)))\n if req_space > free_GB:\n import warnings\n warnings.warn(\"Required space is larger than specified free space of \"+str(free_GB)+\n \"GB. Reduce size of data set or increase available space.\", ResourceWarning)\n print('[Skipping normalization]')\n return\n scale_facs = OrderedDict({model.layers[0].name: 1})\n else:\n import warnings\n warnings.warn(\"Scale factors or normalization data set could not be \"\n \"loaded. Proceeding without normalization.\",\n RuntimeWarning)\n return\n\n # If scale factors have not been computed in a previous run, do so now.\n if len(scale_facs) == 1:\n i = 0\n sparsity = []\n for layer in model.layers:\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n activations = try_reload_activations(layer, model, x_norm,\n batch_size, activ_dir)\n nonzero_activations = activations[np.nonzero(activations)]\n sparsity.append(1 - nonzero_activations.size / activations.size)\n del activations\n perc = get_percentile(config, i)\n scale_facs[layer.name] = get_scale_fac(nonzero_activations, perc)\n print(\"Scale factor: {:.2f}.\".format(scale_facs[layer.name]))\n # Since we have calculated output activations here, check at this\n # point if the output is mostly negative, in which case we should\n # stick to softmax. 
Otherwise ReLU is preferred.\n # Todo: Determine the input to the activation by replacing the\n # combined output layer by two distinct layers ``Dense`` and\n # ``Activation``!\n # if layer.activation == 'softmax' and settings['softmax_to_relu']:\n # softmax_inputs = ...\n # if np.median(softmax_inputs) < 0:\n # print(\"WARNING: You allowed the toolbox to replace \"\n # \"softmax by ReLU activations. However, more than \"\n # \"half of the activations are negative, which \"\n # \"could reduce accuracy. Consider setting \"\n # \"settings['softmax_to_relu'] = False.\")\n # settings['softmax_to_relu'] = False\n i += 1\n # Write scale factors to disk\n from snntoolbox.utils.utils import confirm_overwrite\n if config.get('output', 'overwrite') or confirm_overwrite(filepath):\n with open(filepath, str('w')) as f:\n json.dump(scale_facs, f)\n np.savez_compressed(os.path.join(norm_dir, 'activations', 'sparsity'),\n sparsity=sparsity)\n\n # Apply scale factors to normalize the parameters.\n for layer in model.layers:\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n # Scale parameters\n parameters = layer.get_weights()\n if layer.activation.__name__ == 'softmax':\n # When using a certain percentile or even the max, the scaling\n # factor can be extremely low in case of many output classes\n # (e.g. 0.01 for ImageNet). This amplifies weights and biases\n # greatly. But large biases cause large offsets in the beginning\n # of the simulation (spike input absent).\n scale_fac = 1.0\n print(\"Using scale factor {:.2f} for softmax layer.\".format(\n scale_fac))\n else:\n scale_fac = scale_facs[layer.name]\n inbound = get_inbound_layers_with_params(layer)\n if len(inbound) == 0: # Input layer\n parameters_norm = [\n parameters[0] * scale_facs[model.layers[0].name] / scale_fac,\n parameters[1] / scale_fac]\n elif len(inbound) == 1:\n parameters_norm = [\n parameters[0] * scale_facs[inbound[0].name] / scale_fac,\n parameters[1] / scale_fac]\n else:\n # In case of this layer receiving input from several layers, we can\n # apply scale factor to bias as usual, but need to rescale weights\n # according to their respective input.\n parameters_norm = [parameters[0], parameters[1] / scale_fac]\n if parameters[0].ndim == 4:\n # In conv layers, just need to split up along channel dim.\n offset = 0 # Index offset at input filter dimension\n for inb in inbound:\n f_out = inb.filters # Num output features of inbound layer\n f_in = range(offset, offset + f_out)\n parameters_norm[0][:, :, f_in, :] *= \\\n scale_facs[inb.name] / scale_fac\n offset += f_out\n else:\n # Fully-connected layers need more consideration, because they\n # could receive input from several conv layers that are\n # concatenated and then flattened. The neuron position in the\n # flattened layer depend on the image_data_format.\n raise NotImplementedError\n\n # Check if the layer happens to be Sparse\n # if the layer is sparse, add the mask to the list of parameters\n if len(parameters) == 3:\n parameters_norm.append(parameters[-1])\n # Update model with modified parameters\n layer.set_weights(parameters_norm)\n\n # Plot distributions of weights and activations before and after norm.\n if 'normalization_activations' in eval(config.get('output', 'plot_vars')):\n from snntoolbox.simulation.plotting import plot_hist\n from snntoolbox.simulation.plotting import plot_max_activ_hist\n\n # All layers in one plot. 
Assumes model.get_weights() returns\n # [w, b, w, b, ...].\n # from snntoolbox.simulation.plotting import plot_weight_distribution\n # plot_weight_distribution(norm_dir, model)\n\n print(\"Plotting distributions of weights and activations before and \"\n \"after normalizing...\")\n\n # Load original parsed model to get parameters before normalization\n weights = np.load(os.path.join(activ_dir, 'weights.npz'))\n for idx, layer in enumerate(model.layers):\n # Skip if layer has no parameters\n if len(layer.weights) == 0:\n continue\n\n label = str(idx) + layer.__class__.__name__ \\\n if config.getboolean('output', 'use_simple_labels') \\\n else layer.name\n parameters = weights[layer.name]\n parameters_norm = layer.get_weights()[0]\n weight_dict = {'weights': parameters.flatten(),\n 'weights_norm': parameters_norm.flatten()}\n plot_hist(weight_dict, 'Weight', label, norm_dir)\n\n # Load activations of model before normalization\n activations = try_reload_activations(layer, model, x_norm,\n batch_size, activ_dir)\n\n if activations is None or x_norm is None:\n continue\n\n # Compute activations with modified parameters\n nonzero_activations = activations[np.nonzero(activations)]\n activations_norm = get_activations_layer(model.input, layer.output,\n x_norm, batch_size)\n activation_dict = {'Activations': nonzero_activations,\n 'Activations_norm':\n activations_norm[np.nonzero(activations_norm)]}\n scale_fac = scale_facs[layer.name]\n plot_hist(activation_dict, 'Activation', label, norm_dir,\n scale_fac)\n ax = tuple(np.arange(len(layer.output_shape))[1:])\n plot_max_activ_hist(\n {'Activations_max': np.max(activations, axis=ax)},\n 'Maximum Activation', label, norm_dir, scale_fac)\n print('')",
"def cast(obj: 'itkLightObject') -> \"itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSUC2 *\":\n return _itkEuclideanDistancePointSetToPointSetMetricPython.itkRegistrationParameterScalesFromPhysicalShiftEDPSTPSMPSUC2_cast(obj)",
"def set_scale(self, motor_model):\n for driver_re, motor_dict in self.__SCALE_FACTORS_BY_MODEL.iteritems():\n if driver_re.match(self._apt.model_number) is not None:\n if motor_model in motor_dict:\n self.scale_factors = motor_dict[motor_model]\n return\n else:\n break\n # If we've made it down here, emit a warning that we didn't find the\n # model.\n logger.warning(\n \"Scale factors for controller {} and motor {} are unknown\".format(\n self._apt.model_number, motor_model\n )\n )",
"def scale():\n if viewMode == Mode_Isometric:\n return scale2d\n if viewMode == Mode_Perspective:\n return scale3d",
"def scale_model (wdat,fdat,wmod,fmod,wmin,wmax):\n\tdata_ave=ave(wdat,fdat,wmin,wmax)\n\tmod_ave=ave(wmod,fmod,wmin,wmax);\n\tscale=data_ave/mod_ave\n\treturn scale*fmod"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
From all the data, it takes the TopicID column and counts the topics by gender | def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:
data_frame_topic = data_frame \
.filter(data_frame["Stratification1"].contains("Male")) \
.distinct() \
.groupBy("TopicID") \
.count() \
.sort("TopicID")
print("The following table represent the number of men group by the topic: ")
data_frame_topic.show()
data_frame_pandas = data_frame.toPandas()
return data_frame_pandas | [
"def get_male_female_topicsDF(data_dict, gender):\n dataDF = pd.DataFrame.from_dict(data_dict[gender], orient='index')\n outlet_gender_topicsDF = pd.json_normalize(dataDF['topic_mean'])\n outlet_gender_topicsDF.index = dataDF.index\n outlet_gender_topicsDF = outlet_gender_topicsDF.sort_index()\n outlet_gender_topicsDF = outlet_gender_topicsDF.transpose()\n return outlet_gender_topicsDF",
"def construct_gender_df(data_dict):\n gender_dict = data_dict['perGenderTopics']\n topics = data_dict['topics']\n # Convert to Pandas DataFrame\n genderDF = pd.DataFrame.from_dict(gender_dict, orient='index').transpose()\n genderDF = genderDF[['female', 'male']]\n genderDF['diff'] = genderDF['female'] - genderDF['male']\n # Sort in order of the sum of mean values for each topic\n genderDF = genderDF.sort_values('diff')\n genderDF['topic'] = [f\"t{i}\" for i in genderDF.index]\n\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in genderDF.index}\n ordered_names = [topics[idx]['name'] for idx in genderDF.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n genderDF['topic_names'] = y_labels\n return genderDF",
"def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics",
"def user_gender_statistics(df):\n print('Count of gender \\n')\n gender_counts=df['Gender'].value_counts()\n #loop through to print the total number of gender\n for index, gender_count in enumerate(gender_counts):\n print(' {}: {}'.format(gender_counts.index[index],gender_count))\n \n print()",
"def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics",
"def count_gender(data):\n data = column_to_list(data, -2)\n male = data.count(\"Male\")\n female = data.count(\"Female\")\n return [male, female]",
"def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()",
"def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Black, non-Hispanic\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of black ethnicity people group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas",
"def getTopicsData(self):\n\t\ttry:\n\t\t\ttotal_questions = 0\n\t\t\ttopic_id = self.topicID\n\t\t\tfilterTopics = {'topic_id__in':self.topicID}\n\t\t\tif self.topicID:\n\t\t\t\tfilterTopics['channel_id__in']=self.channelID\n\t\t\ttopic = Content.objects.filter(**filterTopics)\n\t\t\tfor t in topic:\n\t\t\t# topic = Content.objects.filter(topic_id__in=topic_ids).filter(channel_id__in=channel_ids).first()\n\t\t\t\ttotal_questions += t.total_questions\n\t\t\treturn total_questions\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)",
"def get_gender_counts(self, source='tips', per_day_or_month='day'):\n\n # self.sqlContext.clearCache()\n\n if source == 'users':\n source_df = self.users\n elif source == 'reviews':\n source_df = self.reviews\n elif source == 'tips':\n source_df = self.tips\n else:\n print ('unknown source. Must be one of [\"tips\", \"reviews\", \"users\"]')\n return\n\n source_df.registerTempTable('source_df')\n\n if source != 'users':\n date = 'date'\n if per_day_or_month == 'day':\n mf_df = self.spark.sql(\"\"\" SELECT s.date, mf.gender\n FROM source_df s, maleFemale_table mf\n WHERE mf.user_id = s.user_id\n \"\"\")\n else:\n mf_df = self.spark.sql(\"\"\" SELECT s.mod_date as date, mf.gender\n FROM source_df s, maleFemale_table mf\n WHERE mf.user_id = s.user_id\n \"\"\")\n else:\n mf_df = self.users\n date = 'yelping_since'\n\n mf_df.registerTempTable('mf_df')\n male_counts = self.spark.sql(\"\"\" SELECT {0} AS date, COUNT(gender) AS male_count\n FROM mf_df\n WHERE gender = 1\n GROUP BY {0}\n \"\"\".format(date))\n\n female_counts = self.spark.sql(\"\"\" SELECT {0} AS date, COUNT(gender) AS female_count\n FROM mf_df\n WHERE gender = 0\n GROUP BY {0}\n \"\"\".format(date))\n\n gender_counts = male_counts.join(female_counts, 'date')\n gender_counts.registerTempTable('gender_counts')\n\n gender_counts = self.spark.sql('SELECT * FROM gender_counts ORDER BY date')\n\n return gender_counts.toPandas()",
"def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)",
"def top100_per_gender_and_topic(female, male, topic):\n t_female = female.sort_values(by=topic, ascending=False).iloc[:LIMIT, :]\n t_male = male.sort_values(by=topic, ascending=False).iloc[:LIMIT, :]\n return t_female, t_male",
"def construct_outlet_gender_DF(data_dict):\n outlet_gender_dict = data_dict['perOutletGenderTopics']\n topics = data_dict['topics']\n male_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'male')\n female_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'female')\n # Plot the difference between the male-dominant and female-dominant topics\n diff = female_outlet_topics - male_outlet_topics\n # Calculate sum of all columns to decide sorting order\n diff['net'] = diff[diff.columns].sum(axis=1)\n diff = diff.sort_values('net').drop('net', axis=1)\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in diff.index}\n ordered_names = [topics[idx]['name'] for idx in diff.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n return diff, y_labels",
"def get_multiclass_msg_stats(df):\n \n categories = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n categories['total_categories'] = categories.sum(axis=1)\n multiclass_msgs = categories.groupby('total_categories').count()[['related']]\n multiclass_msgs.columns = ['total']\n \n return multiclass_msgs",
"def Gender_data():\n # Query for Audience Count by Gender\n sel = [Leads_table.Gender, func.count(Leads_table.ConsumerID)]\n results = db.session.query(*sel).\\\n group_by(Leads_table.Gender).all()\n df1 = pd.DataFrame(results, columns=['Gender', 'AudienceCount'])\n return jsonify(df1.to_dict(orient=\"records\"))",
"def getSubTopicsData(self):\n\t\ttry:\n\t\t\ttotal_subtopics = 0\n\t\t\ttopic_id= self.topicID\n\t\t\tfilterTopics = {'topic_id__in':self.topicID}\n\t\t\tif self.topicID:\n\t\t\t\tfilterTopics['channel_id__in']=self.channelID\n\t\t\ttopic = Content.objects.filter(**filterTopics)\n\t\t\t# topic = Content.objects.filter(topic_id__in=topic_ids).filter(channel_id__in=channel_ids).first()\n\t\t\tfor st in topic:\n\t\t\t\ttotal_subtopics += st.sub_topics_total\n\t\t\treturn total_subtopics\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)",
"def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)",
"def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()",
"def word_count_topic_keywords(lda_model, final_list):\n\n #Création d'un DataFrame avec les mots, les topics, le poids relatif des mots et leur nombre d'occurences\n topics = lda_model.show_topics(formatted=False)\n data_flat = [w for w_list in final_list for w in w_list]\n counter = Counter(data_flat)\n out = []\n for i, topic in topics:\n for word, weight in topic:\n out.append([word, i , weight, counter[word]])\n df = pd.DataFrame(out, columns=['word', 'topic_id', 'importance', 'word_count'])\n\n num_topics = lda_model.num_topics\n\n if num_topics%2 == 0:\n r = 2\n n = int(num_topics/2)\n elif num_topics%3 == 0:\n r = 3\n n = int(num_topics/3)\n else:\n r = 1\n n = num_topics\n\n #Visualisation des données sous la forme de graphiques avec le module matplotlib\n fig, axes = plt.subplots(n, r, figsize=(16,10), sharey=True, dpi=160)\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]\n for i, ax in enumerate(axes.flatten()):\n ax.bar(x='word', height=\"word_count\", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.5, alpha=0.3, label='Word Count')\n ax_twin = ax.twinx()\n ax_twin.bar(x='word', height=\"importance\", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.2, label='Weights')\n ax.set_ylabel('Word Count', color=cols[i])\n ax_twin.set_ylim(0, 0.030); ax.set_ylim(0, )\n ax.set_title('Topic: ' + str(i), color=cols[i], fontsize=16)\n ax.tick_params(axis='y', left=False)\n ax.set_xticklabels(df.loc[df.topic_id==i, 'word'], rotation=30, horizontalalignment= 'right')\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\n fig.tight_layout(w_pad=2)\n fig.suptitle('Word Count and Importance of Topic Keywords', fontsize=22, y=1.05)\n plt.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
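The gender-by-topic entry is a Spark filter, distinct, groupBy, count, sort chain; the same aggregation in plain pandas, assuming a toy frame with TopicID and Stratification1 columns:

    import pandas as pd

    df = pd.DataFrame({
        "TopicID": ["T1", "T2", "T2", "T3"],
        "LocationDesc": ["AL", "AL", "AK", "AL"],
        "Stratification1": ["Male", "Male", "Male", "Female"],
    })

    counts = (
        df[df["Stratification1"].str.contains("Male")]  # case-sensitive, so "Female" is excluded
        .drop_duplicates()
        .groupby("TopicID")
        .size()
        .sort_index()
    )
    print(counts)   # T1 -> 1, T2 -> 2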
From all the data, it takes the TopicID column and counts the topics by ethnicity | def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:
data_frame_topic = data_frame \
.filter(data_frame["Stratification1"].contains("Black, non-Hispanic")) \
.distinct() \
.groupBy("TopicID") \
.count() \
.sort("TopicID")
print("The following table represent the number of black ethnicity people group by the topic: ")
data_frame_topic.show()
data_frame_pandas = data_frame.toPandas()
return data_frame_pandas | [
"def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Male\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of men group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas",
"def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics",
"def getTopicsData(self):\n\t\ttry:\n\t\t\ttotal_questions = 0\n\t\t\ttopic_id = self.topicID\n\t\t\tfilterTopics = {'topic_id__in':self.topicID}\n\t\t\tif self.topicID:\n\t\t\t\tfilterTopics['channel_id__in']=self.channelID\n\t\t\ttopic = Content.objects.filter(**filterTopics)\n\t\t\tfor t in topic:\n\t\t\t# topic = Content.objects.filter(topic_id__in=topic_ids).filter(channel_id__in=channel_ids).first()\n\t\t\t\ttotal_questions += t.total_questions\n\t\t\treturn total_questions\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)",
"def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics",
"def getSubTopicsData(self):\n\t\ttry:\n\t\t\ttotal_subtopics = 0\n\t\t\ttopic_id= self.topicID\n\t\t\tfilterTopics = {'topic_id__in':self.topicID}\n\t\t\tif self.topicID:\n\t\t\t\tfilterTopics['channel_id__in']=self.channelID\n\t\t\ttopic = Content.objects.filter(**filterTopics)\n\t\t\t# topic = Content.objects.filter(topic_id__in=topic_ids).filter(channel_id__in=channel_ids).first()\n\t\t\tfor st in topic:\n\t\t\t\ttotal_subtopics += st.sub_topics_total\n\t\t\treturn total_subtopics\n\t\texcept Exception as e:\n\t\t\tlogger.error(e)",
"def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)",
"def word_count_topic_keywords(lda_model, final_list):\n\n #Création d'un DataFrame avec les mots, les topics, le poids relatif des mots et leur nombre d'occurences\n topics = lda_model.show_topics(formatted=False)\n data_flat = [w for w_list in final_list for w in w_list]\n counter = Counter(data_flat)\n out = []\n for i, topic in topics:\n for word, weight in topic:\n out.append([word, i , weight, counter[word]])\n df = pd.DataFrame(out, columns=['word', 'topic_id', 'importance', 'word_count'])\n\n num_topics = lda_model.num_topics\n\n if num_topics%2 == 0:\n r = 2\n n = int(num_topics/2)\n elif num_topics%3 == 0:\n r = 3\n n = int(num_topics/3)\n else:\n r = 1\n n = num_topics\n\n #Visualisation des données sous la forme de graphiques avec le module matplotlib\n fig, axes = plt.subplots(n, r, figsize=(16,10), sharey=True, dpi=160)\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]\n for i, ax in enumerate(axes.flatten()):\n ax.bar(x='word', height=\"word_count\", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.5, alpha=0.3, label='Word Count')\n ax_twin = ax.twinx()\n ax_twin.bar(x='word', height=\"importance\", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.2, label='Weights')\n ax.set_ylabel('Word Count', color=cols[i])\n ax_twin.set_ylim(0, 0.030); ax.set_ylim(0, )\n ax.set_title('Topic: ' + str(i), color=cols[i], fontsize=16)\n ax.tick_params(axis='y', left=False)\n ax.set_xticklabels(df.loc[df.topic_id==i, 'word'], rotation=30, horizontalalignment= 'right')\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\n fig.tight_layout(w_pad=2)\n fig.suptitle('Word Count and Importance of Topic Keywords', fontsize=22, y=1.05)\n plt.show()",
"def get_num_topics(corp, dic,dwb):\n max=0\n topics=0\n print('Getting ideal number of topics...')\n for i in range(1,21):\n #building the topic model\n lda_model = gensim.models.ldamodel.LdaModel(corpus=corp, id2word=dic,num_topics=i,random_state=100,\n update_every=1, chunksize=100,passes=10,alpha='auto',per_word_topics=True)\n\n #pprint(lda_model.print_topics())\n doc_lda = lda_model[corp]\n print(i)\n\n # Compute Perplexity\n print('\\nPerplexity: ', lda_model.log_perplexity(corp)) # a measure of how good the model is. lower the better.\n\n # Compute Coherence Score\n coherence_model_lda = CoherenceModel(model=lda_model, texts= dwb, dictionary=dic, coherence='c_v')\n coherence_lda = coherence_model_lda.get_coherence()\n print('Coherence:',coherence_lda)\n if coherence_lda>max:\n max=coherence_lda\n \n topics=i\n #print('\\nIdeal Number of Topics:',topics,'Coherence Score: ',coherence_lda)\n return topics",
"def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)",
"def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()",
"def get_multiclass_msg_stats(df):\n \n categories = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n categories['total_categories'] = categories.sum(axis=1)\n multiclass_msgs = categories.groupby('total_categories').count()[['related']]\n multiclass_msgs.columns = ['total']\n \n return multiclass_msgs",
"def _testTopics(self):\n # TODO: Enable when appropriate\n topics = [\"attitudes/expectations/happiness\", \"behavior\", \"cognitive skills\", \"childcare - calendar\",\n \"childcare services and availability\",\n \"childcare center composition\", \"childcare staff characteristics\", \"accidents and injuries\",\n \"disabilities\", \"fertility history\",\n \"health behavior\", \"health care access and insurance\", \"height and weight\", \"medication\",\n \"mental health\", \"physical health\",\n \"sexual health and behavior\", \"substance use and abuse\", \"child living arrangements\",\n \"current partner living arrangements\", 'home environment',\n \"household composition\", \"housing status\", \"parents' living arrangements\", \"residential mobility\",\n \"grandparents\", \"parents' family background\",\n \"social support\", \"community participation\", \"neighborhood conditions\", \"age\",\n \"citizenship and nativity\", \"language\", \"mortality\",\n \"race/ethnicity\", \"religion\", \"sex/gender\", \"child support\", \"earnings\", \"expenses\",\n \"financial assets\", \"household income/poverty\",\n \"income tax\", \"material hardship\", \"private transfers\", \"public transfers and social services\",\n \"educational attainment/achievement\",\n \"parent school involvement\", \"peer characteristics\", \"school characteristics\", \"school composition\",\n \"student experiences\", \"teacher characteristics\", \"employment - calendar\",\n \"employment - traditional work\", \"employment - non-traditional work\",\n \"unemployment\", \"work stress/flexibility\", \"criminal justice involvement\", \"legal custody\",\n \"paternity\", \"police contact and attitudes\",\n \"new partner relationship quality\", \"new partner relationship status\",\n \"parental relationship history\", \"parental relationship quality\",\n \"parental relationship status\", \"paradata\", \"survey weights\", \"child welfare services\",\n \"parent-child contact\", \"parenting abilities\", \"parenting behavior\"]\n\n self.assertEqual(len(self.df[self.df.topic1.notnull()][~self.df.topic1.isin(topics)]),0)\n self.assertEqual(len(self.df[self.df.topic2.notnull()][~self.df.topic2.isin(topics)]), 0)",
"def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter",
"def num_topics(model, response, **kwargs):\n return {\"topics\": model.num_topics}",
"def map_topics_to_support_count(city_ids):\r\n support_for_topics = {}\r\n for identification in city_ids:\r\n idea_url = 'https://neighborland.com/api/v1/ideas/' + identification\r\n idea = urllib.urlopen(idea_url).read()\r\n city_topics = extract_topics(idea)\r\n support_count = extract_support_count(idea)\r\n for topic in city_topics:\r\n if topic in support_for_topics:\r\n support_for_topics[topic] = support_for_topics[topic] + support_count\r\n else:\r\n support_for_topics[topic] = support_count\r\n return support_for_topics",
"def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)",
"def __find_classification_count__(self):\n new_classification_count = []\n ids = []\n\n cur = self.conn.cursor()\n #cur.execute(\"Select * from subject_sets where \")\n\n # keeping the below commands in as a reminder of how to do the relevant SQL commands\n # cur.execute(\"SELECT subject_id from set_member_subjects where subject_set_id=4 OR subject_set_id=3\")\n cur.execute(\"SELECT expert_set from subject_sets where subject_sets.project_id=\"+str(project_id)+\" and subject_sets.workflow_id=\" + str(workflow_id))\n rows = cur.fetchall()\n\n\n cur.execute(\"SELECT subject_id,classification_count from set_member_subjects inner join subject_sets on set_member_subjects.subject_set_id=subject_sets.id where (subject_sets.expert_set = FALSE or subject_sets.expert_set IS NULL) and subject_sets.project_id=\"+str(self.project_id)+\" and subject_sets.workflow_id=\" + str(self.workflow_id) +\" ORDER BY subject_id\")\n rows = cur.fetchall()\n for subject_id,count in rows:\n if count >= 5:\n new_classification_count.append(count)\n ids.append(subject_id)\n\n return ids,new_classification_count",
"def get_category_stats(df):\n message_count = len(df)\n\n categories = df.drop(['id', 'message', 'original', 'genre'], axis=1).columns\n\n cat_msg_counts = []\n\n for category in categories:\n cat_msg_counts.append(len(df[df[category] != 0]))\n\n categorical_message_counts_df = pd.DataFrame(cat_msg_counts, columns=['Message Counts'], index=categories)\n \n return message_count, categorical_message_counts_df.sort_values(by=['Message Counts'], ascending=False), categories",
"def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot a data frame with bar type | def plot_type_of_topic(data_frame: pb.DataFrame) -> None:
plt.interactive(False)
plt.figure()
    data_frame.plot(kind='bar', x='TopicID')
plt.show() | [
"def bar(df, ax=None, **kwargs):\n if df.shape[1] != 2:\n raise ValueError('Two columns required: %s.' % (df.columns,))\n else:\n df = _preprocess_dataframe(df)\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n\n xlabels = df.ix[:,0].values\n if len(set(xlabels)) != len(xlabels):\n raise ValueError('Unique nominal values required: %s.' % (xlabels,))\n\n ax.bar([x-.5 for x in xrange(df.shape[0])], df.ix[:,1].values, alpha=0.7)\n\n ax.set_xticks(range(df.shape[0]))\n ax.set_xticklabels(xlabels, rotation=90)\n ax.set_xlim([-1, df.shape[0] - .5])\n\n ax.set_xlabel(df.columns[0])\n ax.set_ylabel(df.columns[1])\n ax.grid()\n\n return fig",
"def bar(data=None, xlabel='', ylabel='', **kwargs):\n if 'color' not in kwargs:\n kwargs['color'] = list(shared.znes_colors().values())[0:data.shape[0]] #index 0 statt urpsrünglich 1?\n if 'rot' not in kwargs:\n kwargs['rot'] = 0\n ax = data.plot(kind='bar', grid=True, **kwargs)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax",
"def plot_chart(df):\n df.plot(x ='Dice Number', y='Rolls', kind = 'bar')\n plt.show()",
"def barplot(self, *args, **kwargs):\n self.add_visual(vs.BarVisual, *args, **kwargs)",
"def bar(self,cols, title=\"\"):\n self.create_plot(cols, title = title, lineType='bar')\n return self",
"def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)",
"def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")",
"def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()",
"def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()",
"def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()",
"def plotly_bar_chart(df: pd.DataFrame):\n fig = px.bar(df, x=\"x\", y=\"y\")\n return fig",
"def visulize_type(parsed_data):\n counter = Counter(item[\"Category\"] for item in parsed_data)\n\n labels = counter.keys()\n\n # width of each bar\n width = 0.5\n xlocations = np.arange(len(labels)) + width\n\n # The location of the Bars left edges\n plt.bar(xlocations, counter.values(), width = width)\n\n # the labels should be at the half of each bar\n plt.xticks(xlocations + width / 2, labels, rotation = 90)\n\n plt.subplots_adjust(bottom=0.4)\n\n # plt.rcParams['figure.figsize'] = 12, 20\n plt.savefig(\"Type.png\")\n\n plt.clf()",
"def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()",
"def bar_chart(\n df,\n orientation='v',\n bar_width=None,\n opacity=0.9,\n textpos=None,\n linewidth=1,\n linecolor='#2C3347',\n marker_color=None,\n **kwargs):\n\n traces = []\n rng = df.index.size if orientation == 'v' else df.columns.size\n otn = orientation\n for i in range(rng):\n x = [str(x) for x in df.columns] if otn == 'v' else df.iloc[:, i]\n y = df.iloc[i] if otn == 'v' else [str(x) for x in df.index]\n text = df.iloc[i] if otn == 'v' else df.iloc[:, i]\n name = df.iloc[i].name if otn == 'v' else df.columns[i]\n\n preset_args = dict(\n x=x,\n y=y,\n text=text,\n textposition=textpos,\n marker=dict(\n opacity=opacity,\n color=marker_color,\n line=dict(\n color=linecolor,\n width=linewidth)),\n name=name,\n width=bar_width,\n orientation=orientation\n )\n\n all_args = {**preset_args, **kwargs}\n bar = go.Bar(all_args)\n traces.append(bar)\n\n return traces",
"def plot_bar_chart(objects, data, title='', ylabel='', bar_color = 'blue'):\n y_pos = np.arange(len(objects))\n\n plt.bar(y_pos, data, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation='vertical')\n plt.ylabel(ylabel, fontsize=12)\n plt.title(title, fontsize=12)\n plt.ylim([0,1300])\n plt.bar(range(len(data)), data, color=bar_color)\n\n return plt.show()",
"def plot_bars(DataFrame, c_vars):\r\n \r\n for c_var in c_vars:\r\n plt.figure()\r\n DataFrame.plot.bar([c_var])\r\n plt.savefig('bargraph_'+str(c_var)+'.png', dpi = 100)\r\n plt.close",
"def plot_bar(keys, x, df, show=True, **kwargs):\n names = kwargs.pop('names', None)\n colors = kwargs.pop('colors', None)\n widget = kwargs.pop('widget', False)\n\n traces = [\n go.Bar(\n name=names[ind] if names is not None else key,\n x=x,\n y=df[key],\n marker_color=colors[ind] if colors is not None else None\n )\n for ind, key in enumerate(keys)\n\n ]\n fig = plot(\n traces=traces,\n show=show,\n widget=widget,\n **kwargs\n )\n if widget:\n return fig\n else:\n return df",
"def StackBarplot(df,\n bin_width=0.1,\n rank=False,axes=None,\n fontsize=15,\n linewidth=0.1,\n yticklabel=True,save=False):\n if rank:\n df = resortFirstSample(df)\n fig,ax = axesConf(df,axes=axes)\n # prefix data structure\n featureList = list(df.index)[::-1]\n\n # color\n colors = list(cm.tab20.colors)\n category_colors = [to_hex(color) for color in colors]\n\n xrange = np.arange(0,len(df.columns))\n #xrange = np.arange(0,bin_width* len(df.columns),step=bin_width) ## todo:乘法有问题,容易使得xrange与df.columns的长度不一致\n starts= [0 for i in range(len(df.columns))]\n\n for (i,feature) in enumerate(featureList):\n # stacked barplot: add bar one by one sample\n ## color\n #category_colors = color_conf(len(taxonList))\n #category_colors = plt.get_cmap('tab20')(np.linspace(0.15, 0.85, len(taxonList)))\n\n ## stacked bar\n\n height = df.loc[feature,:].values\n height = np.array(height)\n ax.bar(xrange, height, bottom=starts, width=bin_width,\n linewidth=linewidth,\n edgecolor='black',\n align='edge',\n label=feature, color=category_colors[i])\n\n starts = [i+j for i,j in zip(starts,height)]\n\n ax.legend(bbox_to_anchor=(1, 0),\n loc='lower left',\n fontsize=fontsize,\n facecolor='w')\n ## tick setting\n for xline,yline in zip(ax.get_xticklines(),ax.get_yticklines()):\n xline.set_visible(False)\n #yline.set_visible(False)\n\n for (xlabel,ylabel) in zip(ax.get_xticklabels(),ax.get_yticklabels()):\n ylabel.set_color('black')\n ylabel.set_fontsize(10)\n\n\n ax.xaxis.set_major_locator(ticker.NullLocator())\n ## set spines invisible\n ax.spines['bottom'].set_color(None)\n ax.spines['right'].set_color(None)\n ax.spines['top'].set_color(None)\n #if save:\n #baseconf.BaseAxes.savefig('StackBarplot')\n plt.tight_layout()\n return fig,ax",
"def plot_tornado(title, data, sorted=False):\n df = pd.Series(data).to_frame()\n\n if sorted is True:\n df = df.sort_values(by=0, ascending=True)\n\n # Horizontal bar charts start at the bottom\n # so we reverse the row order before plotting\n df = df.iloc[::-1]\n ax = df.plot.barh(figsize=(10, len(df.index) * 0.4), legend=False)\n ax.set_title(title, fontsize=18)\n\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axvline(x=0, color=\"black\", lw=0.8)\n\n for (p, resource) in zip(ax.patches, list(df.index)):\n b = p.get_bbox()\n x_pos = b.x1 if b.x1 >= 0 else b.x0\n val = \"%.2e\" % b.x1\n ax.annotate(\n f\" {resource}: {val} \", (x_pos, b.y1), fontsize=12, verticalalignment=\"top\"\n )\n plt.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the account for the given client. If it does not exist, a new one is created and returned | def get_account(self, client: int):
try:
return self.accounts[client]
except KeyError:
return self._create_account(client) | [
"def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]",
"def get(client_id):\n return Client.query.filter_by(client_id=client_id).first()",
"def get_account():\n return api.get_account()",
"def find_client(self, client_id):\n return self.__repo.find(client_id)",
"def get_client_id(account):\n return get_oauth_credentials(account, item_name='Client ID')",
"def get_account_for_user(cls, user):\n email = user.email()\n assert email\n key = '<%s>' % email\n # Since usually the account already exists, first try getting it\n # without the transaction implied by get_or_insert().\n account = cls.get_by_key_name(key)\n if account is not None:\n return account\n nickname = cls.create_nickname_for_user(user)\n return cls.get_or_insert(key, user=user, email=email, nickname=nickname,\n fresh=True)",
"def get_account(account_id):\n return Account.objects.get(pk=account_id)",
"async def account(sessionid: str = Form(...),\n clients: ClientStorage = Depends(get_clients)) -> Dict:\n cl = clients.get(sessionid)\n return cl.insights_account()",
"def get_client_by_id(self, client_id=None):\n # search client_id in list and return the client object\n for client in self.client_list:\n if client_id == client.client_id:\n return client.copy()\n\n # return empty client otherwise\n return Client()",
"def get_account(self):\n r = requests.get(ACCOUNT_URL, headers = {'APCA-API-KEY-ID': self.API_KEY, 'APCA-API-SECRET-KEY': self.SECRET_KEY})\n account = json.loads(r.content)\n return account",
"def account(self, account_id):\r\n return resources.Account(self, account_id)",
"def _GetAccountFromUser(self):\n name = self._GetAccountNameFromUser()\n number = self._GetAccountNumberFromUser()\n # Validate that the number is a number (assumes no alphabet characters in\n # the account number).\n if re.match(\"^[0-9]*$\", number) is None:\n raise ValueError(\"Account number is invalid: %r\" % number)\n return accounts_lib.Account(name, int(number))",
"def get_account(self, address):\n return self._get_account(self._call(\"getAccount\", address))",
"async def create_client(client: Client):\n # Récupère les clients de la base\n db = Database()\n db_clients = db.get_clients()\n\n # Trouve un nouvel identifiant\n client_id = str(int(max(db_clients.keys(), default=0)) + 1)\n # Ajoute le client à la base\n db_clients[client_id] = {\n 'name': client.name,\n 'first_name': client.first_name,\n 'accounts': {}\n }\n\n # Sauvegarde la base\n db.save()\n\n return client",
"def client(self, id):\n return self.query(Client).filter(Client.id == id).one()",
"def get_accounts_client(self):\n return self._connection.get_client('azure.devops.released.accounts.accounts_client.AccountsClient')",
"def get_account(self, name):\n return self._accounts[name]",
"def get_client(self, client_name):\n client_class = self.config.pids_providers_clients[client_name]\n return client_class(name=client_name)",
"def account(self, account_id: str):\n return get_from_list(self.accounts, \"id\", account_id)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate Linke turbidity using Kasten pyrheliometric formula. Note that broadband aerosol optical depth (AOD) can be approximated by AOD measured at 700 nm according to Molineaux [4]. Bird and Hulstrom offer an alternate approximation using AOD measured at 380 nm and 500 nm. Based on original implementation by Armel Oumbe. | def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
# "From numerically integrated spectral simulations done with Modtran
# (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth
# of a clean and dry atmospshere (fictitious atmosphere that comprises only
# the effects of Rayleigh scattering and absorption by the atmosphere gases
# other than the water vapor) the following expression"
# - P. Ineichen (2008)
delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16)
# "and the broadband water vapor optical depth where pwat is the integrated
# precipitable water vapor content of the atmosphere expressed in cm and am
# the optical air mass. The precision of these fits is better than 1% when
# compared with Modtran simulations in the range 1 < am < 5 and
# 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)
delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34
# broadband AOD
delta_a = aod_bb
# "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke
# turbidity at am = 2 can be written. The extension of the Linke turbidity
# coefficient to other values of air mass was published by Ineichen and
# Perez (2002)" - P. Ineichen (2008)
lt = -(9.4 + 0.9 * airmass_absolute) * np.log(
np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a))
) / airmass_absolute
    # filter out extrapolated values
return lt | [
"def linke_turbidity(wv_i, aod550_i, p_air_i, p_air_0_i):\n # prel = p0 / p # Papers mixes p/p0 and p0/p????\n prel = p_air_i / p_air_0_i\n\n term1 = 3.91 * np.exp(0.689 * prel) * aod550_i\n term2 = 0.376 * np.log(wv_i)\n\n Tl2 = term1 + term2 + (2 + 0.54 * prel - 0.5 * prel**2 + 0.16 * prel**2)\n\n return Tl2",
"def KLucbvalue(empiricalMean, armPulls, t):\n low = empiricalMean\n high = 1\n epsilon = 1e-3\n check = lambda q: armPulls * KL(empiricalMean, q) <= np.log(t)\n diff = float('inf')\n q = (low + high) / 2\n while diff > epsilon:\n if check(q):\n low = q\n else:\n high = q\n diff = abs(q - (low + high) / 2)\n q = (low + high) / 2\n\n return q",
"def esw(self, t):\n\n\n es0 = 6.1078\n\n # ES0 = SATURATION VAPOR RESSURE OVER LIQUID WATER AT 0C \n pol = t * (t * (t * (t * (t * (t * (t * (t * (t * \n - 3.0994571e-20 + 1.1112018e-17) - 1.7892321e-15) + \n 2.1874425e-13) - 2.9883885e-11) + 4.3884187e-9) - \n 6.1117958e-7) + 7.8736169e-5) - 0.0090826951) + 0.99999683\n\n # Computing 8th power\n r1 = pol\n r1 *= r1\n r1 *= r1\n ret_val = es0 / (r1 * r1)\n return ret_val\n \n \n def tcon(self, t, d):\n \"\"\" THIS FUNCTION RETURNS THE TEMPERATURE TCON (CELSIUS) AT THE LIFTING */\n CONDENSATION LEVEL, GIVEN THE TEMPERATURE T (CELSIUS) AND THE\n DEW POINT D (CELSIUS).\n\n BAKER,SCHLATTER 17-MAY-1982 Original version \"\"\"\n\n # COMPUTE THE DEW POINT DEPRESSION S.\n\n s = t - d;\n\n # THE APPROXIMATION BELOW, A THIRD ORDER POLYNOMIAL IN S AND T,\n # IS DUE TO HERMAN WOBUS. THE SOURCE OF DATA FOR FITTING THE\n # POLYNOMIAL IS UNKNOWN.\n\n dlt = s * (t * 0.001278 + 1.2185 + s * (s * 1.173e-5\n - 0.00219 - t * 5.2e-6))\n ret_val = t - dlt\n return ret_val\n \n def tsa(self, os, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. \"\"\"\n\n rocp = 0.28571482\n\n a = os # +273.16\n tq = 253.16\n d = 120.0\n \n i = 0\n for i in range(12):\n tqk = tq - 273.16\n d /= 2\n x = a * exp(- 2.6518986 * self.w(tqk, pres) / tq) - tq * pow((1000.0 / pres), rocp) \n if (fabs(x) <= 0.0):\n break\n if x < 0.0:\n sign = - 1\n else:\n sign = 1 \n tq += (d * sign)\n\n return tq # -273.16\n \n def w(self, temp, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. \"\"\"\n \n x = self.esat(temp)\n return (622.0 * x / (pres - x))\n \n def temp_of_te(self, te, press):\n import Temp_of_te\n return Temp_of_te.temp_of_te(te,press)\n\n def capeFunc(self, usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0):\n import CapeFunc\n return CapeFunc.capeFunc(usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0)\n \n def lfcpar(self, eptpar, pcb, tcb, hcb, t1, t2, p1, ht1):\n \"\"\" his routine computes the level of free convection of a rising parcel.\n History.\n -------- \n Don Baker 01 Jun 85 Original version.\n Dale Perry Oct 96 Adapted code to work with WFO\n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n EPTPAR Real Moist adiabat along which parcel rises above\n the LCL (K).\n PCB Real LCL pressure (mb).\n TCB Real LCL temperature (K).\n HCB Real LCL height (m asl).\n T1 Real Array Parcel temperatures at lifted parcel levels (K).\n T2 Real Array Sounding temperatures at parcel levels (K).\n P1 Real Array Lifted parcel pressure levels (mb).\n HT1 Real Array Lifted parcel level heights (m asl).\n NPAR Integer Number of lifted parcel levels passed.\n\n On output:\n ---------- \n PLFC1 Real Level of free convection pressure (mb).\n HLFC1 Real Level of free convection height (m asl).\n TLFC1 Real Level of free convection temperature (K). 
\"\"\"\n \n lfcReturn = zeros((1, 6), 'float32')\n TOLER = 0.05\n npar = p.shape[0]\n print \"npar=\", npar\n # Find the location in the parcel arrays that corresponds to the LCL\n i = 0\n for ii in range(npar) :\n i = ii\n if math.fabs(p1[i] - pcb) < 0.1 :\n break\n else :\n continue\n print \"found pressure at \", i\n # Initially assign flag values to the LFC in case no buoyancy exists.\n plfc1 = meteo.TOP_FLG\n hlfc1 = meteo.TOP_FLG\n tlfc1 = meteo.TOP_FLG\n plfc2 = meteo.TOP_FLG\n hlfc2 = meteo.TOP_FLG\n tlfc2 = meteo.TOP_FLG\n \n if i == npar :\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Check and see if parcel is positively buoyant at the LCL already. If\n # this is true, then the LFC is coincident with the LCL. This may be\n # the case in 00Z soundings when a super-adiabatic layer exists near\n # the surface.\n \n if t1[i] >= t2[i] :\n plfc1 = pcb\n hlfc1 = hcb\n tlfc1 = tcb\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Loop upward from the LCL until the parcel temperature becomes warmer\n # than the environment. If this does not occur, no positive buoyancy\n # exists and the routine exits with whatever flag value was assigned to\n # the level of free convection.\n # To prevent a stack out of bounds error when I=1, set it equal to the\n # next level if I=1.\n \n if i == 0 : \n i = 1\n \n runLoop = True\n print \"entering loop1 with i=\", i\n for j in range(i, npar) :\n if t1[j] >= t2[j] :\n pt = p1[j]\n pb = p1[j - 1]\n plog1 = math.log(p1[j])\n plog3 = math.log(p1[j - 1])\n \n print \"entering inner loop1 j=\", j\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow((pm / 1000.0), 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[j], t2[j - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc1 = pm\n hlfc1 = self.interp1(ht1[j], ht1[j - 1], plog1, math.log(plfc1), plog3)\n tlfc1 = t1m\n runLoop = False;\n print \"attempting to break out of loop 1\"\n break\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n if runLoop != True :\n break\n else :\n continue\n\n # Continue looping to find a possible second LFC per conditions\n # above rules.\n j = j + 1\n print \"entering loop2 with j=\", j\n for k in range(j, npar) :\n if t1[k] >= t2[k] :\n pt = p1[k]\n pb = p1[k - 1]\n plog1 = math.log(p1[k])\n plog3 = math.log(p1[k - 1])\n \n print \"entering inner loop2 k=\", k\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow(pm / 1000.0, 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[k], t2[k - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc2 = pm\n hlfc2 = self.interp1(ht1[k], ht1[k - 1], plog1, math.log(plfc2, plog3))\n tlfc2 = t1m\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n print \"exiting loop2 k=\", k\n return lfcReturn\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n def richno(self, ht, hw, uw, vw, rho, buoy):\n 
\"\"\" Statement of purpose.\n Compute the dimensionless bulk Richardson number as defined by\n Weisman and Klemp (1982).\n History.\n -------- \n Tom Schlatter Late 1982 Original code based on MWR article by \n Weisman and Klemp (1982).\n D. Baker 01 Jun 84 Removed computation of positive energy...\n made it an input argument.\n D. Baker 01 Jul 85 Updated code for documentation.\n J. Ramer 16 Jun 92 Added divide-by-zero prevention.\n D. Perry 10 Oct 96 Adapted code for WFO \n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n HT Sounding heights (m asl).\n HW Heights of wind reports (m asl).\n UW Wind u-components (m/s).\n VW Wind v-components (m/s).\n RHO Air density at each sounding level (kg/m**3).\n BUOY Positive buoyant energy (J/kg).\n\n On output:\n ---------- \n RICHNUM Dimensionless bulk Richardson number. \"\"\"\n \n \n mnl = 500\n nlvls = ht.shape[0]\n nw = uw.shape[0]\n HALFKM = 500.0\n SIXKM = 6000.0\n richnum = meteo.MISSING\n rhow = rho\n # Interpolate an air density value to each reported wind level\n if nlvls != nw :\n rhow = self.wndrho(rho, ht, hw)\n else :\n for i in range(nlvls) :\n rhow[i] = rho[i]\n \n # QC\n qc = 1\n for i in range (2, nw) :\n if uw[i] != uw[0] and vw[i] != vw[0] :\n qc = 0\n \n if nlvls < 3 or nlvls > 500 :\n qc = 1\n \n for i in range(nw) :\n if rhow[i] <= 0.0 : \n qc = 1\n break\n \n for i in range(2, nw) :\n if (hw[i] - hw[i - 1]) <= 0.0 :\n qc = 1\n break\n \n for i in range(2, nlvls) :\n if (ht[i] - ht[i - 1]) <= 0.0 :\n qc = 1\n \n if qc == 1 :\n return richnum\n \n # initialize sums\n \n sumu = 0\n sumv = 0\n sumr = 0\n sumul = 0\n sumvl = 0\n sumrl = 0\n \n # define shear layer bounds (above ground level)\n hbl = hw[0] + HALFKM\n htop = hw[0] + SIXKM\n \n if hw[nw] < htop or hw[1] > htop :\n return richnum\n \n # Loop to calculate shear terms\n \n i = 0\n rulay = 0.5 * (rhow[i] * uw[i])\n rvlay = 0.5 * (rhow[i] * vw[i])\n rlay = 0.5 * rhow[i]\n dz = hw[i]\n \n for i in range(1, nw) :\n rulay = 0.5 * (rhow[i] * uw[i] + rhow[i - 1] * uw[i - 1])\n rvlay = 0.5 * (rhow[i] * vw[i] + rhow[i - 1] * vw[i - 1])\n rlay = 0.5 * (rhow[i] + rhow[i - 1])\n dz = hw[i] - hw[i - 1]\n if hw[i] > htop :\n break\n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n if hw[i] > hbl and i > 1 :\n sumul = sumul + rulay * dz\n sumvl = sumvl + rvlay * dz\n sumrl = sumrl + rlay * dz\n \n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n \n if sumr <= 0.0 :\n u6 = 0.0\n v6 = 0.0\n else : \n u6 = sumu / sumr\n v6 = sumv / sumr\n \n if sumrl <= 0.0 :\n ul = 0.0\n vl = 0.0\n else :\n ul = sumul / sumrl\n vl = sumvl / sumrl\n \n # calculate one half the square of the shear vector in the lowest 6 km\n u6 = u6 - ul\n v6 = v6 - vl\n ske = 0.5 * (u6 * u6 + v6 * v6)\n \n # compute the bulk richardson number\n \n if ske > 0 :\n richnum = buoy / ske\n \n return richnum\n \n def wndrho(self, rho, ht, hw):\n \"\"\" PURPOSE:\n --------\n INTERPOLATE TO DETERMINE DENSITY AT WIND LEVELS GIVEN DENSITY AT\n PRESSURE LEVELS IN A SOUNDING. INTERPOLATION IS LINEAR BY HEIGHT.\n\n T. Schlatter late 82 Probable original author.\n D. Baker 17 Dec 85 Added doc and indentation (?)\n D. Baker (?) after Dec 85 Replaced 100 loop with 300 loop. It\n appears that the interpolation is out.\n J. Wakefield 17 Nov 92 Added parameter list documentation.\n D. 
Perry Sep 96 Adapted code to work with WFO.\n\n Argument I/O Description\n -------- --- -----------------------------------------------\n Rho I Density (kg m-3) at sounding levels.\n Ht I Heights (m) at sounding levels.\n NLvls I Number of sounding levels.\n HW I Heights (m) of wind obs.\n NW I Number of wind obs.\n RhoW O Density interpolated to wind obs heights. \"\"\"\n \n \n # Interpolate to derive density at wind heights\n j = 0\n nw = len(hw)\n skip = False\n for i in range(nw) :\n if skip == True :\n break\n k = j\n for j in range(k, nlvls - 1) :\n if hw[i] >= ht[j] and hw[i] <= ht[j + 1] :\n rhow[i] = self.interp1(rho[j], rho[j + 1], ht[j], hw[i], ht[j + 1])\n skip = True\n break\n \n rhow[0] = rho[0]\n k1 = 0\n k2 = 1\n \n for i in range(1, nw) :\n if ht[k2] < hw[i] :\n k1 = k2\n k2 = k2 + 1\n if k2 > nlvls :\n for j in range(i, nw) :\n rhow[j] = rho[k1]\n return rhow\n \n rhow[i] = self.interp1(rho[k1], rho[k2], ht[k1], hw[i], ht[k2])\n \n return rhow\n \n def lclpar(self, meanmix, ts, p, ht, t, td):\n \"\"\" Statement of purpose.\n ---------------------\n This routine computes the pressure, height, and temperature of the\n lifting condensation level (LCL) from a sounding.\n \n History.\n -------- \n Dale Perry 20 Sep 96 Bootlegged version of cclpar.f modified for\n determining the LCL.\n \n Description of input and output.\n --------------------------------\n On input:\n --------- \n MEANMIX Mixing ratio used to intersect the sounding (g/kg).\n TS Surface temp (12Z-forecast max temp;00Z-sfc temp) (K). \n P Sounding pressures (mb).\n HT Sounding heights (m asl).\n T Sounding temperatures (K).\n TD Sounding dewpoint temperatures (K).\n \n On output:\n ---------- \n PLCL Pressure of the lifting condensation level (mb).\n TLCL Temperature of the lifting condensation level (K).\n HTLCL Height of the lifting condensation level (m asl).\n \n User notes:\n -----------\n The low level mean mixing ratio is input to this routine...\n computed outside. \"\"\"\n\n TOLER = 0.5\n nlvls = len(p)\n lfcReturn = zeros((1, 3), 'float32')\n \n # Loop up through sounding until mixing ratio line corsses the dry \n # adiabat through the surface temperature. Initially set the LCL\n # parameters to MISSING values in case no LCL is found\n \n plcl = meteo.TOP_FLG\n hlcl = meteo.TOP_FLG\n tlcl = meteo.TOP_FLG\n t2 = ts * math.pow(1000.0 / p[0], 0.286)\n \n for i in range(nlvls) :\n t1 = self.temp_mixratio(p[i], meanmix)\n t1 = t1 * math.pow(1000.0 / p[i], 0.286)\n if t1 >= t2 :\n break\n \n if i == 1 : #LCL at the surface\n plcl = p[0]\n hlcl = ht[0]\n tlcl = t[0]\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn\n \n # We were at the top of the sounding, but 'I' got incremented one more\n # beyond. Reset it to the top of the sounding index 'NLVLS'\n if i > nlvls :\n i = nlvls - 1\n \n pt = p[i]\n pb = p[i - 1]\n plog1 = math.log(p[i])\n plog3 = math.log(p[i - 1])\n \n # Iterate to find the LCL. 
Keep cutting level in half until the point\n # of intersection is found\n \n for count in range(100) :\n pm = 0.5 * (pt + pb)\n plog2 = math.log(pm)\n t1 = self.temp_mixratio(pm, meanmix)\n t1 = t1 * math.pow(1000.0 / pm, 0.286)\n if math.fabs(t1 - t2) <= TOLER :\n plcl = pm\n tlcl = t1 * math.pow(plcl / 1000.0, 0.286) \n hlcl = self.interp1(ht[i], ht[i - 1], plog1, math.log(plcl), plog3)\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn \n if (t1 - t2) > TOLER :\n pt = pm\n if (t2 - t1) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn",
"def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda",
"def thermal_deBroglie_wavelength(T_e: u.K) -> u.m:\n return h / np.sqrt(2 * np.pi * m_e * k_B * T_e)",
"def calc_wetbulb_temperature(t, td, p=P0_, eps=1e-8, n=10):\n\n \"\"\"\n Options:\n * A Start with the 1/3-rule for wet-bulb.\n * B Start with wet-bulb = dry-bulb\n B has better overall convergence\n \"\"\"\n\n # Humidity ratio\n W = calc_humidity_ratio(td, p=p)\n\n # First guess for wet-bulb (option A)\n twb = 2 / 3 * t + 1 / 3 * td\n\n dtwb = np.inf\n i = 0\n while np.abs(dtwb).max() > eps:\n\n # Indices with above zero wet bulb temperatures\n above = twb >= 0\n\n # Calculate vapor pressure (and derivative) at current wet-bulb\n pws_wb, dpws_wb = calc_saturation_vapor_pressure(twb, jacobian=True)\n\n # Calculate saturation humidity ratio (and derivative) at wet-bulb\n # temperature\n Ws_wb = epsilon * pws_wb / (p - pws_wb)\n dWs_wb = epsilon * p / (p - pws_wb) ** 2 * dpws_wb\n\n # Calculate a humidty ratio (and derivative) at current wet-bulb\n # using HOF 2013, Chap 1, eqn 36, split into numerator A and\n # denominator B\n A = np.where(\n above,\n (2501 - 2.326 * twb) * Ws_wb - 1.006 * (t - twb),\n (2830 - 0.24 * twb) * Ws_wb - 1.006 * (t - twb),\n )\n\n dA = np.where(\n above,\n -2.326 * Ws_wb + (2501 - 2.326 * twb) * dWs_wb + 1.006,\n -0.24 * Ws_wb + (2830 - 0.24 * twb) * dWs_wb + 1.006,\n )\n\n B = np.where(above, 2501 + 1.86 * t - 4.186 * twb, 2830 + 1.86 * t - 2.1 * twb)\n\n dB = np.where(above, -4.186, -2.1)\n\n W_ = A / B\n dW = (dA * B - A * dB) / B ** 2\n\n # Newton iteration\n dtwb = (W - W_) / dW\n twb += dtwb\n\n # Limit iterations\n i += 1\n if i > n:\n break\n\n return twb",
"def mytw (self, k, kd, p):\n \n # This function takes temperature in degrees K, dewpoint in degrees K\n # and pressure in millibars and returns the isobaric wet-bulb temperature\n # C in degrees K using an iterative technique. For a given guess for the\n # wet bulb temp, one tries to do an energy balance, matching cp*(T-Tw) to\n # (esat(Tw)-esat(Td))*eps*L/p*.\n \n # c0, c1, and c2 are the same constants as from the esat.f function.\n # f = cp/(L*epsilon).\n \n f = 0.0006355\n c0 = 26.66082\n c1 = 0.0091379024\n c2 = 6106.3960\n \n # Special cases of Td >= T or a ridiculously low T.\n if kd >= k :\n kw = (k + kd) / 2\n return kw\n elif k < 100 :\n kw = k\n return kw\n \n # Special case of a ridiculously high saturation vapor pressure.\n ew = c0 - c1 * k - c2 / k\n \n if ew > 10.0 :\n kw = (k + kd) / 2\n return kw\n \n ew = math.exp(ew)\n \n # kw is our current guess for wet-bulb, ed the vapor pressure corresponding\n # to the depoint. Deal with case of a ridiculously small dewpoint vapor\n # pressure.\n \n kdx = kd\n ed = c0 - c1 * kdx - c2 / kdx\n while True:\n if ed < - 50.0 :\n kdx = kdx + 10\n ed = c0 - c1 * kdx - c2 / kdx\n else :\n break\n \n ed = math.exp(ed)\n fp = p * f\n s = (ew - ed) / (k - kdx)\n \n kw = (k * fp + kdx * s) / (fp + s)\n \n # At each step of the iteration, esat(Tw)-esat(Td) is compared to\n # (T-Tw)*p/(eps*L). When that difference is less than one part in \n # 10000 of esat(Tw), or ten iterations have been done, the iteration stops.\n # This is basically trying to find the value of kw where de is 0. The\n # value s is the derivative of de with respect to kw, a fairly standard\n # numerical technique for finding the zero value of a function.\n \n for l in range (10) :\n ew = c0 - c1 * kw - c2 / kw\n if ew < - 50.0 or ew > 10.0 :\n break\n ew = math.exp(ew)\n de = fp * (k - kw) + ed - ew\n if math.abs(de / ew) < 1e-5 :\n continue\n s = ew * (c1 - c2 / (kw * kw)) - fp\n kw = kw - de / s\n \n return kw",
"def bigTower(re, ri, ht):\n def bigT(hd1, hd2, dd1, dd2):\n \"\"\" This function returns details of a simple tower\n :param hd1: height of the first detail\n :param hd2: height of the second detail\n :param dd1: distance from the tower of the first detail\n :param dd2: distance from the tower of the second detail\n \"\"\"\n tower = DIFFERENCE([MY_CYLINDER([re, ht])(16), MY_CYLINDER([ri, ht])(16)])\n detail1 = STRUCT([T(3)(hd1)(DIFFERENCE([MY_CYLINDER([re + dd2, hd2])(16), MY_CYLINDER([ri, hd2])(16)])),\n DIFFERENCE([MY_CYLINDER([re + dd1, hd1])(16), MY_CYLINDER([ri, hd1])(16)])])\n\n detail2 = STRUCT([T(3)(hd2)(DIFFERENCE([MY_CYLINDER([re + dd1, hd1])(16), MY_CYLINDER([re, hd1])(16)])),\n DIFFERENCE([MY_CYLINDER([re + dd2, hd2])(16), MY_CYLINDER([ri, hd2])(16)])])\n column = T([1, 2])([double((-re * 3) / 2), -0.2])(CUBOID([re * 3, 0.4, ht - dd1]))\n columns = STRUCT([R([1, 2])(PI/6)(column), R([1, 2])(-PI/6)(column), R([1, 2])(PI/3)(column), R([1, 2])(-PI/3)(column)])\n detail3 = T(3)(hd1)(INTERSECTION([DIFFERENCE([MY_CYLINDER([re + double(dd1/2)+0.05, ht - hd1])(16),\n MY_CYLINDER([re, ht - hd1])(16)]), columns]))\n holeDet = STRUCT([T([1, 3])([0.15, 0.1])(CUBOID([0.1, 0.6, 0.4])), T([1, 3])([0.35, 0.1])(CUBOID([0.1, 0.6, 0.4])),\n T([2, 3])([0.15, 0.1])(CUBOID([0.6, 0.1, 0.4])), T([2, 3])([0.35, 0.1])(CUBOID([0.6, 0.1, 0.4]))])\n det = DIFFERENCE([CUBOID([0.6, 0.6, 0.6]), holeDet])\n detail4 = STRUCT([MY_CYLINDER([0.65, 0.15])(16), T([1, 2, 3])([-0.4, -0.4, 0.15])(CUBOID([0.8, 0.8, 0.2])),\n T([1, 2, 3])([-0.3, -0.3, 0.35])(det), T([1, 2, 3])([-0.4, -0.4, 0.95])\n (CUBOID([0.8, 0.8, 0.1])), T(3)(1.05)(CONE([0.27, 0.8])(16))])\n window1 = MY_CYLINDER([0.56, 8])(16)\n window2 = MY_CYLINDER([0.3, 8])(16)\n windows = T(3)(double(ht/2) + hd1+hd2)(STRUCT([T(1)(4)(R([1, 3])(PI/2)(window1)),\n R([1, 2])(PI/4)(T(1)(4)(R([1, 3])(PI/2)(window2))),\n R([1, 2])(-PI/4)(T(1)(4)(R([1, 3])(PI/2)(window2))),\n R([1, 2])(PI/2)(T(1)(4)(R([1, 3])(PI/2)(window1)))]))\n\n finalTower = STRUCT([tower, T(3)(ht)(detail1), detail2, detail3, T(3)(6.799)(detail4)])\n return TEXTURE(\"marmo.png\")(DIFFERENCE([finalTower, windows]))\n return bigT",
"def get_life(self, C, k):\n # -- spectral moments for each narrowband\n moments = self.spectral_data.get_spectral_moments(self.PSD_splitting, moments=[0,2])\n m0L, m2L = moments[0] #spectral moments for lower band\n m0H, m2H = moments[1] #spectral moments for upper band\n\n # -- Vanmarcke bandwidth parameter\n _, epsV_H = self.spectral_data.get_vanmarcke_parameter(self.PSD_splitting)\n\n # -- positive slope zero crossing frequency\n v0L, v0H = self.spectral_data.get_nup(self.PSD_splitting)\n\n # -- normalized variances\n m0 = np.sum(moments[:, 0])\n m0L_norm = m0L/m0\n m0H_norm = m0H/m0\n\n v0Large = m0L_norm * v0L* np.sqrt(1 + m0H_norm/m0L_norm * (v0H/v0L*epsV_H)**2) #low + high frequency, large amplitudes\n v0Small = v0H - v0Large #freqeuncy of small cycless\n\n #dNB small\n #small cycles consist of high frequency component\n dNB_small = self.damage_intesity_NB(m0H, v0Small, C, k) \n\n #dNB large\n #large cycles consist of low and high frequency component\n pdf_large = pdf_rayleigh_sum(m0L,m0H)\n S_large = integrate.quad(lambda x: x**k * pdf_large(x), 0, np.inf)[0]\n dNB_large = v0Large * S_large / C\n d = dNB_small + dNB_large\n T = 1 / d\n return T",
"def calculate_plancks_constant_light(self):\n energy_of_light_photon = input(\"Enter the energy of the light photon: \\n\")\n wavelength_of_light = input(\"Enter the wavelength of light in meters: \\n\")\n speed_of_light_answer = raw_input(\n \"Enter 1 to use 299792458 meters per a second as the speed of light.\\n\"\n \"Enter 2 to input the speed of light meters per a second.\\n\")\n if speed_of_light_answer == '1':\n speed_of_light = 299792458\n elif speed_of_light_answer == '2':\n speed_of_light = input(\"Enter the speed of light:\\n\")\n else:\n print(\"Enter either 1 or 2 for the speed of light.\\n\")\n self.calculate_plancks_constant_light()\n\n result = float(energy_of_light_photon * (wavelength_of_light / speed_of_light))\n print(\"Planck's constant per a second is: \" + str(result))",
"def black_body_temperature_to_flux(temperature_k: float) -> float:\n return _STEFAN_BOLTZMAN * temperature_k**4",
"def calculate_wb_stull(self) -> float:\n T = toC(self.temp_out_k)\n H = self.hum_out\n if H > 5 and H < 99 and T > -20 and T < 50:\n return T * m.atan(0.151977 * m.pow(H + 8.313659, 0.5)) + m.atan(T + H) - m.atan(H - 1.676331) + 0.00391838 * m.pow(H, 3/2) * m.atan(0.023101 * H) - 4.686035\n return None",
"def _lambertw_series(ctx, z, k, tol):\n magz = ctx.mag(z)\n if (-10 < magz < 900) and (-1000 < k < 1000):\n # Near the branch point at -1/e\n if magz < 1 and abs(z+0.36787944117144) < 0.05:\n if k == 0 or (k == -1 and ctx._im(z) >= 0) or \\\n (k == 1 and ctx._im(z) < 0):\n delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)])\n cancellation = -ctx.mag(delta)\n ctx.prec += cancellation\n # Use series given in Corless et al.\n p = ctx.sqrt(2*(ctx.e*z+1))\n ctx.prec -= cancellation\n u = {0:ctx.mpf(-1), 1:ctx.mpf(1)}\n a = {0:ctx.mpf(2), 1:ctx.mpf(-1)}\n if k != 0:\n p = -p\n s = ctx.zero\n # The series converges, so we could use it directly, but unless\n # *extremely* close, it is better to just use the first few\n # terms to get a good approximation for the iteration\n for l in xrange(max(2,cancellation)):\n if l not in u:\n a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l))\n u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1)\n term = u[l] * p**l\n s += term\n if ctx.mag(term) < -tol:\n return s, True\n l += 1\n ctx.prec += cancellation//2\n return s, False\n if k == 0 or k == -1:\n return _lambertw_approx_hybrid(z, k), False\n if k == 0:\n if magz < -1:\n return z*(1-z), False\n L1 = ctx.ln(z)\n L2 = ctx.ln(L1)\n elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0):\n L1 = ctx.ln(-z)\n return L1 - ctx.ln(-L1), False\n else:\n # This holds both as z -> 0 and z -> inf.\n # Relative error is O(1/log(z)).\n L1 = ctx.ln(z) + 2j*ctx.pi*k\n L2 = ctx.ln(L1)\n return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False",
"def test_klauder(self):\n ideal = np.array([0.14899879, -0.16633309, -0.42806931, 0.16605633,\n 0.70769336, 0.16605633, -0.42806931, -0.16633309])\n actual = misc.klauder(8)\n np.testing.assert_allclose(ideal, actual, atol=1e-8, rtol=1e-8)",
"def PlankFunction(wavelen,T=5778.):\n\n c1=1.191042E8\n c2=1.4387752E4\n L=c1/(wavelen**5*(np.exp(c2/(wavelen*T))-1))\n return L",
"def hodgkin_huxley(tt, I, u_rest=-65.):\n\n #SODIUM CHANNEL (m, h are the gating variables)\n alpha_m = lambda u: (2.5 - 0.1 * (u - u_rest)) / (np.exp(2.5 - 0.1 * (u - u_rest)) - 1)\n alpha_h = lambda u: (0.07 * np.exp((-u + u_rest) / 20))\n beta_m = lambda u: 4 * np.exp((-u + u_rest) / 18)\n beta_h = lambda u: 1 / (np.exp(3 - 0.1 * (u - u_rest)) + 1)\n g_Na = 120\n E_Na = 55\n\n #POTASSIUM (n is the gating variable)\n alpha_n = lambda u: (0.1 - 0.01 * (u - u_rest)) / (np.exp(1 - 0.1 * (u - u_rest)) - 1)\n beta_n = lambda u: 0.125 * np.exp((-u + u_rest) / 80)\n g_K = 36\n E_K = -75\n\n #other channels\n g_L = 0.3\n E_L = -69\n\n #capacitance\n C = 0.1\n\n #initialize gating variables and voltage\n h, m, n, u = [np.zeros_like(I) for i in range(4)]\n h[0] = alpha_h(u_rest) / (alpha_h(u_rest) + beta_h(u_rest))\n m[0] = alpha_m(u_rest) / (alpha_m(u_rest) + beta_m(u_rest))\n n[0] = alpha_n(u_rest) / (alpha_n(u_rest) + beta_n(u_rest))\n u[0] = u_rest\n\n #calculate using explicit euler method\n for t in range(1, len(tt)):\n dt = tt[t] - tt[t-1]\n\n dhdt = alpha_h(u[t - 1]) * (1 - h[t - 1]) - beta_h(u[t - 1]) * h[t - 1]\n dmdt = alpha_m(u[t - 1]) * (1 - m[t - 1]) - beta_m(u[t - 1]) * m[t - 1]\n dndt = alpha_n(u[t - 1]) * (1 - n[t - 1]) - beta_n(u[t - 1]) * n[t - 1]\n dudt = ( g_Na * (E_Na - u[t-1]) * m[t-1]**3 * h[t-1] \\\n + g_K * (E_K - u[t-1]) * n[t-1]**4 \\\n + g_L * (E_L - u[t-1]) + I[t-1]) / C\n\n h[t] = h[t - 1] + dt * dhdt\n m[t] = m[t - 1] + dt * dmdt\n n[t] = n[t - 1] + dt * dndt\n u[t] = u[t - 1] + dt * dudt\n\n return h, m, n, u",
"def ReadTinker():\n # Total Potential Energy : {f} Kcal/mole\n total_line = \" Total Potential Energy :\"\n with open('LICHM_TINKEREnergy_0.log') as f:\n for line in f:\n if line.startswith(total_line):\n # print(line)\n TinkE = re.findall(r'\\-\\d+\\.*\\d*', line)\n TinkE = float(TinkE[0])\n # if AMOEBA == True:\n # if line.startswith(\"Polarization\"):\n # Epol = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Epol = float(Epol[0])\n # elif line.startswith(\"Implicit Solvation\")\n # Esolv = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Esolv = float(Esolv[0])\n f.close()\n # if AMOEBA == True:\n # TINKERPolForces = EPol + ESolv\n # TinkE += TINKERPolForces\n #\n TinkE *= kcal2ev\n return TinkE",
"def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide",
"def get_eta (wedge): #wedge_eta?\n ss = 2.5\n if wedge <= 46: eta = wedge * ss - 57.5\n else: eta = wedge * ss - 57.5 - 180.0\n return eta"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
input h (meters) and the coefficients for the linear profile for the free troposphere theta (ft_intercept (K) and slope gamma (K/m)); return the free troposphere theta at height h | def theta_ft(h,ft_intercept,gamma):
theta_top = ft_intercept + h*gamma
return theta_top | [
"def phase_screen_r(h, k, theta=0):\n term = np.exp(-2 * h**2 * k**2 * np.cos(theta)**2)\n return term",
"def rungeKutta(t0,uu,h):\n \n g1=slope(t0,uu) #slope1\n g2=slope(t0+h/2,uu+(h/2)*g1) #slope2\n g3=slope(t0+h/2,uu+(h/2)*g2) #slope3\n g4=slope(t0+h,uu+h*g3) #slope4\n return uu+(h/6)*(g1+2*g2+2*g3+g4)",
"def two_theta_hkl(self, H, K, L):\n return self.unit_cell.two_theta((H, K, L), self.wavelength, deg=True)",
"def curve_with_hillcoef(ph, pka, hillcoef):\n# return hillcoef * ph - pka\n return 1/(1+10**(hillcoef*(pka-ph)))",
"def theta_transversal(hoy:np.array=HOYS_DEFAULT)->float : \n\n return np.arctan(np.sin(np.radians(azim(hoy))) * np.tan(np.radians(sgh.z(hoy))))",
"def _compute_hrf(t):\n\n hx = (t ** (n1 - 1)) * np.exp(\n -t / lambda1) / ((lambda1 ** n1) * math.factorial(n1 - 1))\n hy = (t ** (n2 - 1)) * np.exp(\n -t / lambda2) / ((lambda2 ** n2) * math.factorial(n2 - 1))\n\n # create hrf = weighted difference of two gammas\n hrf = a * (c1 * hx - c2 * hy)\n\n return hrf",
"def __s_polynomial(g, h):\n\n deg_g = __multidegree(g)\n deg_h = __multidegree(h)\n max_deg = map(max, zip(deg_g, deg_h))\n R = g.parent()\n\n # Builds a polynomial with the variables raised to max_deg, in order\n vars = map(R, R.variable_names())\n x_pow_max_deg = reduce(operator.mul, [x ** d for (d, x) in zip(max_deg, vars)], R(1))\n\n quo_g, _ = x_pow_max_deg.quo_rem(g.lt())\n quo_h, _ = x_pow_max_deg.quo_rem(h.lt())\n return quo_g * g - quo_h * h",
"def ab2rhotheta(a, b):\n \"\"\" also : y - ax - b = 0 \"\"\"\n \"\"\" y*sin(theta) + x*cos(theta) - rho = 0 \"\"\"\n #print(\"a: %f b: %f\" % (a, b))\n theta = math.atan(a) + math.pi/2.0\n rho = b*math.sin(theta)\n #print(\"a: %f b: %f rho: %f theta: %f\" % (a, b, rho, theta))\n return (rho, theta)",
"def alkTphosfac(hguess,ks):\n #mick - first estimate of contribution from phosphate\n #mick based on Dickson and Goyet\n h3po4g,h2po4g,hpo4g,po4g = phosfracs(hguess,ks)\n return h3po4g-hpo4g-2*po4g",
"def get_desired_heading(self, h, p):\n heading_vector = [self.alpha*h[0] + self.beta*p[0],\n self.alpha*h[1] + self.beta*p[1]]\n a = [(heading_vector[0]/linalg.norm(heading_vector)),\n (heading_vector[1]/linalg.norm(heading_vector))]\n return a",
"def mort(self, h):\n return(self.mu_bg +\\\n (1.0 - self.mu_bg) * self.pr_P * self.p_att *\\\n (1 - h**self.ap))",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n # Using u = ubar to avoid algeabric loops\n \n y = self.plant.h( x , self.plant.ubar , t )\n \n return y",
"def phireturn(xhat0, tof):\n\t\n\t\tstoptime = tof\n\t\tnumpoints = 2\n\t\t#Integration time array:\n\t\tt = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]\n\t\t\n\t\txsol = twomode.intfull(xhat0, t, abserror=1.0e-14, relerror=1.0e-12)\n\t\t#Phase of the first mode is the slice phase\n\t\tphi = np.angle(xsol[1,0] + 1j*xsol[1,1]) \t\n\t\t\n\t\treturn -phi",
"def enthalpy(**kwargs):\n\n\n\n\t# Find C_p for a calorically perfect gas from gamma and R if not already supplied\n\tif (('gamma_var' in kwargs) and ('R' in kwargs) and ('C_p' not in kwargs)\n\t\tand ('theta_v' in kwargs)):\n\t\tkwargs['C_p'] = C_p_calc(gamma_var=kwargs['gamma_var'], R=kwargs['R'],\n\t\t\ttheta_v=kwargs['theta_v'], T=kwargs['T'])\n\telif ('gamma_var' in kwargs) and ('R' in kwargs) and ('C_p' not in kwargs):\n\t\tkwargs['C_p'] = C_p_calc(gamma_var=kwargs['gamma_var'], R=kwargs['R'])\n\telif ('C_p' in kwargs):\n\t\tpass\n\telse:\n\t\traise KeyError('Incorrect variable assignment')\n\n\t# Calculate enthalpy for calorically perfect gas\n\tif ('T' in kwargs) and ('C_p' in kwargs):\n\t\th = kwargs['C_p'] * kwargs['T']\n\telif ('T_0)' in kwargs) and ('C_p' in kwargs) and ('Vel' in kwargs):\n\t\th = (kwargs['C_p'] * kwargs['T_0']) + ((kwargs['Vel']**2) / 2)\n\telif ('U' in kwargs) and ('p' in kwargs) and ('Vol' in kwargs):\n\t\th = kwargs['U'] + (kwargs['p'] * kwargs['Vol'])\n\telse:\n\t\traise KeyError('Incorrect variable assignment')\n\n\t# Check for inclusion of vibrational temperature variable, theta_v\n\t# Calculate enthalpy for calorically imperfect gas\n\tif ('theta_v' in kwargs) and ('R' in kwargs) and ('T' in kwargs):\n\t\tterm = kwargs['theta_v'] / kwargs['T']\n\t\th += (term / (np.exp(term) - 1)) * kwargs['R'] * kwargs['T']\n\telse:\n\t\tpass\n\n\treturn h",
"def calc_t_from_h(h, w):\n\n t1 = (-1359.24*(w-3.998e-4*(h+0.026)))/(w+0.547283)\n\n t2 = (-1359.24*(w-3.998e-4*h))/(w+0.546196)\n\n # choose appropriate result based on temperature range\n if 0 < t1 < 60:\n t = t1\n elif -100 < t2 <= 0:\n t = t2\n else:\n raise\n t = np.nan\n # print('Warning: Temperature out of bounds (>60°C or <-100°C)')\n # print(t1,t2)\n\n return t",
"def calcTrh(N, rh, m, G, gamma=0.02):\n return 0.138*N**0.5*rh**1.5/(m**0.5*np.log(gamma*N)*G**0.5)",
"def __call__(self, t, h):\n # Evaluate the force value\n fs = self.fs(t)\n\n # Compute force\n return fs * self.A * (0.25 * self.rho_ice * self.g * h * (1 - self.rho_ratio))**self.n",
"def recover_theta_rot(H):\n '''\n Minor annoyance: Recall that there are two separate rotation matrices:\n | cos(R) , sin(R) | | cos(R) , -sin(R) |\n |-sin(R) , cos(R) | | sin(R) , cos(R) | \n In the left one, positive R implies counter-clockwise rotation.\n In the right one, positive R implies clockwise rotation.\n We need to disambiguate this to always return theta s.t.\n Positive theta -> counter-clockwise rotation.\n '''\n H00 = min(max(H[0,0], -1.0), 1.0) # clamp to [-1.0, 1.0] to avoid numerical instability\n H01 = min(max(H[0,1], -1.0), 1.0)\n H10 = min(max(H[0,1], -1.0), 1.0)\n theta_0 = math.degrees(math.acos(H00))\n theta_1 = math.degrees(math.asin(H01))\n if theta_0 >= 0.0 and theta_1 >= 0.0:\n # theta is in counter-clockwise mode\n return theta_0\n else:\n # theta is in clockwise mode\n return -theta_0",
"def hugoniot_locus(h, hstar, hustar, wave_family, g=1., y_axis='u'):\n ustar = hustar / hstar\n alpha = h - hstar\n d = np.sqrt(g*hstar*(1 + alpha/hstar)*(1 + alpha/(2*hstar)))\n if wave_family == 1:\n if y_axis == 'u':\n return (hustar + alpha*(ustar - d))/pospart(h)\n else:\n return hustar + alpha*(ustar - d)\n else:\n if y_axis == 'u':\n return (hustar + alpha*(ustar + d))/pospart(h)\n else:\n return hustar + alpha*(ustar + d)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
the_vars[0] = thetabar, the_vars[1] = h, the_vars[2] = qv; surface flux from drag law with subsidence and diagnosed deltheta | def dmixed_vars(the_vars,tstep,coeffs):
deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]
F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux
Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux
Fint = -coeffs.k*F0 #entrainment heat flux
if coeffs.use_NT: # use NT parameterization by calculating we using function
went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization
else: # use simple we parameterization
went = -Fint/deltheta #simple entrainment parameterization
# calculate delta_Fr
delta_Frstar = 82.0 # Wm^-2
Frlambda = 7.9 # Wm^-2, using with CTL from Gesso
delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1
Fqvent = -went*( coeffs.ft_qv - the_vars[2])
wsubs = -coeffs.D*the_vars[1]
rho=1.
cp=1004.
derivs=np.empty_like(the_vars)
# higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling
derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1]
derivs[1] = went + wsubs
derivs[2] = (Fqv0 - Fqvent)/the_vars[1]
return derivs | [
"def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we",
"def flux(h,u):\n return np.array([h*u, 0.5*g*h**2 + h*u**2])",
"def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)",
"def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)",
"def dynamics(self,eta,nu,u_actual,u_control,sampleTime): \n \n # Current velocities\n u_c = self.V_c * math.cos(self.beta_c - eta[5]) # current surge velocity\n v_c = self.V_c * math.sin(self.beta_c - eta[5]) # current sway velocity \n \n nu_c = np.array([u_c,v_c,0,0,0,0],float) # current velocity vector\n nu_r = nu - nu_c # relative velocity vector\n \n U_r = math.sqrt( nu_r[0]**2 + nu_r[1]**2 ) # relative speed\n \n # Rudder command and actual rudder angle\n delta_c = u_control[0]\n delta = u_actual[0]\n \n # Rudder forces and moment (Fossen 2021, Chapter 9.5.1)\n b = 0.7 * self.T # rudder height\n AR = b**2 / self.Lambda # aspect ratio: Lamdba = b**2/AR \n CN = 6.13 * self.Lambda / ( self.Lambda + 2.25 ) # normal coefficient\n t_R = 1 - 0.28 * self.Cb - 0.55\n a_H = 0.4\n x_R = -0.45 * self.L\n x_H = -1.0 * self.L\n\n Xdd = -0.5 * ( 1 - t_R ) * self.rho * U_r**2 * AR * CN\n Yd = -0.25 * ( 1 + a_H ) * self.rho * U_r**2 * AR * CN \n Nd = -0.25 * ( x_R + a_H * x_H ) * self.rho * U_r**2 * AR * CN \n \n # Control forces and moment\n delta_R = -delta # physical rudder angle (rad)\n T = self.tau_X # thrust (N)\n t_deduction = 0.1 # thrust deduction number\n tau1 = ( 1 - t_deduction ) * T - Xdd * math.sin( delta_R )**2 \n tau2 = -Yd * math.sin( 2 * delta_R ) \n tau6 = -Nd * math.sin( 2 * delta_R ) \n tau = np.array( [ tau1, tau2, tau6 ],float) \n \n # Linear maneuvering model\n T_surge = self.L # approx. time constant in surge (s)\n xg = 0 # approx. x-coordinate, CG (m) \n \n # 3-DOF ship model\n [M,N] = clarke83(U_r,self.L, self.B, self.T,self.Cb,self.R66,xg,T_surge)\n Minv = np.linalg.inv(M)\n nu3 = np.array( [ nu_r[0], nu_r[1], nu_r[5] ]) \n nu3_dot = np.matmul( Minv, tau - np.matmul(N,nu3) ) \n \n # 6-DOF ship model\n nu_dot = np.array( [ nu3_dot[0],nu3_dot[1],0,0,0,nu3_dot[2] ]) \n\n # Rudder angle saturation\n if ( abs(delta) >= self.deltaMax * math.pi / 180 ):\n delta = np.sign(delta) * self.deltaMax * math.pi / 180\n \n # Rudder dynamics\n delta_dot = (delta_c - delta) / self.T_delta \n\n # Forward Euler integration [k+1]\n nu = nu + sampleTime * nu_dot\n delta = delta + sampleTime * delta_dot\n\n u_actual = np.array([delta],float) \n\n return nu, u_actual",
"def _add_variables(self):\r\n #per dof variables\r\n self.addPerDofVariable('sigma', 0.0)\r\n self.addPerDofVariable('x1', 0.0) #save pre_constrained positions\r\n self.addPerDofVariable(\"vold\", 0) # metropolize save old velocities\r\n self.addPerDofVariable(\"xold\", 0) # metropolize save old positions\r\n\r\n #global variables\r\n self.addGlobalVariable('a', np.exp(-1 * self._gamma * self.timestep_split)) #deterministic velocity update mixing\r\n self.addGlobalVariable('b', np.sqrt(1.0 - np.exp(-2.0 * self._gamma * self.timestep_split))) #stochastic velocity update mixing\r\n self.addGlobalVariable('shadow_work', 0.) #initialize a 'shadow_work' for metropolization\r\n self.addGlobalVariable('proposal_work', 0.) #a proposal work\r\n self.addGlobalVariable('old_ke', 0.) # old kinetic energy\r\n self.addGlobalVariable('new_ke', 0.) #new kinetic energy\r\n self.addGlobalVariable('old_pe', 0.) #old potential energy\r\n self.addGlobalVariable('new_pe', 0.) #new potential energy\r\n self.addGlobalVariable(\"accept\", 0) #whether to accept a metropolized move\r\n self.addGlobalVariable(\"ntrials\", 0) #whether to reject a metropolized move\r\n self.addGlobalVariable(\"nreject\", 0) # number of rejected metropolized proposals\r\n self.addGlobalVariable(\"naccept\", 0) #number of accepted metropolized proposals\r",
"def _init_rhs(self): \n # prepare interpolated wave function\n \n wfinter=np.empty((self.nqpoints+1,self.nx),dtype=np.double)\n for iq in range(self.nqpoints+1):\n for ix in range(self.nx): \n wfinter[iq,ix]=np.sum(self.wfd*self.splpip[0:self.npoints,iq,self.nqpoints,ix])\n \n # prepare CG coefficient and Ylam0 factor \n \n cgfakt=np.empty((self.nalpha),dtype=np.double)\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"]\n lp=qnsetp[\"l\"]\n lamp=qnsetp[\"lam\"]\n cgfakt[alphap]=float(CG(lp,0,lamp,0,self.bl,0).doit())*np.sqrt((2*lamp+1)/(4*m.pi))\n \n # then also perform interpolation of tmatrix a priori\n tinter=np.empty((self.lmax//2+1,self.npoints,self.nqpoints+1,self.nx),dtype=np.cdouble) \n for l in range(0,self.lmax+1,2):\n for ip in range(self.npoints): \n for iq in range(self.nqpoints+1):\n for ix in range(self.nx): \n tinter[l//2,ip,iq,ix]=np.sum(self.tmat[l//2,iq,ip,0:self.npoints]*self.splpi[0:self.npoints,iq,self.nqpoints,ix])\n \n \n \n # the vector depends on the combined index \n # indx_h_rhs=self.npoints*(self.nqpoints+1)*alpha+self.npoints*iq+ip\n # dimensionality: self.npoints*(self.nqpoints+1)*self.nalpha\n \n self.h_rhs=np.zeros((self.npoints*(self.nqpoints+1)*self.nalpha),dtype=np.cdouble)\n for qnset in self.qnalpha: # go through allowed l,lam combinations\n alpha=qnset[\"alpha\"]\n l=qnset[\"l\"]\n for qnsetp in self.qnalpha: # go through allowed l,lam combinations\n alphap=qnsetp[\"alpha\"]\n lp=qnsetp[\"l\"]\n if lp==0: # bound state only in s-wave \n for iq in range(self.nqpoints+1):\n for ip in range(self.npoints): \n indx_h_rhs=self.npoints*(self.nqpoints+1)*alpha+self.npoints*iq+ip \n for jp in range(self.npoints):\n self.h_rhs[indx_h_rhs]+=np.sum(self.xw*tinter[l//2,ip,iq,:]\n *2*self.gfunc[alpha,alphap,iq,self.nqpoints,:]\n *cgfakt[alphap]*wfinter[iq,:])",
"def main():\r\n #Drag Force Equation: 1/2 * rho * Cd * A * v^2\r\n\r\n #User-Defined Constants\r\n global m\r\n global v0\r\n global theta\r\n global rho #Fluid Density\r\n global A #Cross-sectional Area\r\n global Cd #Drag coefficient\r\n global tStep\r\n global g\r\n\r\n m = 1\r\n v0 = 30\r\n theta = math.radians(45)\r\n rho = 1.225\r\n A = 0.05\r\n Cd = 0.5 #A ball is approx. 0.5\r\n tStep = 0.005\r\n g = 9.8\r\n\r\n\r\n #Data Structures\r\n global tHist\r\n global xHist\r\n global yHist\r\n global thetaHist\r\n global vHist\r\n global vXHist\r\n global vYHist\r\n tHist = [] #list for all time steps\r\n xHist = [] #list for all x position steps\r\n yHist = [] #list for all y position steps\r\n thetaHist = [] #List for all theta at every time step\r\n vHist = [] #list for all velocities at every time step\r\n vXHist = [] #list for all x-axis velocities at every time step\r\n vYHist = [] #list for all y-axis velocities at every time step\r\n\r\n #Initialize intial values\r\n tHist.append(0.0)\r\n xHist.append(0.0)\r\n yHist.append(0.0)\r\n thetaHist.append(theta)\r\n vHist.append(v0)\r\n vXHist.append(v0 * math.cos(theta))\r\n vYHist.append(v0 * math.sin(theta))\r\n vTheta = math.atan(vYHist[0] / vXHist[0])\r\n # print(\"t: \" + str(tHist[0]))\r\n # print(\"x: \" + str(xHist[0]))\r\n # print(\"y: \" + str(yHist[0]))\r\n # print(\"v: \" + str(vHist[0]))\r\n # print(\"Vx: \" + str(vXHist[0]))\r\n # print(\"Vy: \" + str(vYHist[0]))\r\n\r\n #Convenience variables\r\n global k\r\n\r\n counter = 1\r\n #Loop until the y-displacement becomes negative (projectile reaches ground again)\r\n while True:\r\n tHist.append(counter * tStep) #increment time\r\n print(\"t: \" + str(tHist[counter]))\r\n\r\n #This large hunk is the solution to the net force differential equation in the x-axis\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep)) #STABLE\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd)/(2*m))*(tStep))\r\n # oneOverVX = (1/vHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep))\r\n oneOverVX = (1/vXHist[counter-1]) + ((rho*A*Cd)/(2*m*math.cos(thetaHist[counter-1]))*(tStep)) #This is one over the solution for velocity in the x-axis net force differential equation\r\n vXHist.append(1 / oneOverVX) #Adding the velocity to the list of velocities\r\n\r\n vY0 = vYHist[counter-1] #Convenience variable\r\n # k = 0.5 * rho * A * Cd * math.sin(abs(thetaHist[counter-1])) #STABLE\r\n # k = 0.5 * rho * A * Cd\r\n k = (rho * A * Cd) / (2 * math.sin(abs(thetaHist[counter-1]))) #Convenience variable\r\n print(\"k: \" + str(k))\r\n print(\"vX: \" + str(vXHist[counter]))\r\n rootGMK = math.sqrt(g*m*k) #Convenience variable\r\n if vYHist[counter-1] > 0.0: #If the projectile is going upwards\r\n #Solving the y-axis differential equation for velocity\r\n equationRight = -rootGMK * ((tStep/m) - (math.atan((k*vY0)/(rootGMK))/rootGMK))\r\n vYHist.append((math.tan(equationRight) * rootGMK) / k)\r\n elif vYHist[counter-1] < 0.0: #If the projectile is going downwards\r\n #Solving the y-axis differential equation for velocity\r\n\r\n # Hand-solved integral\r\n # exponent = -(2*tStep*rootGMK)/m\r\n # numerator = g*m*math.exp(exponent) - math.exp(exponent)*vY0*rootGMK - vY0*rootGMK - g*m\r\n # denominator = math.exp(exponent)*(vY0-rootGMK) - vY0*k - rootGMK\r\n # vYHist.append(numerator / denominator)\r\n\r\n #Wolfram Alpha arctanh integral\r\n arctanh =(vY0*math.sqrt(k))/(math.sqrt(g*m))\r\n print(\"arctanh: \" + str(arctanh))\r\n equationRight 
= (np.arctanh(arctanh))/(rootGMK) - (tStep/m)\r\n vYHist.append(np.tanh(rootGMK * equationRight) * ((math.sqrt(g*m))/(math.sqrt(k))))\r\n else: #If current y velocity is 0\r\n vYHist.append(vY0 - g*tStep)\r\n print(\"vY: \" + str(vYHist[counter]))\r\n\r\n vHist.append(math.hypot(vXHist[counter], vYHist[counter])) #Calculate the net velocity and add it to the velocities list\r\n print(\"v: \" + str(vHist[counter]))\r\n thetaHist.append(math.atan(vYHist[counter]/vXHist[counter])) #Calculate the current angle based on the velocities and add it to the theta list\r\n print(\"0: \" + str(math.degrees(thetaHist[counter])))\r\n\r\n x0 = xHist[counter-1]\r\n y0 = yHist[counter-1]\r\n\r\n # yIntegral = trigintegrate()\r\n\r\n \"\"\"\r\n Note: What I wanted to do here was to integrate the velocity functions over the time interval to find the exact\r\n changes in position. Unfortunately, I was running short of time and decided it was not worth it to move forward with\r\n this final step, and instead worked on the presentation and testing different cases.\r\n \"\"\"\r\n xHist.append(x0 + vXHist[counter]*tStep) #Calculate new x position using x = x0 + vt\r\n yHist.append(y0 + vYHist[counter]*tStep) #Calculate new y position using y = y0 + vt\r\n print(\"x: \" + str(xHist[counter]))\r\n print(\"y: \" + str(yHist[counter]))\r\n print()\r\n\r\n # xHist.append(xHist[counter-1] + vXHist[counter-1]*tStep + 0.5*aXHist[counter-1]*tStep**2)\r\n # yHist.append(yHist[counter-1] + vYHist[counter-1]*tStep + 0.5*aYHist[counter-1]*tStep**2)\r\n # vXHist.append(vXHist[counter-1] + aXHist[counter-1]*tStep)\r\n # vYHist.append(vYHist[counter-1] + aYHist[counter-1]*tStep)\r\n # vHist.append(math.hypot(vXHist[counter], vYHist[counter]))\r\n #\r\n # vTheta = math.atan(vYHist[counter] / vXHist[counter])\r\n # xDragAccel = -0.5*rho*Cd*A*vHist[counter]**2*math.cos(vTheta) / m\r\n # yDragAccel = -math.copysign(0.5*rho*Cd*A*vHist[counter]**2*math.sin(vTheta) / m, vYHist[counter])\r\n #\r\n # aXHist.append(xDragAccel)\r\n # aYHist.append(-g*tStep + yDragAccel)\r\n\r\n if vYHist[counter-1] > 0.0 and vYHist[counter] < 0.0: #Check if the projectile has reached it's peak by checking for a critical point\r\n print(\"max height reached at time=\" + str(tHist[counter]))\r\n # break\r\n\r\n # print(\"t: \" + str(tHist[counter]))\r\n # print(\"x: \" + str(xHist[counter]))\r\n # print(\"y: \" + str(yHist[counter]))\r\n # print(\"Vx: \" + str(vXHist[counter]))\r\n # print(\"Vy: \" + str(vYHist[counter]))\r\n # print(\"Ax: \" + str(aXHist[counter]))\r\n # print(\"Ay: \" + str(aYHist[counter]))\r\n\r\n if yHist[counter] < 0 or counter > 99999: #End the loop if the projectile has reached the ground (or limit the number of iterations to avoid computer death)\r\n break\r\n\r\n counter += 1\r\n\r\n plotData()",
"def a3t2(t, g, nu1, c1, temp, nu2, c2, wc, phi1, phim1):\n \n \n w1w1t2 = w_w.w1_w1(t, g, temp, nu1, c1, nu1 , c1, wc, phi1, phi1)\n w1mw1mt2 =w_w.w1_w1(t, g, temp, nu2, c2, nu2 , c2, wc, phim1, phim1)\n w1w1mt2 =w_w.w1_w1(t, g, temp, nu1, c1, nu2 , c2, wc, phi1, phim1)\n \n a11 = w1w1t2+w1mw1mt2-2*w1w1mt2\n a12 = w1w1t2-w1mw1mt2\n a21 = a12\n a22 = w1w1t2+w1mw1mt2+2*w1w1mt2 \n \n return 1/4 * np.array([[a11, a12], [a21, a22]])",
"def trans_eddy_flux(variables, **kwargs):\n return aht(variables[4:], **kwargs) - moc_st_eddy_flux(variables[:4], **kwargs)",
"def hydro_solver(self):\n u_dx = self.central_x(self.u)\n w_dy = self.central_y(self.w)\n P_dx = self.central_x(self.P)\n P_dy = self.central_y(self.P)\n\n rho_dx_upwind = self.upwind_x(self.rho, self.u)\n rho_dy_upwwind = self.upwind_y(self.rho, self.w)\n rho_udx_upwind = self.upwind_x(self.rho * self.u, self.u)\n rho_udy_upwind = self.upwind_y(self.rho * self.u, self.w)\n rho_wdx_upwind = self.upwind_x(self.rho * self.w, self.u)\n rho_wdy_upwind = self.upwind_y(self.rho * self.w, self.w)\n u_dx_uu = self.upwind_x(self.u, self.u)\n u_dx_uw = self.upwind_x(self.u, self.w)\n w_dy_uu = self.upwind_y(self.w, self.u)\n w_dy_uw = self.upwind_y(self.w, self.w)\n e_dx = self.upwind_x(self.e, self.u)\n e_dy = self.upwind_y(self.e, self.w)\n\n self.rho_dt = (\n -self.rho * (u_dx + w_dy)\n - self.u * rho_dx_upwind\n - self.w * rho_dy_upwwind\n )\n self.e_dt = (\n -(self.e + self.P) * (u_dx + w_dy) - self.u * e_dx - self.w * e_dy\n )\n self.rho_udt = (\n -self.rho * self.u * (u_dx_uu + w_dy_uu)\n - self.u * rho_udx_upwind\n - self.w * rho_udy_upwind\n - P_dx\n )\n self.rho_wdt = (\n -self.rho * self.w * (u_dx_uw + w_dy_uw)\n - self.u * rho_wdx_upwind\n - self.w * rho_wdy_upwind\n - P_dy\n + self.rho * self.g\n )\n\n self.time_step()\n rho_previous = np.zeros_like(self.rho)\n rho_previous[:, :] = self.rho\n self.rho[:, :] = self.rho + self.rho_dt * self.dt\n self.e[:, :] = self.e + self.e_dt * self.dt\n self.u[:, :] = (\n rho_previous * self.u + self.rho_udt * self.dt\n ) / self.rho\n self.w[:, :] = (\n rho_previous * self.w + self.rho_wdt * self.dt\n ) / self.rho\n\n self.boundary_conditions()\n self.T[:, :] = (\n (self.Y - 1) * self.e * self.mu * self.m_u / (self.kb * self.rho)\n )\n self.P[:, :] = (self.Y - 1) * self.e\n uw = (self.u, self.w)\n v = np.linalg.norm(uw)\n dt = self.dt\n\n return dt",
"def _calc_spec_enthalpy_vector(self):\r\n #Vector streams need to loop through the temperature, pressure, and composition lines to set the Cantera phase one by one\r\n self.spec_enthalpy = np.zeros(len(self.temperature[0]))\r\n self.spec_enthalpy[:] = np.nan\t\t\t\t#appropriate until values are filled in\r\n if self.basis == \"mass\":\r\n kw = \"mass\"\r\n else:\r\n kw = \"mole\"\r\n for i in range(0, len(self.temperature[0])):\r\n #need to build a composition dictionary to set the Cantera phase\r\n comp = {}\r\n for k in self.composition:\r\n comp[k] = self.composition[k][i]\r\n self.ct_setcomp(comp)\r\n self.cantera_helper.set(self.ctphase, T = conv.convert_units(self.temperature[0][i], self.temperature[1], 'K'), P = conv.convert_units(self.pressure[0][i], self.pressure[1], 'Pa'))\r\n #self.ctphase.set(T = conv.convert_units(self.temperature[0][i], self.temperature[1], 'K'), P = conv.convert_units(self.pressure[0][i], self.pressure[1], 'Pa'))\r\n self.spec_enthalpy[i] = getattr(self.cantera_helper, \"enthalpy_%s\" % kw)(self.ctphase)\r\n #self.spec_enthalpy[i] = getattr(self.ctphase, \"enthalpy_%s\" % kw)()\r",
"def DY_3fld(i, Y, A, K, wC, DwC, cs2C, wN, DwN, cs2N,\n OmegaC, OmegaN, OmegaB0, OmegaC0, OmegaG0, OmegaN0, H):\n \n dY = np.zeros((7, len(K)))\n Phi, deltaG, vG, deltaC, vC, deltaN, vN =\\\n Y[0, :], Y[1, :], Y[2, :], Y[3, :], Y[4, :], Y[5, :], Y[6, :]\n\n OmegaBi = OmegaB0 * A[i]**-3.\n OmegaCi = OmegaC[i]\n OmegaGi = OmegaG0 * A[i]**-4.\n OmegaNi = OmegaN[i]\n\n ybi = OmegaBi/OmegaGi\n\n # compute the derivatives of the perturbations.\n DPhi = -H[i]*Phi + (3/2.*A[i]**2/K) *\\\n (4./3.*(OmegaGi*vG + OmegaN[i]*vN) + OmegaC[i]*vC + OmegaBi*vG)\n\n DdeltaG = -4./3.*K*vG + 4*DPhi\n DvG = (-H[i] * ybi*vG + K*deltaG/3)/(\n 4./3. + ybi) + K*Phi\n\n DdeltaC = -(1+wC[i])*(K*vC-3*DPhi) - 3*H[i]*(cs2C[i, :]-wC[i])*deltaC\n DvC = -H[i]*(1-3*wC[i])*vC - vC*DwC[i]/(1+wC[i]) + \\\n K*deltaC*cs2C[i, :]/(1+wC[i]) + K*Phi\n\n DdeltaN = -(1+wN[i])*(K*vN-3*DPhi) - 3*H[i]*(cs2N[i, :]-wN[i])*deltaN\n DvN = -H[i]*(1-3*wN[i])*vN - vN*DwN[i]/(1+wN[i]) + \\\n K*deltaN*cs2N[i, :]/(1+wN[i]) + K*Phi\n\n dY = np.stack((DPhi, DdeltaG, DvG, DdeltaC, DvC, DdeltaN, DvN))\n return dY",
"def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]",
"def dohext(nf,sigma,s):\n#\n if nf==-1:return hextpars \n f=numpy.sqrt(2.*fcalc(2,nf))\n t2sum=0\n tau,Vdir=doseigs(s)\n for i in range(3): t2sum+=tau[i]**2\n chibar=(s[0]+s[1]+s[2])/3.\n hpars={}\n hpars['F_crit']='%s'%(fcalc(5,nf))\n hpars['F12_crit']='%s'%(fcalc(2,nf))\n hpars[\"F\"]=0.4*(t2sum-3*chibar**2)/(sigma**2)\n hpars[\"F12\"]=0.5*((tau[0]-tau[1])/sigma)**2\n hpars[\"F23\"]=0.5*((tau[1]-tau[2])/sigma)**2\n hpars[\"v1_dec\"]=Vdir[0][0]\n hpars[\"v1_inc\"]=Vdir[0][1]\n hpars[\"v2_dec\"]=Vdir[1][0]\n hpars[\"v2_inc\"]=Vdir[1][1]\n hpars[\"v3_dec\"]=Vdir[2][0]\n hpars[\"v3_inc\"]=Vdir[2][1]\n hpars[\"t1\"]=tau[0]\n hpars[\"t2\"]=tau[1]\n hpars[\"t3\"]=tau[2]\n hpars[\"e12\"]=numpy.arctan((f*sigma)/(2*abs(tau[0]-tau[1])))*180./numpy.pi\n hpars[\"e23\"]=numpy.arctan((f*sigma)/(2*abs(tau[1]-tau[2])))*180./numpy.pi\n hpars[\"e13\"]=numpy.arctan((f*sigma)/(2*abs(tau[0]-tau[2])))*180./numpy.pi\n return hpars",
"def compute_vertical_flux(da, w):\n dphi = get_horz_devition(da=da)\n if w.dims != da.dims:\n w = z_center_field(da=w)\n\n assert dphi.time == w.time\n # if dims aren't identical xarray ends up allocating huge arrays for\n # dealing with the missing overlap\n assert w.dims == dphi.dims\n\n # old routines using new array\n # phi_flux = dphi*w\n # phi_flux.attrs['units'] = \"{} {}\".format(w.units, dphi.units)\n # dphi_long_name = dphi.long_name.replace('horz deviation', '').strip()\n # phi_flux.attrs['long_name'] = \"{} vertical flux\".format(dphi_long_name)\n\n dphi_long_name = dphi.long_name.replace(\"horz deviation\", \"\").strip()\n\n # to inplace update to conserve memory\n phi_flux = dphi\n phi_flux *= w\n\n phi_flux.attrs[\"units\"] = \"{} {}\".format(w.units, dphi.units)\n phi_flux.attrs[\"long_name\"] = \"{} vertical flux\".format(dphi_long_name)\n phi_flux.name = \"{}_flux\".format(da.name)\n\n return phi_flux",
"def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p",
"def classic_var(self,tau21=0.864,tau31=0.864,Verbose=False):\n\n self.algo = 'dozier'\n\n# Compute radiances\n# -----------------\n L21 = B21(self.T21) \n L31 = B31(self.T31) \n E21 = B21(self.Tb21)\n E31 = B31(self.Tb31)\n N = L21.size\n\n if isscalar(tau21): tau21 = tau21 * ones(L21.shape)\n if isscalar(tau31): tau31 = tau31 * ones(L31.shape)\n \n# Use a variational approach - Needs vectorization\n# ------------------------------------------------\n sig21 = 1.\n sig31 = 1.\n x0 = [600.,P_SCALE*0.1] # [Tf,p]; p here is normalized; 10 ha/ 1 km2 = 0.1\n Tf = - ones(N)\n p = - ones(N)\n niter = 200\n for i in range(N):\n rvals = fmin(Jfunc2d, x0, ftol=0.001, maxiter=niter, disp=0, full_output=1, \\\n args=(L21[i],E21[i],tau21[i],sig21,L31[i],E31[i],tau31[i],sig31))\n x = rvals[0]\n iter = rvals[2]\n if iter < niter:\n Tf[i] = x[0]\n p[i] = 100. * x[1] / P_SCALE # units is %\n\n# Quality control\n# ---------------\n m = isnan(Tf) == False\n m = m & (Tf<1800.)\n m = m & (p>0) & (p<=100)\n\n# Add solution as attributes\n# --------------------------\n self.m = m\n self.Tf = Tf\n self.p = p\n\n# Replace fire size with median size for those fires that did not converge\n# ------------------------------------------------------------------------\n I = (m == False)\n self.p[I] = median(self.p[m])\n\n self.farea = (self.p/100.) * self.pixar # km2\n self.hflux = 0.001 * self.pow / self.farea # kW/m2\n\n# Print out results\n# -----------------\n y = 100. * ( Tf[m].size ) / N + 0.05\n if Verbose:\n print_stats('__header__','Classic Dozier - Variational Results (Yield: %4.1f%%)'%y)\n print_stats('Tf (K)',Tf[m])\n print_stats('p (%)',p[m])\n print_stats('A (km2)',self.farea[m])\n print_stats('HF (kW/m2)',self.hflux[m])\n print_stats('__footer__')",
"def advection_1d(var,vel,dz,dt,NN):\n \n # check cfl for advection and diffusion\n cfl = 0.5\n dtc = cfl*dz/(np.max(np.abs(vel)))\n dt = np.min([dt, dtc])\n \n # ghost cells required of my artificial boundary conditions:\n # non-reflecting Neumann type boundary conditions are implemented\n vargh = np.insert(var, [0,NN], [var[0],var[-1]]) \n velgh = np.insert(vel, [0,NN], [vel[0],vel[-1]])\n \n theta = np.ones(NN+2)\n theta[np.where(velgh<0)] = -1\n \n # calculate slopes for the flux limiter (phi)\n TVD_r = vargh[1:]\n TVD_r2 = np.insert(vargh[2:],np.shape(vargh[2:])[0],vargh[-1])\n TVD_m = vargh[:-1]\n TVD_l = np.insert(vargh[:-2],0,vargh[0])\n \n r_TVDup = (TVD_r2-TVD_r)/(TVD_r-TVD_m)\n r_TVDdown = (TVD_m-TVD_l)/(TVD_r-TVD_m)\n \n r_TVD = r_TVDdown\n r_TVD[np.where(theta[1:]<0)] = r_TVDup[np.where(theta[1:]<0)]\n r_TVD[np.where(np.diff(TVD_m)==0)] = 1\n r_TVD[0] = 1\n r_TVD[-1] = 1\n \n # define Flux Limiter function (Van Leer)\n phi = (r_TVD + np.abs(r_TVD))/(1 + np.abs(r_TVD))\n phi_r = phi[1:]\n phi_l = phi[:-1]\n \n # think about my ghost cells\n TVD_r = vargh[2:]\n TVD_l = vargh[:-2]\n \n # compute fluxes for TVD\n F_rl = .5*((1+theta[1:-1])*vel*var + (1-theta[1:-1])*vel*TVD_r)\n F_rh = .5*vel*(var + TVD_r) - .5*vel*vel*dt/dz*(TVD_r-var)\n \n F_ll = .5*((1+theta[1:-1])*vel*TVD_l + (1-theta[1:-1])*vel*var)\n F_lh = .5*vel*(TVD_l+var) - .5*vel*vel*dt/dz*(var-TVD_l)\n \n # do the job\n F_right = F_rl + phi_r*(F_rh - F_rl)\n F_left = F_ll + phi_l*(F_lh - F_ll)\n \n vari = var - dt*(F_right-F_left)/dz\n \n # might want to add a check for imaginary numbers...\n \n return vari,dt"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Nicholls-Turton entrainment parameterization. the_vars and coeffs are inputs into dmixed_vars; deltheta, F0, Fqv0 are calculated in dmixed_vars | def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):
thetal_m = the_vars[0]
qt_m = the_vars[2]
zi = the_vars[1]
dth = deltheta
thetal_ft = thetal_m + dth
qt_ft = coeffs.ft_qv
dqt = qt_ft - qt_m
# calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)
gamma = 6e-3
thetal_3000 = thetal_ft + gamma*(3000-zi)
LTS = thetal_3000 - coeffs.sst # lower tropospheric stability
# calculate coefficients
press=tf.find_press(zi)
Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)
Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)
invert= tf.t_uos_thetal(thetal_m,qt_m,press)
T_0 = invert.temp
lv=tf.L_t(invert.temp)
Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
del_thv_dry = Ad * dth + Bd * dqt
del_thv_sat = Aw * dth + Bw * dqt
# account for evaporative cooling (increases we)
ql_max = invert.ql
Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
Del_thv = del_thv_dry - Cl * ql_max
# calculate buoyancy integral terms
rho = 1.
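# rho is taken as 1 (presumably kg m^-3) so delta_Fr in W m^-2 converts to a kinematic flux via cp*rho below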
lcl_press=tf.LCL_thetal(thetal_m,qt_m)
zb=tf.find_height(lcl_press)
T1 = zb/zi
T2 = 0.5 * zb**2 / zi**2
T3 = (zi-zb)/zi
T4 = 0.5 * (zi**2 - zb**2) / zi**2
# calculate delta_Fr
delta_Frstar = 82.0 # Wm^-2
Frlambda = 7.9 # Wm^-2, using with CTL from Gesso
delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1
wtl_0=F0
wqt_0=Fqv0
Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3
term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))
term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))
term3 = Del_F * (Ad * T2 + Aw * T4)
Theta_NE = term1 + term2 + term3
# calculate w*
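# convective velocity scale: w* = (2.5 * (g/T_0) * zi * Theta_NE)**(1/3)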
wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)
# calculate chi*
chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)
# calculate del_m
Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)
# calculate we
a2=15.
Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))
A_NT = 0.2
fac_NT = 2.5
term4 = Del_thv_NT
term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)
denominator = term4 + term5
we = A_NT * fac_NT * Theta_NE / denominator
return we | [
"def dmixed_vars(the_vars,tstep,coeffs):\n\n deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]\n F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux\n Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux\n Fint = -coeffs.k*F0 #entrainment heat flux\n \n if coeffs.use_NT: # use NT parameterization by calculating we using function\n went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization\n \n else: # use simple we parameterization\n went = -Fint/deltheta #simple entrainment parameterization\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1\n \n Fqvent = -went*( coeffs.ft_qv - the_vars[2])\n wsubs = -coeffs.D*the_vars[1]\n rho=1.\n cp=1004.\n \n derivs=np.empty_like(the_vars)\n \n # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling\n derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1] \n derivs[1] = went + wsubs\n derivs[2] = (Fqv0 - Fqvent)/the_vars[1]\n return derivs",
"def _add_variables(self):\r\n #per dof variables\r\n self.addPerDofVariable('sigma', 0.0)\r\n self.addPerDofVariable('x1', 0.0) #save pre_constrained positions\r\n self.addPerDofVariable(\"vold\", 0) # metropolize save old velocities\r\n self.addPerDofVariable(\"xold\", 0) # metropolize save old positions\r\n\r\n #global variables\r\n self.addGlobalVariable('a', np.exp(-1 * self._gamma * self.timestep_split)) #deterministic velocity update mixing\r\n self.addGlobalVariable('b', np.sqrt(1.0 - np.exp(-2.0 * self._gamma * self.timestep_split))) #stochastic velocity update mixing\r\n self.addGlobalVariable('shadow_work', 0.) #initialize a 'shadow_work' for metropolization\r\n self.addGlobalVariable('proposal_work', 0.) #a proposal work\r\n self.addGlobalVariable('old_ke', 0.) # old kinetic energy\r\n self.addGlobalVariable('new_ke', 0.) #new kinetic energy\r\n self.addGlobalVariable('old_pe', 0.) #old potential energy\r\n self.addGlobalVariable('new_pe', 0.) #new potential energy\r\n self.addGlobalVariable(\"accept\", 0) #whether to accept a metropolized move\r\n self.addGlobalVariable(\"ntrials\", 0) #whether to reject a metropolized move\r\n self.addGlobalVariable(\"nreject\", 0) # number of rejected metropolized proposals\r\n self.addGlobalVariable(\"naccept\", 0) #number of accepted metropolized proposals\r",
"def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. (2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT",
"def solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,P,N,RHS=3):\n #I have added the variables P the transport matrix \n #and N the network size because they are needed\n #in the RHS.\n #I have added the variable RHS to be able to \n #choose which RHS method we want to use when running\n #solveFluNet\n \n #add input variables to RHS functions if needed\n def RHSnet(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\"\"\n S = y[:N]\n E = y[N:2*N]\n C = y[2*N:3*N]\n b = b0 + b1*(1+np.cos(2*np.pi*t))\n dy = np.zeros(3*N)\n dy[:N]= k*(1-S)-b*C*S+w*np.dot(P,S)-w*S\n dy[N:2*N]= b*C*S-(k+a)*E+w*np.dot(P,E)-w*E\n dy[2*N:3*N]= a*E-(g+k)*C+w*np.dot(P,C)-w*C\n return dy\n \n def RHSnetF(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\n Calculations carried out by fn.rhs\n \"\"\"\n dy = fn.rhs(P,y,t,a,b0,b1,g,k,w)\n return dy\n \n def RHSnetFomp(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\n Calculations carried out by fn.rhs_omp\n \"\"\"\n dy = fn.rhs_omp(P,y,t,a,b0,b1,g,k,w,2)\n return dy\n\n #Add code here and to RHS functions above to simulate network flu model\n t = np.linspace(0,T,Ntime)\n if (RHS==1):\n sol = odeint(RHSnet,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==2):\n sol = odeint(RHSnetF,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==3):\n sol = odeint(RHSnetFomp,y0,t,args=(a,b0,b1,g,k,w))\n S = sol[:,:N]\n E = sol[:,N:2*N]\n C = sol[:,2*N:3*N]\n return t,S,E,C",
"def _partition_vars(n_vars, n_binding, variables0, del_f0, d_psi_d_x0, settings):\n\n # Initialize some things\n z0 = np.zeros(n_vars)\n del_f_z0 = np.zeros(n_vars)\n d_psi_d_z0 = np.zeros((n_binding, n_vars))\n z_ind0 = []\n var_ind = -1\n\n # Search for independent variables and determine gradients\n for i in range(n_vars):\n while True:\n\n var_ind += 1\n # and (abs(variables0[var_ind])<1e-4 or variables0[var_ind]<0): # Slack variable at limit\n if var_ind < n_binding:\n z0[i] = variables0[var_ind]\n del_f_z0[i] = 0.0 # df/ds is always 0\n d_psi_d_z0[i, i] = 1.0 # dg/ds is always 1\n z_ind0.append(var_ind)\n break\n\n else: # Design variable\n z0[i] = variables0[var_ind]\n del_f_z0[i] = del_f0[var_ind-n_binding]\n d_psi_d_z0[:, i] = d_psi_d_x0[:, var_ind-n_binding]\n z_ind0.append(var_ind)\n break\n\n # Search for dependent variables and determine gradients\n # Note the number of dependent variables is equal to the number of binding constraints\n y0 = np.zeros(n_binding)\n del_f_y0 = np.zeros(n_binding)\n d_psi_d_y0 = np.zeros((n_binding, n_binding))\n y_ind0 = []\n var_ind = -1\n for i in range(n_binding):\n while True:\n\n var_ind += 1\n\n # Check if this variable is not independent\n if var_ind not in z_ind0:\n y0[i] = variables0[var_ind]\n del_f_y0[i] = del_f0[var_ind-n_binding]\n d_psi_d_y0[:, i] = d_psi_d_x0[:, var_ind-n_binding]\n y_ind0.append(var_ind)\n break\n\n # Check that this matrix is not singular\n _, s, _ = np.linalg.svd(d_psi_d_y0)\n swap_var = 0\n\n # Swap things around until the matrix is not singular\n while (abs(s) < 1e-14).any():\n\n if settings.verbose:\n print(\"Swapping independent and dependent variables.\")\n\n tempind = copy.copy(z_ind0[n_binding+swap_var])\n z_ind0[n_binding+swap_var] = y_ind0[swap_var]\n y_ind0[swap_var] = tempind\n\n tempz = np.copy(z0[n_binding+swap_var])\n z0[n_binding+swap_var] = y0[swap_var]\n y0[swap_var] = tempz\n\n tempgrad = np.copy(del_f_z0[n_binding+swap_var])\n del_f_z0[n_binding+swap_var] = del_f_y0[swap_var]\n del_f_y0[swap_var] = tempgrad\n\n temppsi = np.copy(d_psi_d_z0[:, n_binding+swap_var])\n d_psi_d_z0[:, n_binding+swap_var] = d_psi_d_y0[:, swap_var]\n d_psi_d_y0[:, swap_var] = temppsi\n\n # Check that the matrix is not singular\n _, s, _ = np.linalg.svd(d_psi_d_y0)\n swap_var += 1\n\n return z0, del_f_z0, d_psi_d_z0, z_ind0, y0, del_f_y0, d_psi_d_y0, y_ind0",
"def solve_leg_transient_once(self):\n\n self.delta_x = self.x[1] - self.x[0]\n self.delta_t = self.t_array[1] - self.t_array[0]\n self.y0 = np.array([self.T_x, self.q_x, self.Vs_x, self.R_x]).flatten()\n\n try: \n self.T_xt\n\n except AttributeError:\n self.odeint_output = odeint(\n self.get_dTx_dt, y0=self.y0, t=self.t_array,\n full_output=1 \n )\n self.T_xt = self.odeint_output[0]\n\n # doesnt really get here\n # else:\n # self.y0 = self.T_xt[-1,:]\n # self.odeint_output = odeint(\n # self.get_dTx_dt, y0=self.y0, t=self.t_array,\n # full_output=1 \n # )\n # self.T_xt = np.concatenate((self.T_xt, self.odeint_output[0]))\n \n print \"\\nDid get through all the calculations without error \\n\"\n\n self.Txt = self.T_xt[:, :self.nodes]\n # Don't need this following line anymore\n # self.qxt = self.T_xt[:, self.nodes:2*self.nodes]\n self.Vsxt = self.T_xt[:, 2*self.nodes:3*self.nodes]\n self.Rxt = self.T_xt[:, 3*self.nodes:]\n\n self.R_internal_transient = self.Rxt[:,-1] \n self.Vs_transient = self.Vsxt[:,0] - self.Vsxt[:,-1]\n\n self.I_transient = (\n self.Vs_transient/(self.R_load +\n self.R_internal_transient)\n )\n\n dT_dx = np.zeros([self.t_array.size,self.nodes])\n dT_dx[:,0] = (\n (self.Txt[:,1] - self.Txt[:,0]) / self.delta_x\n )\n dT_dx[:,1:-1] = (\n 0.5 * (self.Txt[:,2:] - self.Txt[:,:-2]) / self.delta_x\n )\n dT_dx[:,-1] = (\n (self.Txt[:,-1] - self.Txt[:,-2]) / self.delta_x\n )\n \n self.qxt = np.zeros([self.t_array.size, self.nodes])\n for i in range(self.t_array.size):\n T_props = self.Txt[i,0] # i for central differencing\n self.set_TEproperties(T_props) \n J = (self.I_transient[i]/self.area)\n self.qxt[i,0] = (\n J * self.Txt[i,0] * self.alpha - self.k * dT_dx[i,0]\n )\n self.qxt[i,-1] = (\n J * self.Txt[i,-1] * self.alpha - self.k * dT_dx[i,-1]\n )\n self.qxt[i,1:-1] = (\n J * self.Txt[i,1:-1] * self.alpha - self.k * dT_dx[i,1:-1]\n )",
"def taylor_expansion(self,g_temp,g_step,var):\n A=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g_temp*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g_temp*self.XXZ.Z(j,i)\n #First derivative\n B1=np.zeros(self.n+1)\n for i in range(self.n): \n B1[i]=self.gamma*2.*g_temp*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n Ainv=np.linalg.pinv(A)\n der1=np.dot(Ainv,B1)\n #Second derivative\n B2=np.zeros(self.n+1)\n for k in range(self.n):\n B2[k]=self.gamma*2.*self.N*(self.n-self.N) -2.*der1[k]**2+2.*np.sum([self.XXZ.Z(l,k)*(der1[l]-der1[k]) for l in range(self.n) if k!=l])\n der2=np.dot(Ainv,B2)\n #Third derivative\n B3=np.zeros(self.n+1)\n for k in range(self.n):\n B3[k]=-6*der1[k]*der2[k]+3.*np.sum([self.XXZ.Z(l,k)*(der2[l]-der2[k]) for l in range(self.n) if k!=l])\n der3=np.dot(Ainv,B3)\n #Fourth derivative\n B4=np.zeros(self.n+1)\n for k in range(self.n):\n B4[k]=-8.*der3[k]*der1[k]-6.*der2[k]*der2[k]+4.*np.sum([self.XXZ.Z(l,k)*(der3[l]-der3[k]) for l in range(self.n) if k!=l])\n der4=np.dot(Ainv,B4)\n \n return var+g_step*der1+g_step**2*der2/2.+g_step**3*der3/6.+g_step**4*der4/24.",
"def get_wt_from_newtonian_descent(xd, yd, v, phi, steps = 100, w0=0.1, t0 = 0.1, k=1):\n\twt = np.reshape(np.array([0.1, 0.1]), (2,1))\n\tpd = np.reshape(np.array([yd, xd]), (2,1))\n\n\tfor i in range(steps):\n\t\te = np.reshape(np.array([y_t(wt[0], wt[1], v, phi), x_t(wt[0], wt[1], v, phi)]), (2,1)) - pd \n\t\tgrad_e_wt = np.squeeze(np.array([[dy_w(wt[0], wt[1], v, phi), dy_t(wt[0], wt[1], v, phi)],\\\n\t\t\t\t\t\t\t\t[dx_w(wt[0], wt[1], v, phi), dx_t(wt[0], wt[1], v, phi)]]), axis=2)\n\t\twt = wt - k*np.reshape(np.array([np.matmul(np.linalg.pinv(np.reshape(grad_e_wt[:,0], (2,1))), e),\\\n\t\t\t\t\t\t\t np.matmul(np.linalg.pinv(np.reshape(grad_e_wt[:,1], (2,1))), e)]), (2,1))\n\n\n\treturn wt[0], wt[1]",
"def fluid_deriv(self):\n deriv = np.zeros((self.num_nw_fluids * self.num_o, 1 + self.num_o,\n self.num_nw_vars))\n k = 0\n for o in self.outl:\n i = 0\n for fluid in self.nw_fluids:\n deriv[i + k * self.num_nw_fluids, 0, i + 3] = 1\n deriv[i + k * self.num_nw_fluids, k + 1, i + 3] = -1\n i += 1\n k += 1\n return deriv",
"def niv_variable_selection(x, y, t, max_vars):\n y1_t = (y == 1) & (t == 1)\n y0_t = (y == 0) & (t == 1)\n y1_c = (y == 1) & (t == 0)\n y0_c = (y == 0) & (t == 0)\n\n sum_y1_t = sum(y1_t)\n sum_y0_t = sum(y0_t)\n sum_y1_c = sum(y1_c)\n sum_y0_c = sum(y0_c)\n\n niv_dict = {}\n for col in x.columns:\n df = pd.concat([x[col].rename(col), y1_t.rename('y1_t'), y0_t.rename('y0_t'),\n y1_c.rename('y1_c'), y0_c.rename('y0_c')], axis=1)\n x_group = df.groupby(x[col])\n x_sum = x_group.sum()\n\n if sum_y0_t == 0 or sum_y1_t == 0:\n woe_t = 0\n else:\n woe_t = x_sum.apply(lambda r: np.log((r['y1_t'] * sum_y0_t) / (r['y0_t'] * sum_y1_t))\n if r['y1_t'] > 0 and r['y0_t'] > 0 else 0, axis=1)\n\n if sum_y0_c == 0 or sum_y1_c == 0:\n woe_c = 0\n else:\n woe_c = x_sum.apply(lambda r: np.log((r['y1_c'] * sum_y0_c) / (r['y0_c'] * sum_y1_c))\n if r['y1_c'] > 0 and r['y0_c'] > 0 else 0, axis=1)\n\n nwoe = woe_t - woe_c\n\n p_x_y1_t = x_sum['y1_t'] / sum_y1_t if sum_y1_t > 0 else 0\n p_x_y0_t = x_sum['y0_t'] / sum_y0_t if sum_y0_t > 0 else 0\n p_x_y1_c = x_sum['y1_c'] / sum_y1_c if sum_y1_c > 0 else 0\n p_x_y0_c = x_sum['y0_c'] / sum_y0_c if sum_y0_c > 0 else 0\n niv_weight = (p_x_y1_t * p_x_y0_c - p_x_y0_t * p_x_y1_c)\n\n niv_row = 100 * nwoe * niv_weight\n niv = niv_row.sum()\n niv_dict[col] = niv\n\n s_niv = pd.Series(niv_dict)\n s_selected_niv = s_niv.sort_values(ascending=False)[: max_vars]\n\n return s_selected_niv.index",
"def initiate_variables(disc, s0):\r\n gb = disc.problem.gb\r\n lam_c0 = np.zeros(gb.num_mortar_cells())\r\n # Initial guess for the pressure and mortar flux\r\n p0_init = np.zeros(gb.num_cells())\r\n lam0_init = np.zeros(gb.num_mortar_cells())\r\n # Define Ad variables\r\n p, lam = pp.ad.initAdArrays([p0_init, lam0_init])\r\n # define dofs indices\r\n p_ix = slice(gb.num_cells())\r\n lam_ix = slice(gb.num_cells(), gb.num_cells() + gb.num_mortar_cells())\r\n s_ix = slice(\r\n gb.num_cells() + gb.num_mortar_cells(),\r\n 2 * gb.num_cells() + gb.num_mortar_cells(),\r\n )\r\n lam_c_ix = slice(\r\n 2 * gb.num_cells() + gb.num_mortar_cells(),\r\n 2 * gb.num_cells() + 2 * gb.num_mortar_cells(),\r\n )\r\n # Solve with Newton (should converge in 1 or 2 iterations. Is non-linear due to\r\n # upstream weights)\r\n p0 = p.val\r\n lam0 = lam.val\r\n sol = np.hstack((p.val, lam.val))\r\n q = np.zeros(gb.num_faces())\r\n\r\n err = np.inf\r\n newton_it = 0\r\n sol0 = sol.copy()\r\n newton_it = 0\r\n\r\n while err > 1e-9:\r\n newton_it += 1\r\n q = darcy(disc, p, s0, lam)\r\n eq_init = pp.ad.concatenate(\r\n (\r\n mass_conservation(disc, lam, q, 0),\r\n coupling_law_p(disc, p, lam, s0),\r\n )\r\n )\r\n err = np.max(np.abs(eq_init.val))\r\n sol = sol - sps.linalg.spsolve(eq_init.jac, eq_init.val)\r\n\r\n# sol = sol - linear_solvers.amg(eq_init.jac, eq_init.val, sol)\r\n p.val = sol[p_ix]\r\n lam.val = sol[lam_ix]\r\n\r\n if newton_it > 20:\r\n raise RuntimeError('Failed to converge Newton iteration in variable initiation')\r\n\r\n # Now that we have solved for initial condition, initalize full problem\r\n p, lam, s, lam_c = pp.ad.initAdArrays([p.val, lam.val, s0, lam_c0])\r\n sol = np.hstack((p.val, lam.val, s.val, lam_c.val))\r\n\r\n q = darcy(disc, p, s, lam)\r\n return p, lam, q, s, lam_c, sol, p_ix, lam_ix, s_ix, lam_c_ix",
"def variational_distribution(self):\n activation = tf.nn.relu\n if self.linear:\n activation = None\n\n #q(z | x, s)\n if self.log_variational:\n x = tf.log(1 + self.expression)\n else:\n x = self.expression\n\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n for layer in range(2, self.n_layers + 1):\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n \n self.qz_m = dense(h, self.n_latent, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.qz_v = dense(h, self.n_latent, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n if self.scalings:\n # q(l | x, s)\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n self.ql_m = dense(h, 1, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.ql_v = dense(h, 1, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)",
"def fluid_deriv(self):\n deriv = np.zeros((self.fluid_constraints['num_eq'],\n 2 * self.num_i + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n for j in range(self.num_nw_fluids):\n deriv[i * self.num_nw_fluids + j, i, j + 3] = 1\n deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1\n return deriv",
"def fluid_deriv(self):\n # derivatives for cooling liquid composition\n deriv = np.zeros((\n self.num_nw_fluids * 4,\n 5 + self.num_vars,\n self.num_nw_vars))\n\n k = 0\n for fluid, x in self.inl[0].fluid.val.items():\n deriv[k, 0, 3 + k] = 1\n deriv[k, 2, 3 + k] = -1\n k += 1\n\n # derivatives to constrain fluids to inlets/outlets\n i = 0\n for fluid in self.nw_fluids:\n if fluid == self.h2o:\n deriv[k, 1, 3 + i] = -1\n elif fluid == self.o2:\n deriv[k + 1, 3, 3 + i] = -1\n elif fluid == self.h2:\n deriv[k + 2, 4, 3 + i] = -1\n i += 1\n k += 3\n\n # derivatives to ban fluids off inlets/outlets\n i = 0\n for fluid in self.nw_fluids:\n if fluid != self.h2o:\n deriv[k, 1, 3 + i] = -1\n k += 1\n if fluid != self.o2:\n deriv[k, 3, 3 + i] = -1\n k += 1\n if fluid != self.h2:\n deriv[k, 4, 3 + i] = -1\n k += 1\n i += 1\n\n return deriv",
"def solve_TDSE(params, wavefunction_initial):\n\n # Initilise density\n density = np.zeros((params.Ntime,params.Nspace))\n density[0,:] = calculate_density_exact(params, wavefunction_initial)\n wavefunction = np.copy(wavefunction_initial)\n\n # Time stepping defined by numpy's expm function\n if params.time_step_method == 'expm':\n\n # Construct the perturbed hamiltonian\n v_ext = np.copy(params.v_ext)\n params.v_ext = params.v_ext_td[1,:]\n hamiltonian = construct_H_sparse(params, basis_type='position')\n\n for i in range(1,params.Ntime):\n\n # If the perturbation to the external potential is dependent on time\n # updated H at each time-step\n if params.td_pert:\n\n params.v_ext = params.v_ext_td[i,:]\n hamiltonian = construct_H_sparse(params, basis_type='position')\n\n # Evolve the wavefunction\n wavefunction = expm_step(params, wavefunction, hamiltonian)\n\n # Compute the density\n density[i,:] = calculate_density_exact(params, wavefunction)\n\n # Renormalise the wavefunction\n wavefunction *= (np.sum(abs(wavefunction[:])**2)*params.dx**2)**-0.5\n\n # Calculate + normalise density\n density[i,:] = calculate_density_exact(params, wavefunction)\n\n print('Time passed: {}'.format(round(params.time_grid[i],3)), end='\\r')\n\n params.v_ext = v_ext\n\n # Crank-Nicolson time-stepping\n elif params.time_step_method == 'CN':\n\n # Update external potential and construct corresponding hamiltonian\n params.v_ext = params.v_ext_td[1,:]\n hamiltonian = construct_H_sparse(params, basis_type='position')\n\n # Construct CN matrix A for CN's Ax=b equation\n CN_matrix = 0.5j*params.dt*hamiltonian\n identity = sp.sparse.csr_matrix(np.diag(np.ones(params.Nspace**2)))\n A = identity + CN_matrix\n\n for i in range(0,params.Ntime):\n\n # If the perturbation to the external potential is dependent on time\n # updated H at each time-step\n if params.td_pert:\n\n params.v_ext = params.v_ext_td[i,:]\n hamiltonian = construct_H_sparse(params, basis_type='position')\n\n wavefunction = crank_nicolson_step(params, wavefunction, hamiltonian)\n\n else:\n\n # Construct b for CN's Ax=b equation\n b = (identity - CN_matrix).dot(wavefunction)\n\n # Evolve the wavefunction\n wavefunction, status = sp.sparse.linalg.cg(A, b, x0=wavefunction, tol=1e-17, atol=1e-15)\n\n # Renormalise the wavefunction, potentially dubious\n wavefunction *= (np.sum(abs(wavefunction[:])**2) * params.dx**2)**-0.5\n\n # Calculate and renormalise density\n density[i,:] = calculate_density_exact(params, wavefunction)\n\n print('Time passed: {}'.format(round(params.time_grid[i],3)), end='\\r')\n\n else:\n raise RuntimeError('Not a valid time-stepping method: {}'.format(params.time_step_method))\n\n return density",
"def classic_var(self,tau21=0.864,tau31=0.864,Verbose=False):\n\n self.algo = 'dozier'\n\n# Compute radiances\n# -----------------\n L21 = B21(self.T21) \n L31 = B31(self.T31) \n E21 = B21(self.Tb21)\n E31 = B31(self.Tb31)\n N = L21.size\n\n if isscalar(tau21): tau21 = tau21 * ones(L21.shape)\n if isscalar(tau31): tau31 = tau31 * ones(L31.shape)\n \n# Use a variational approach - Needs vectorization\n# ------------------------------------------------\n sig21 = 1.\n sig31 = 1.\n x0 = [600.,P_SCALE*0.1] # [Tf,p]; p here is normalized; 10 ha/ 1 km2 = 0.1\n Tf = - ones(N)\n p = - ones(N)\n niter = 200\n for i in range(N):\n rvals = fmin(Jfunc2d, x0, ftol=0.001, maxiter=niter, disp=0, full_output=1, \\\n args=(L21[i],E21[i],tau21[i],sig21,L31[i],E31[i],tau31[i],sig31))\n x = rvals[0]\n iter = rvals[2]\n if iter < niter:\n Tf[i] = x[0]\n p[i] = 100. * x[1] / P_SCALE # units is %\n\n# Quality control\n# ---------------\n m = isnan(Tf) == False\n m = m & (Tf<1800.)\n m = m & (p>0) & (p<=100)\n\n# Add solution as attributes\n# --------------------------\n self.m = m\n self.Tf = Tf\n self.p = p\n\n# Replace fire size with median size for those fires that did not converge\n# ------------------------------------------------------------------------\n I = (m == False)\n self.p[I] = median(self.p[m])\n\n self.farea = (self.p/100.) * self.pixar # km2\n self.hflux = 0.001 * self.pow / self.farea # kW/m2\n\n# Print out results\n# -----------------\n y = 100. * ( Tf[m].size ) / N + 0.05\n if Verbose:\n print_stats('__header__','Classic Dozier - Variational Results (Yield: %4.1f%%)'%y)\n print_stats('Tf (K)',Tf[m])\n print_stats('p (%)',p[m])\n print_stats('A (km2)',self.farea[m])\n print_stats('HF (kW/m2)',self.hflux[m])\n print_stats('__footer__')",
"def variable_costs(dh: DataHandler):\n print(\"PtHydrogen not implemented\")\n\n scen_hor_map = dh.scenarios.horizon\n\n cost_var = dh.get(\"i_cost\").xs(\"varcost\", level=\"par_cost\")\n cost_var = cost_var.groupby([\"alltec\"]).apply(\n extract_horizon_specific_cost, scen_hor_map\n )\n cost_var = add_dimension(cost_var, dh.merge_stored_sets(\"r\"), \"r\")\n cost_var = cost_var.reorder_levels([\"alltec\", \"r\"])\n\n h2_price = dh.get(\"o_h2price_buy\")\n h2_price = add_dimension(h2_price, dh.merge_stored_sets(\"tec_h2g\"), \"alltec\")\n\n elec_price = dh.get(\"o_prices\")\n\n cost_fuel = dh.get(\"cost_fuel\")\n cost_fuel = add_dimension(cost_fuel, dh.merge_stored_sets(\"r\"), \"r\")\n cost_fuel = cost_fuel.reorder_levels([\"alltec\", \"r\"])\n\n cost_fuel.loc[h2_price.index, :] = h2_price\n\n eff = dh.get(\"eff\")\n\n co2_int = dh.get(\"co2_int\").div(1000)\n\n co2_price = dh.get(\"o_co2price\")\n\n co2_costs = co2_int * co2_price\n co2_costs.index.names = [\"alltec\", \"r\"]\n\n var_cost = (\n cost_fuel.add(co2_costs, fill_value=0).div(eff).add(cost_var, fill_value=0)\n )\n\n return var_cost",
"def trans_eddy_flux(variables, **kwargs):\n return aht(variables[4:], **kwargs) - moc_st_eddy_flux(variables[:4], **kwargs)",
"def taylor_exp_3(y0, t, f, jac, hess, df_dt=None, d2f_dt2=None, d2f_dtdu=None, verbose=True, krylov_subspace_dim=None,\n **_):\n try:\n n, d = len(t), len(y0)\n y = np.zeros((n, d))\n except TypeError:\n n, d = len(t), 1\n y = np.zeros((n,))\n if verbose is False:\n count = Counter('', 0)\n elif verbose is True:\n count = Counter('Taylor Exp 3', n)\n else:\n count = Counter(verbose, n)\n if df_dt is None:\n def df_dt(*_): return np.zeros((d,))\n if d2f_dt2 is None:\n def d2f_dt2(*_): return np.zeros((d,))\n if d2f_dtdu is None:\n def d2f_dtdu(*_): return np.zeros((d, d))\n y[0] = y0\n j = jac(y[0], t[0])\n w = np.zeros((d, 3))\n expanded_vector = np.zeros((d + 3,))\n expanded_vector[-1] = 1\n expanded_matrix = np.zeros((d + 3, d + 3))\n expanded_matrix[-3:-1, -2:] = np.eye(2)\n expanded_matrix[:d, :d] = j\n for i in range(n - 1):\n h = t[i + 1] - t[i]\n w[:, -1] = f(y[i], t[i]) - np.dot(j, y[i])\n w[:, -2] = np.dot(jac(y[i], t[i]) - j, f(y[i], t[i])) + df_dt(y[i], t[i])\n w[:, -3] = np.dot(np.dot(hess(y[i], t[i]), f(y[i], t[i])), f(y[i], t[i])) \\\n + np.dot(jac(y[i], t[i]) - j, np.dot(jac(y[i], t[i]), y[i])) \\\n + np.dot(jac(y[i], t[i]) - j, df_dt(y[i], t[i])) \\\n + 2 * np.dot(d2f_dtdu(y[i], t[i]), f(y[i], t[i])) \\\n + d2f_dt2(y[i], t[i])\n expanded_vector[:d] = y[i]\n expanded_matrix[:d, -3:] = w\n if krylov_subspace_dim is None:\n y[i + 1] = np.dot(expm_sp(h * expanded_matrix), expanded_vector)[:d]\n else:\n y[i + 1] = expm_krylov(h * expanded_matrix, expanded_vector, krylov_subspace_dim)[:d]\n count(i + 1)\n return y"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the LCL (in m) for a row in the dataframe | def calc_lcl(row,psfc):
Tdew = tf.tmr(row['qv'],psfc)
LCL = tf.LCL(Tdew,row['theta'],psfc) #kPa
#
# rough approximation: 10 kPa = 1 km
#
delp=psfc - LCL
lcl_h = delp*100.
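# e.g. delp = 5 kPa gives lcl_h = 500 m under the 10 kPa ~ 1 km scaling noted above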
return lcl_h | [
"def compute_kl(self, df):\n value_counts = [df[col].value_counts() for col in self.hist_cols]\n next_hists = self.value_counts_to_hists(value_counts)\n\n if self.prev_hists is None:\n self.prev_hists = next_hists\n return None\n\n output = []\n for prev_h, curr_h in zip(self.prev_hists, next_hists):\n for i in range(len(prev_h)):\n prev_h[i] = prev_h[i] if prev_h[i] != 0 else 1\n curr_h[i] = curr_h[i] if curr_h[i] != 0 else 1\n kl = entropy(prev_h, curr_h)\n output.append(kl)\n\n self.prev_hists = next_hists\n return output",
"def LN(df):\n close = df['close']\n return talib.LN(close)",
"def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)",
"def calculate_recovery_clifford(cl_in, desired_cl=0):\n row = list(clifford_lookuptable[cl_in])\n return row.index(desired_cl)",
"def CDLMATCHINGLOW(df):\n open = df['open']\n high = df['high']\n low = df['low']\n close = df['close']\n return talib.CDLMATCHINGLOW(open, high, low, close)",
"def L_Chol(df,idx):\r\n clm=[df.columns[i1] for i1 in idx]\r\n corr=df[clm].corr().to_numpy()\r\n L=scipy.linalg.cholesky(corr, lower=True, overwrite_a=False)\r\n return L",
"def get_Lk(m, lvec):\n\n raise NotImplementedError",
"def getL(self):\n return _fiasco_numpy.KalmanProcess_getL(self)",
"def lam(self):\n return self._lam",
"def findLocalMins(engMatrixRow):\n localMins = np.zeros_like(engMatrixRow)\n left = engMatrixRow[:-2].copy()\n center = engMatrixRow[1:-1].copy()\n right = engMatrixRow[2:].copy()\n left[0] = 10**5\n right[-1] = 10**5\n minLC = np.minimum(left,center)\n minLCR = np.minimum(minLC, right)\n\n localMins[0] = 10**5\n localMins[1:-1] = minLCR\n localMins[-1] = 10**5\n\n return localMins",
"def lagrange(self, x, l):\n N = self.nnode(l)\n c = np.array([x - self.x[l]]) * np.array([np.ones(N)]).T\n c = c * (1. - np.identity(N)) + np.identity(N)\n return np.prod(c, 1) / self.cnorm[l]",
"def lam_grid_c(self, order):\n index = slice(*self.i_bounds[order])\n return self.lam_grid[index]",
"def getLPos(self):\n c = 0\n while c <= ALIENS_IN_ROW-1:\n i = 0\n for a in range(ALIEN_ROWS):\n if self._aliens[a][c] != None:\n return self._aliens[a][c].x - ALIEN_WIDTH/2\n else:\n i +=1\n if i == ALIEN_ROWS:\n c +=1",
"def linear_branch_number(self):\n m = self.m\n n = self.n\n ret = (1<<m) + (1<<n)\n lat = self.linear_approximation_matrix()\n\n for a in range(1, 1<<m):\n for b in range(1<<n):\n if lat[a,b] != 0:\n w = ZZ(a).popcount() + ZZ(b).popcount()\n if w < ret:\n ret = w\n return ret",
"def CDLMORNINGDOJISTAR(df):\n open = df['open']\n high = df['high']\n low = df['low']\n close = df['close']\n return talib.CDLMORNINGDOJISTAR(open, high, low, close)",
"def get_closest_idx_lsearch(l1d, tgt_value):\n dis=abs(tgt_value-l1d)\n return np.argwhere(dis==dis.min())[0].tolist()[0]",
"def LDL_sparse(matrix):\n Lrowstruc = matrix.row_structure_symbolic_cholesky()\n L = matrix.eye(matrix.rows)\n D = matrix.zeros(matrix.rows, matrix.cols)\n\n for i in range(len(Lrowstruc)):\n for j in Lrowstruc[i]:\n if i != j:\n L[i, j] = matrix[i, j]\n summ = 0\n for p1 in Lrowstruc[i]:\n if p1 < j:\n for p2 in Lrowstruc[j]:\n if p2 < j:\n if p1 == p2: #cancel possible ici\n summ += L[i, p1]*L[j, p1]*D[p1, p1]\n else:\n break\n else:\n break\n L[i, j] -= summ #ici\n L[i, j] = (L[i,j] / D[j, j]).cancel() #ici\n else: # i == j\n D[i, i] = matrix[i, i].cancel() ### cancel rajouté\n summ = 0\n for k in Lrowstruc[i]:\n if k < i:\n summ += (L[i, k]**2*D[k, k]).cancel() ### cancelrajouté\n else:\n break\n D[i, i] -= summ\n D[i,i] = D[i,i].cancel() #rajouté\n\n return L, D",
"def extract_hillslope_profile(node_matrix):\n ncols = numpy.size(node_matrix, 1)\n z = numpy.zeros(ncols)\n for col in range(ncols):\n dirt = numpy.where(node_matrix[:,col]!=0)[0]\n if len(dirt)>0:\n z[col] = numpy.amax(dirt)\n return z",
"def return_bmcs(self,cl):\n\t\tbcl=np.array(np.matrix(self.pbl)*np.transpose(np.matrix(cl[self.lmin:self.lmax+1])))[:,0]\n\t\tbcl=ms.master.est_true_cl(bcl,self.mbbp,len(bcl))\n\n\t\t#ubcl=np.array(np.matrix(self.qlb_nobeam)*np.transpose(np.matrix(bcl)))[:,0]\n\t\t#ubcl=np.append(np.zeros(self.lmin,float),ubcl)\n\n\t\treturn self.lbin,bcl #,ubcl"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adapted from interactive_vaporflux.ipynb. sst: sea surface temperature (K); ft_qv: mixed-layer top qv (kg kg^-1); use_NT: True or False. Outputs csv and json files with equilibrium values | def run_main(sst, ft_qv, use_NT):
dtout=10. #minutes
end_time=8*24. #hours
del_time=dtout*60. #seconds
end_time=end_time*3600. #seconds
#sst=297
D=5.e-6 #s-1
U=7 #m/s
psfc=100. #kPa
qsfc=tf.qs_tp(sst,psfc)
ft_intercept = 292 #K
ft_gamma = 6.e-3 #K/m
#ft_qv = 2.e-3
k=0.2 #entrainment efficiency
Cd = 1.e-3 #drag coefficient
tspan = np.arange(0.,end_time,del_time)
vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start
the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,
qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT
the_tup=make_tuple(the_tup,'coeffs')
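# integrate the three mixed-layer variables (theta, h, qv) forward in time with scipy's odeint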
output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))
result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])
# save time/computation by only doing calculations for the last timestep (equilibrium)
result['time']=tspan[-1]/3600./24. #days
result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]
result['delqv'] = ft_qv - result['qv'].iloc[-1]
result['LCL'] = calc_lcl(result.iloc[-1], psfc)
result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)
result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)
result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)
# decide how to calculate entrainment
the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]
if use_NT:
result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1],
result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])
else:
result['went']=calc_went(result.iloc[-1],the_tup)
result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)
with open('dumpmodel.csv','w') as f:
result.to_csv(f,index=False)
return None | [
"def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array",
"def extractQuantities(path, run, t0, t1):\n data = pyLTR.Models.MIX(path, run)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['Grid X', 'Grid Y', \n 'Potential North [V]', 'Potential South [V]', \n 'FAC North [A/m^2]', 'FAC South [A/m^2]',\n 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', \n 'Hall conductance North [S]', 'Hall conductance South [S]', \n 'Average energy North [keV]', 'Average energy South [keV]',\n 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting MIX quantities for time series over %d time steps.' % (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('Grid X', timeRange[index0])\n y = data.read('Grid Y', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6500.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('Potential North [V]', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('Potential South [V]', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('Average energy North [keV]', time)\n flux = data.read('Number flux North [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('Average energy South [keV]', time)\n flux = data.read('Number flux South [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('FAC North [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('FAC South [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n 
dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n \n # \"N\" and \"S\" label subscripts are redundant here, potentially leading to\n # mis-labeling of plots\n #dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n #dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n #\n #dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n #dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n #\n #dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n #dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n \n dataNorth.append('cpcp', r'$\\Phi$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi$', 'kV', cpcpSouth)\n \n dataNorth.append('hp', r'$HP$', 'GW', hpNorth)\n dataSouth.append('hp', r'$HP$', 'GW', hpSouth)\n \n dataNorth.append('ipfac', r'$FAC$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)",
"def get_thermochem(file_set, results_dict, save_vibes, out_dir, tog_output_fname, qh_h_opt, write_mode):\n h = []\n qh_h = []\n gt = []\n qh_gt = []\n temps = []\n for index, file in enumerate(file_set):\n base_name = os.path.basename(file)\n if file == REACT_PROD_SEP:\n h.append(np.full([len(temps)], np.nan))\n qh_h.append(np.full([len(temps)], np.nan))\n gt.append(np.full([len(temps)], np.nan))\n qh_gt.append(np.full([len(temps)], np.nan))\n continue\n vibes_out = results_dict[base_name][GOODVIBES_OUT]\n found_structure = False\n skip_line = True\n h.append([])\n qh_h.append([])\n gt.append([])\n qh_gt.append([])\n # we know the last line should be dropped, and at least the first 10\n for line in vibes_out[10:-2]:\n if GOODVIBES_ERROR_PAT.match(line):\n raise InvalidDataError(\"See GoodVibes output: {}\".format(vibes_out))\n if not found_structure:\n if GOODVIBES_DATA_PAT.match(line):\n found_structure = True\n continue\n elif skip_line:\n skip_line = False\n continue\n else:\n vals = line.split()\n if index == 0:\n temps.append(float(vals[1]))\n h[index].append(float(vals[2]))\n if qh_h_opt:\n qh_h[index].append(float(vals[3]))\n gt[index].append(float(vals[-2]))\n qh_gt[index].append(float(vals[-1]))\n if save_vibes:\n vibes_out_fname = os.path.relpath(create_out_fname(file, suffix='_vibes', base_dir=out_dir, ext='.dat'))\n list_to_file(vibes_out, vibes_out_fname, print_message=False)\n print('Saved GoodVibes output as: {}'.format(vibes_out_fname))\n if tog_output_fname:\n list_to_file(vibes_out, tog_output_fname, mode=write_mode, print_message=False)\n if write_mode == 'w':\n print(\"Adding all GoodVibes output to: {}\".format(tog_output_fname))\n write_mode = \"a\"\n\n temps = np.asarray(temps)\n # for each molecule, multiply the array to convert to kcal/mol\n for index in range(len(gt)):\n h[index] = np.asarray(h[index]) * EHPART_TO_KCAL_MOL\n if qh_h_opt:\n qh_h[index] = np.asarray(qh_h[index]) * EHPART_TO_KCAL_MOL\n gt[index] = np.asarray(gt[index]) * EHPART_TO_KCAL_MOL\n qh_gt[index] = np.asarray(qh_gt[index]) * EHPART_TO_KCAL_MOL\n\n return temps, h, qh_h, gt, qh_gt",
"def get_qor(self):\n\n filename = 'vivado_hls.log'\n f = open(filename, 'r')\n while True:\n line = f.readline()\n if not line: break\n if 'LUT:' in line: \n LUT = line.strip('\\n').split()[-1]\n continue\n if 'FF:' in line:\n FF = line.strip('\\n').split()[-1]\n continue\n if 'DSP' in line:\n DSP = line.strip('\\n').split()[-1]\n continue\n if 'BRAM:' in line:\n BRAM = line.strip('\\n').split()[-1]\n continue\n if 'achieved post-implementation' in line:\n timing = line.strip('\\n').split()[-1]\n continue\n f.close()\n \n outlog = 'cordic.prj/solution1/csim/build/out.dat'\n f = open(outlog, 'r')\n while True:\n line = f.readline()\n if not line: break\n if 'Overall_Error_Sin' in line:\n err_sin = line.strip('\\n').split()[-1]\n continue\n if 'Overall_Error_Cos' in line: \n err_cos = line.strip('\\n').split()[-1]\n continue\n f.close()\n err_rate = (float(err_sin) + float(err_cos))/2.0\n if err_rate > 5:\n LUT = 10000\n else: \n pass\n metadata = [FF, DSP, BRAM, timing, err_rate]\n #e = xml.etree.ElementTree.parse(filename).getroot()\n #for item in e.iter('EstimatedClockPeriod'):\n # timing = item.text\n #metadata = []\n #for tag in e.iter('Resources'):\n # for item in tag.getchildren():\n # metadata.append(item.text)\n return float(LUT), metadata",
"def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)",
"def extractQuantities(path, run, runExt, t0, t1):\n data = pyLTR.Models.LFMION(path, run,ext=runExt)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['x_interp','y_interp',\n 'potnorth','potsouth',\n 'curnorth','cursouth',\n 'SigmaP_north','SigmaP_south',\n 'SigmaH_north','SigmaH_south',\n 'avE_north','avE_south',\n 'fluxnorth','fluxsouth']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting LFM ION quantities for time series over %d time steps.' % (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('x_interp', timeRange[index0])\n y = data.read('y_interp', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6370.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('potnorth', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('potsouth', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('avE_north', time)\n flux = data.read('fluxnorth', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('avE_south', time)\n flux = data.read('fluxsouth', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('curnorth', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('cursouth', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n\n dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n\n dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n 
dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n\n dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)",
"def TMY_CSV_to_solar_data(filename):\n if not os.path.isfile(filename):\n raise FileNotFoundError(filename + \" does not exist.\")\n wfd = defaultdict(list)\n with open(filename) as file_in:\n info = []\n for i in range(2):\n info.append(file_in.readline())\n info[i] = info[i].split(\",\")\n if \"Time Zone\" not in info[0]:\n raise ValueError(\"`Time Zone` field not found in solar resource file.\")\n latitude = info[1][info[0].index(\"Latitude\")]\n longitude = info[1][info[0].index(\"Longitude\")]\n tz = info[1][info[0].index(\"Time Zone\")]\n elev = info[1][info[0].index(\"Elevation\")]\n reader = csv.DictReader(file_in)\n for row in reader:\n for col, dat in row.items():\n if len(col) > 0:\n wfd[col].append(float(dat))\n\n weather = dict()\n weather['tz'] = float(tz)\n weather['elev'] = float(elev)\n weather['lat'] = float(latitude)\n weather['lon'] = float(longitude)\n weather['year'] = wfd.pop('Year')\n weather['month'] = wfd.pop('Month')\n weather['day'] = wfd.pop('Day')\n weather['hour'] = wfd.pop('Hour')\n weather['minute'] = wfd.pop('Minute')\n weather['dn'] = wfd.pop('DNI')\n weather['df'] = wfd.pop('DHI')\n weather['gh'] = wfd.pop('GHI')\n weather['wspd'] = wfd.pop('Wind Speed')\n weather['tdry'] = wfd.pop('Temperature')\n\n return weather",
"def load_export_data():\n exp = np.load('tracing_sim/results_exponential_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n exp_noQ = np.load('tracing_sim/results_exponential_withoutQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n exp_low_eff = np.load('tracing_sim/results_exponential_withQ_halfreact_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n lockdown = np.load('tracing_sim/results_smallworld_lockdown_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n no_lockdown = np.load('tracing_sim/results_erdosrenyi_withQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw = np.load('tracing_sim/results_smallworld_withQ_v3_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_noQ = np.load('tracing_sim/results_smallworld_withoutQ_v2_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_low_eff = np.load('tracing_sim/results_smallworld_withQ_halfreact_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_exp = np.load('tracing_sim/results_smallworld_exponential_asc_withQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n sw_exp_ = np.load('tracing_sim/results_smallworld_exponential_random_withQ_NMEAS_100_ONLYSAVETIME_False/results_mean_err.npz')\n\n data = {}\n data[\"exp\"] = exp['mean']\n data[\"exp_noQ\"] = exp_noQ['mean']\n data[\"exp_low_eff\"] = exp_low_eff['mean']\n data[\"lockdown\"] = lockdown['mean']\n data[\"no_lockdown\"] = no_lockdown['mean']\n data[\"sw\"] = sw['mean']\n data[\"sw_noQ\"] = sw_noQ['mean']\n data[\"sw_low_eff\"] = sw_low_eff['mean']\n data[\"sw_exp\"] = sw_exp['mean']\n data[\"sw_exp_\"] = sw_exp_['mean']\n\n\n datalist = [exp,exp_noQ,exp_low_eff,lockdown,no_lockdown,sw,sw_noQ,sw_low_eff,sw_exp,sw_exp_]\n stringlist = [\"exp\",\"exp_noQ\",\"exp_low_eff\",\"lockdown\",\"no_lockdown\",\"sw\",\"sw_noQ\",\"sw_low_eff\",\"sw_exp\",\"sw_exp_\"]\n\n data_dict = {}\n\n for k,v in data.items():\n\n data_dict[k] = {}\n data_dict[k][\"O\"] = np.array([sum([data[k][:,x,0,i] for i in range(5)]) for x in range(4)])/200_000\n data_dict[k][\"DF\"] = (data_dict[k][\"O\"])/(np.array([sum([data[k][:,x,0,i] for i in [2,3]]) for x in range(4)])/200_000)\n data_dict[k][\"red\"] = [(((data_dict[k][\"O\"][x]/data_dict[k][\"O\"][x][0])-1)*100) for x in range(4)]\n try:\n data_dict[k][\"O_y0.5\"] = np.array([sum([data[k][:,x,1,i] for i in range(5)]) for x in range(4)])/200_000\n data_dict[k][\"DF_y0.5\"] = (data_dict[k][\"O_y0.5\"])/(np.array([sum([data[k][:,x,1,i] for i in [2,3]]) for x in range(4)])/200_000)\n data_dict[k][\"red_y0.5\"] = [(((data_dict[k][\"O_y0.5\"][x]/data_dict[k][\"O_y0.5\"][x][0])-1)*100) for x in range(4)]\n except:\n pass\n\n data_new = {}\n\n for k,v in data_dict.items():\n data_new[k] = {}\n data_new[k+\"0.5\"] = {}\n data_new[k][\"absolute\"] = {}\n data_new[k][\"reduction\"] = {}\n data_new[k+\"0.5\"][\"absolute\"] = {}\n data_new[k+\"0.5\"][\"reduction\"] = {}\n for i in range(4):\n data_new[k][\"absolute\"][str(np.round(data_dict[k][\"DF\"][i][0]))] = list(data_dict[k][\"O\"][i])\n data_new[k][\"reduction\"][str(np.round(data_dict[k][\"DF\"][i][0]))] = list(data_dict[k][\"red\"][i])\n try:\n data_new[k+\"0.5\"][\"absolute\"][str(np.round(data_dict[k][\"DF_y0.5\"][i][0]))] = list(data_dict[k][\"O_y0.5\"][i])\n data_new[k+\"0.5\"][\"reduction\"][str(np.round(data_dict[k][\"DF_y0.5\"][i][0]))] = list(data_dict[k][\"red_y0.5\"][i])\n except:\n pass\n\n with open('data_new.json', 'w') as outfile:\n json.dump(data_new, outfile)",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def get_result_info():\n\n # read in the waveform channel_data json from file\n with open('channel_data.json') as json_data:\n channel_data = json.load(json_data)\n\n # alternatively you can define your channel_data here with something like\n # the following, be sure to include a time_step and y_values\n # channel_data = [\n # {\n # 'time_step': 5e-08,\n # 'y_values': [\n # 1.28,\n # 1.36,\n # 1.28,\n # 1.36,\n # 1.44,\n # 1.44,\n # 1.36,\n # ],\n # },\n # {\n # 'time_step': 5e-08,\n # 'y_values': [\n # 2.28,\n # 2.36,\n # 2.28,\n # 2.36,\n # 2.44,\n # 2.44,\n # 2.36,\n # ],\n # },\n # ]\n\n info = {\n 'instrument_type': 'TektronixMSO5204B',\n 'timebase_scale': 5e-06,\n 'v_divs': 10,\n 'h_divs': 10,\n 'slice_length': 1000,\n 'num_of_slices': 1,\n 'timebase_position': 0,\n 'total_points': 1000,\n 'channels': [\n {\n 'scale': 2,\n 'name': 'ch1',\n 'trigger_level': 2.64,\n 'offset': -5,\n 'time_step': channel_data[0]['time_step'],\n 'coupling':'dc',\n 'y_values': channel_data[0]['y_values'],\n 'waveform_measurements_valid':True,\n 'waveform_measurements':[\n {\n 'units': 's',\n 'display_name': 'Rise Time',\n 'value': 9.73639807316e-09\n },\n {\n 'units': 's',\n 'display_name': 'Rise Time',\n 'value': 3.1428570433389998e-08\n },\n {\n 'units': 's',\n 'display_name': 'Fall Time',\n 'value': 9.5249996557830006e-08\n },\n {\n 'units': 'Hz',\n 'display_name': 'Frequency',\n 'value': 82545.399969980004\n },\n {\n 'units': 's',\n 'display_name': 'Period',\n 'value': 1.2114545454549999e-05\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage RMS',\n 'value': 2.3313497073899998\n },\n {\n 'units': 's',\n 'display_name': 'Voltage Peak to Peak',\n 'value': 1.3600000000000001\n },\n {\n 'units': 'V',\n 'ivi_name': 'voltage_max',\n 'display_name': 'Voltage Max',\n 'value': 2.8399999999999999\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage Min',\n 'value': 1.48\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage High',\n 'value': 2.52\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage Low',\n 'value': 1.6399999999999999\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage Average',\n 'value': 2.29713713714\n },\n {\n 'units': 's',\n 'display_name': 'Width Negative',\n 'value': 1.0101071428570001e-05\n },\n {\n 'units': 's',\n 'display_name': 'Width Positive',\n 'value': 2.0134740259700001e-06\n },\n {\n 'units': 's',\n 'display_name': 'Duty Cycle Negative',\n 'value': 83.37969811968\n },\n {\n 'units': 's',\n 'display_name': 'Duty Cycle Positive',\n 'value': 16.62030188032\n },\n {\n 'units': 'V',\n 'display_name': 'Amplititude',\n 'value': 0.88\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage Cycle RMS',\n 'value': 1.82686084344\n },\n {\n 'units': 'V',\n 'display_name': 'Voltage Cycle Average',\n 'value': 1.8030421731999999\n },\n {\n 'units': 'V',\n 'display_name': 'Overshoot Negative',\n 'value': 18.181818181819999\n },\n {\n 'units': 'V',\n 'display_name': 'Overshoot Positive',\n 'value': 36.363636363639998\n }\n ]\n }\n ],\n 'config_excerpt': {\n 'trigger_edge_slope': 'positive',\n 'trigger': {\n 'source': 'ch1',\n 'type': 'edge',\n 'coupling': 'dc',\n 'level': 2.6400000000000001\n },\n 'acquisition': {\n 'record_length': 1000,\n 'start_time': -2.5000000000000001e-05,\n 'number_of_envelopes': 0,\n 'sample_rate': 20000000.0,\n 'time_per_record': 5.0000000000000002e-05,\n 'type': 'normal',\n 'number_of_averages': 16\n }\n }\n }\n return info",
"def test_KO_stock(self):\n self.test_from_file(\"../datasets/segmentation/KO_no_date.csv\", n=100, b=0, k=8, eps=0.4, show=False)",
"def thermalization_analysis():\n\n verbose = True\n run_pre_analysis = True\n mark_every = 50\n mc_cutoff = -1 # Skip every 100 points with 2000 therm-steps!!\n batch_folder = check_relative_path(\"data/thermalization_data\")\n base_figure_folder = check_relative_path(\"figures/\")\n base_figure_folder = os.path.join(base_figure_folder,\n \"thermalization_analysis\")\n check_folder(base_figure_folder, verbose=verbose)\n\n default_params = get_default_parameters(\n data_batch_folder=\"temp\", include_euclidean_time_obs=False)\n\n ############ COLD START #############\n cold_beta60_params = copy.deepcopy(default_params)\n cold_beta60_params[\"batch_folder\"] = batch_folder\n cold_beta60_params[\"batch_name\"] = \"B60_THERM_COLD\"\n cold_beta60_params[\"load_binary_file\"] = False\n cold_beta60_params[\"beta\"] = 6.0\n cold_beta60_params[\"topc_y_limits\"] = [-2, 2]\n cold_beta60_params[\"num_bins_per_int\"] = 32\n cold_beta60_params[\"bin_range\"] = [-2.5, 2.5]\n cold_beta60_params[\"hist_flow_times\"] = [0, 250, 600]\n cold_beta60_params[\"NCfgs\"] = get_num_observables(\n cold_beta60_params[\"batch_folder\"],\n cold_beta60_params[\"batch_name\"])\n cold_beta60_params[\"obs_file\"] = \"8_6.00\"\n cold_beta60_params[\"N\"] = 8\n cold_beta60_params[\"NT\"] = 16\n cold_beta60_params[\"color\"] = \"#377eb8\"\n\n ########## HOT RND START ############\n hot_rnd_beta60_params = copy.deepcopy(default_params)\n hot_rnd_beta60_params[\"batch_folder\"] = batch_folder\n hot_rnd_beta60_params[\"batch_name\"] = \"B60_THERM_HOT_RND\"\n\n ########## HOT RST START ############\n hot_rst_beta60_params = copy.deepcopy(default_params)\n hot_rst_beta60_params[\"batch_folder\"] = batch_folder\n hot_rst_beta60_params[\"batch_name\"] = \"B60_THERM_HOT_RST\"\n\n if run_pre_analysis:\n # Submitting distribution analysis\n cold_data = load_observable(cold_beta60_params)\n hot_rnd_data = load_observable(hot_rnd_beta60_params)\n hot_rst_data = load_observable(hot_rst_beta60_params)\n\n # # Loads post analysis data\n # cold_data = post_analysis.PostAnalysisDataReader(\n # [cold_beta60_params],\n # observables_to_load=cold_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # hot_rnd_data = post_analysis.PostAnalysisDataReader(\n # [hot_rnd_beta60_params],\n # observables_to_load=hot_rnd_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # hot_rst_data = post_analysis.PostAnalysisDataReader(\n # [hot_rst_beta60_params],\n # observables_to_load=hot_rst_beta60_params[\"observables\"],\n # verbose=verbose)\n\n # TODO: plot termaliations for the 3 different observables\n\n plot_types = [\"default\", \"loglog\", \"logx\", \"logy\"]\n\n y_labels = [\n [r\"$P$\", r\"$Q$\", r\"$E$\"],\n [r\"$\\frac{|P - \\langle P \\rangle|}{\\langle P \\rangle}$\", \n r\"$\\frac{|Q - \\langle Q \\rangle|}{\\langle Q \\rangle}$\",\n r\"$\\frac{|E - \\langle E \\rangle|}{\\langle E \\rangle}$\"],\n [r\"$|P - \\langle P \\rangle|$\", r\"$|Q - \\langle Q \\rangle|$\",\n r\"$|E - \\langle E \\rangle|$\"]]\n # y_labels[i_dr] = [r\"$\\langle P \\rangle$\", r\"$\\langle P \\rangle$\",\n # r\"$\\langle P \\rangle$\"]\n\n subplot_rows = [1, 3]\n\n # Limits to be put on plot\n x_limits = [[] for i in range(3)]\n y_limits = [[], [], []]\n\n data_representations = [\"default\", \"relerr\", \"abserr\"]\n\n obs_list = cold_data[\"obs\"].keys()\n\n x_label = r\"$t_\\mathrm{MC}$\"\n\n for i_dr, dr in enumerate(data_representations):\n for pt in plot_types:\n for i_obs, obs in enumerate(obs_list):\n for plot_rows in subplot_rows:\n\n # Sets up figure 
folder for observable\n figure_folder = os.path.join(base_figure_folder, obs)\n check_folder(figure_folder, verbose=verbose)\n\n # Sets up plot type folder \n figure_folder = os.path.join(figure_folder, pt)\n check_folder(figure_folder, verbose=verbose)\n\n if obs == \"energy\":\n correction_factor = - 1.0 / 64\n cold_data[\"obs\"][obs] *= correction_factor\n hot_rnd_data[\"obs\"][obs] *= correction_factor\n hot_rst_data[\"obs\"][obs] *= correction_factor\n\n # Retrieves data and makes modifications\n _cold_data = modify_data(\n cold_data[\"obs\"][obs][:mc_cutoff], dr)\n _hot_rnd_data = modify_data(\n hot_rnd_data[\"obs\"][obs][:mc_cutoff], dr)\n _hot_rst_data = modify_data(\n hot_rst_data[\"obs\"][obs][:mc_cutoff], dr)\n\n # Creates figure name\n figure_name = \"{0:s}_{1:s}_{2:s}_{3:d}plotrows.pdf\".format(\n obs, pt, dr, plot_rows)\n\n plot_data_array([np.arange(_cold_data.shape[0])\n for i in range(3)],\n [_cold_data, _hot_rnd_data,\n _hot_rst_data],\n [\"Cold start\", \"Hot start\",\n r\"Hot start, $RST$\"],\n x_label,\n y_labels[i_dr][i_obs],\n figure_name,\n figure_folder,\n plot_type=pt,\n x_limits=x_limits[i_obs],\n y_limits=y_limits[i_obs],\n mark_every=mark_every,\n subplot_rows=plot_rows)",
"def observed_frame_fluxes(self, f_numbers=[325], filters=None, verbose=True, n_proc=-1, percentiles=[2.5,16,50,84,97.5]): \n from astropy.table import Table\n \n if verbose:\n if filters is None:\n msg = 'Observed-frame f_numbers: {0}'\n print(msg.format(f_numbers))\n else:\n fnames = '\\n'.join([f'{i:>4} {f.name}'\n for i, f in enumerate(filters)])\n print('Observed-frame filters:\\n~~~~~~~~~~~~~~~~~~~ ')\n print(fnames)\n \n _tempfilt = TemplateGrid(self.zgrid, self.templates, \n RES=self.RES, \n f_numbers=np.array(f_numbers), \n add_igm=self.param['IGM_SCALE_TAU'],\n galactic_ebv=self.MW_EBV, \n Eb=self.param['SCALE_2175_BUMP'], \n n_proc=n_proc, verbose=verbose, \n cosmology=self.cosmology,\n array_dtype=self.ARRAY_DTYPE, \n filters=filters)\n \n if filters is None:\n NOBS = len(f_numbers) \n else:\n NOBS = len(filters)\n f_numbers = [i for i, f in enumerate(filters)]\n\n \n #izbest = np.argmax(self.pz, axis=1)\n izbest = self.izbest*1\n \n templ_fluxes = _tempfilt.tempfilt[izbest, :, :]\n \n if percentiles is not None:\n draws_resh = np.transpose(self.coeffs_draws, axes=(1,0,2))\n \n tab = Table()\n for i in range(NOBS):\n flux_i = (self.coeffs_best*templ_fluxes[:,:,i]).sum(axis=1) \n \n tab['obs{0}'.format(f_numbers[i])] = flux_i\n\n if percentiles is not None:\n draws_i = (draws_resh*templ_fluxes[:,:,i]).sum(axis=2) \n perc = np.percentile(draws_i, percentiles, axis=0)\n tab['obs{0}_p'.format(f_numbers[i])] = perc.T\n del(draws_i)\n \n key = 'name{0}'.format(f_numbers[i])\n tab.meta[key] = _tempfilt.filter_names[i]\n key = 'pivw{0}'.format(f_numbers[i])\n tab.meta[key] = _tempfilt.lc[i]\n \n if percentiles is not None:\n del(draws_resh)\n \n return tab",
"def temperatures():\n hi_act= session.query(measurements.tobs,measurements.date,measurements.station).\\\n filter(measurements.station == 'USC00519281').\\\n filter(measurements.date >last_12).\\\n order_by(measurements.date).all()\n hi_act_df=pd.DataFrame(hi_act).set_index('date')\n hi_act_dict=hi_act_df.to_dict()\n return jsonify(hi_act_dict)",
"def ext_city_temp_data(spark, input_path, output_path):\n tmp_df = spark.read.option(\"header\", True).option(\"inferSchema\",True).csv(f\"{input_path}/globalTemperaturesByCity.csv\")\n tmp_df = tmp_df.filter(tmp_df.AverageTemperature.isNotNull())\n tmp_df = tmp_df.filter(tmp_df.Country == \"United States\") \\\n .withColumn(\"rank\", F.dense_rank().over(Window.partitionBy(\"City\").orderBy(F.desc(\"dt\"))))\n temp_df = temp_df.filter(temp_df[\"rank\"] == 1).orderBy(\"City\")\n temp_df.write.mode(\"overwrite\").parquet(f\"{output_path}/us_city_temperature_data\")",
"def ccd_temp(verbose=False):\n \n fields = np.arange(1,9,1,dtype=int)\n dates = [get_date(n_) for n_ in fields]\n if verbose:\n print(fields)\n print(dates)\n \n for e, n in enumerate(fields):\n for i in range(3):\n hdu = fits.open('/home/ana/data/hectochelle/tiles/gd1_{0:d}/{1:s}/reduced/v3.0/specptg_gd1_{0:d}_cluster_{1:s}.ex{2:1d}.fits'.format(n, dates[e], i+1))\n print(n, i, hdu[0].header['CCDTEMP'], hdu[0].header['ROTANGLE'], hdu[0].header['POSANGLE'], hdu[0].header['HA'], hdu[0].header['PARANGLE'])",
"def test_calc_temp():\n for dataset in GlobalMultiData:\n dataset.get_fit_auto(65e3, MakeFig=False, show_fig=False)\n T = optoanalysis.calc_temp(GlobalMultiData[0], GlobalMultiData[1])\n assert T.n == pytest.approx(2.6031509367704735, rel=float_relative_tolerance)\n assert T.std_dev == pytest.approx(0.21312482508893446, rel=float_relative_tolerance)\n return None",
"def temperatures():\n\n return station_9281",
"def calc_Q_ext_wet(ceil_lam, r_md, RH):\n\n from ellUtils import nearest\n\n def read_f_RH(ceil_lam):\n \"\"\"\n Read in the f_RH data from csv\n EW 21/02/17\n\n :param filename:\n :return: data = {RH:... f_RH:...}\n\n filename must be in the form of 'calculated_ext_f(RH)_[ceil_lambda]nm.csv'\n \"\"\"\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_910_ext_f(RH)_910-910nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH\n\n def read_Q_dry_ext(ceil_lam):\n \"\"\"\n Read in the Q_ext for dry murk.\n EW 21/02/17\n\n :param filename:\n :param lam:\n :return: Q_ext_dry = {radius:... Q_ext_dry:...}\n\n Requres the wavelength to be passed, just so in the future, the 910 nm file is not incorrectly used by mistake when\n it should use the file for another wavelength.\n \"\"\"\n\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n filename = 'calculated_Q_ext_' + str(ceil_lam) + 'nm.csv'\n\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n Q_ext_dry = {'radius': raw[:, 0],\n 'Q_ext': raw[:, 1]}\n\n return Q_ext_dry\n\n RH_factor = 0.01 # Relative Humidity in 0.38 not 38%\n\n # calculate Q_ext_wet\n f_RH = read_f_RH(ceil_lam)\n Q_ext_dry = read_Q_dry_ext(ceil_lam)\n\n # create matric of Q_ext_dry based on r_md\n Q_ext_dry_matrix = np.empty(r_md.shape)\n f_RH_matrix = np.empty(RH.shape)\n\n # find Q_ext dry, given the dry radius matrix\n if r_md.size != 1:\n for i in range(r_md.shape[0]):\n idx = nearest(Q_ext_dry['radius'], r_md[i])[1]\n Q_ext_dry_matrix[i] = Q_ext_dry['Q_ext'][idx]\n\n else:\n idx = nearest(Q_ext_dry['radius'], r_md)[1]\n Q_ext_dry_matrix = Q_ext_dry['Q_ext'][idx]\n\n # find f(RH), given the RH matrix\n # need RH factor as f_RH['RH'] in units of frac not percentage\n if RH.size != 1:\n for i in range(RH.shape[0]):\n idx = nearest(f_RH['RH'], RH_factor * RH[i])[1]\n f_RH_matrix[i] = f_RH['f_RH'][idx]\n else:\n idx = nearest(f_RH['RH'], RH_factor * RH)[1]\n f_RH_matrix = f_RH['f_RH'][idx]\n\n # calculate Q_ext_wet\n Q = Q_ext_dry_matrix * f_RH_matrix\n # print np.mean(Q_ext_dry_matrix[:,:20])\n\n return Q, Q_ext_dry_matrix, f_RH_matrix"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adapted from nicholls_turton.ipynb. sst: sea surface temperature (K); ft_qv: mixed-layer top qv (kg kg^-1); use_NT: True or False | def calc_equil(sst, ft_qv, use_NT=False):
run_main(sst, ft_qv, use_NT)
# grab csv file
with open('dumpmodel.csv','r') as f:
df_result=pd.read_csv(f)
    # last time step into a named tuple
out=df_result.iloc[-1]
steady_state=make_tuple(out.to_dict())
# obtain steady-state values
dth=steady_state.deltheta
dqt=steady_state.delqv
thetal_m=steady_state.theta
qt_m=steady_state.qv
h=steady_state.h
press=tf.find_press(steady_state.h) #kPa
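    # free-tropospheric values just above the inversion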
thetal_ft = steady_state.theta + dth
qt_ft = steady_state.qv + dqt
zb = steady_state.LCL
zi = steady_state.h
we = steady_state.went
# calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)
gamma = 6e-3
thetal_3000 = thetal_ft + gamma*(3000-h)
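    # LTS: lower-tropospheric stability, theta_l at 3 km minus the mixed-layer theta_l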
LTS = thetal_3000 - steady_state.theta
# calculate delta_Fr
delta_Frstar = 82.0 # Wm^-2
Frlambda = 7.9 # Wm^-2, using with CTL from Gesso
delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1
# calculate LWP
rho = 1.
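    # LWP here scales with the square of cloud depth (zi - zb), with rho fixed at 1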
LWP = 0.5*rho*(zi-zb)**2
# put all required variables into output array
out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])
return out_array | [
"def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. #kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def esw(self, t):\n\n\n es0 = 6.1078\n\n # ES0 = SATURATION VAPOR RESSURE OVER LIQUID WATER AT 0C \n pol = t * (t * (t * (t * (t * (t * (t * (t * (t * \n - 3.0994571e-20 + 1.1112018e-17) - 1.7892321e-15) + \n 2.1874425e-13) - 2.9883885e-11) + 4.3884187e-9) - \n 6.1117958e-7) + 7.8736169e-5) - 0.0090826951) + 0.99999683\n\n # Computing 8th power\n r1 = pol\n r1 *= r1\n r1 *= r1\n ret_val = es0 / (r1 * r1)\n return ret_val\n \n \n def tcon(self, t, d):\n \"\"\" THIS FUNCTION RETURNS THE TEMPERATURE TCON (CELSIUS) AT THE LIFTING */\n CONDENSATION LEVEL, GIVEN THE TEMPERATURE T (CELSIUS) AND THE\n DEW POINT D (CELSIUS).\n\n BAKER,SCHLATTER 17-MAY-1982 Original version \"\"\"\n\n # COMPUTE THE DEW POINT DEPRESSION S.\n\n s = t - d;\n\n # THE APPROXIMATION BELOW, A THIRD ORDER POLYNOMIAL IN S AND T,\n # IS DUE TO HERMAN WOBUS. THE SOURCE OF DATA FOR FITTING THE\n # POLYNOMIAL IS UNKNOWN.\n\n dlt = s * (t * 0.001278 + 1.2185 + s * (s * 1.173e-5\n - 0.00219 - t * 5.2e-6))\n ret_val = t - dlt\n return ret_val\n \n def tsa(self, os, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. \"\"\"\n\n rocp = 0.28571482\n\n a = os # +273.16\n tq = 253.16\n d = 120.0\n \n i = 0\n for i in range(12):\n tqk = tq - 273.16\n d /= 2\n x = a * exp(- 2.6518986 * self.w(tqk, pres) / tq) - tq * pow((1000.0 / pres), rocp) \n if (fabs(x) <= 0.0):\n break\n if x < 0.0:\n sign = - 1\n else:\n sign = 1 \n tq += (d * sign)\n\n return tq # -273.16\n \n def w(self, temp, pres):\n \"\"\" Very little documentation on these following routines, so unsure \n of the origin and derivation of these algorithms. \"\"\"\n \n x = self.esat(temp)\n return (622.0 * x / (pres - x))\n \n def temp_of_te(self, te, press):\n import Temp_of_te\n return Temp_of_te.temp_of_te(te,press)\n\n def capeFunc(self, usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0):\n import CapeFunc\n return CapeFunc.capeFunc(usetv, p_dat_PPointer, tve_dat_PPointer, p0, th0, sh0)\n \n def lfcpar(self, eptpar, pcb, tcb, hcb, t1, t2, p1, ht1):\n \"\"\" his routine computes the level of free convection of a rising parcel.\n History.\n -------- \n Don Baker 01 Jun 85 Original version.\n Dale Perry Oct 96 Adapted code to work with WFO\n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n EPTPAR Real Moist adiabat along which parcel rises above\n the LCL (K).\n PCB Real LCL pressure (mb).\n TCB Real LCL temperature (K).\n HCB Real LCL height (m asl).\n T1 Real Array Parcel temperatures at lifted parcel levels (K).\n T2 Real Array Sounding temperatures at parcel levels (K).\n P1 Real Array Lifted parcel pressure levels (mb).\n HT1 Real Array Lifted parcel level heights (m asl).\n NPAR Integer Number of lifted parcel levels passed.\n\n On output:\n ---------- \n PLFC1 Real Level of free convection pressure (mb).\n HLFC1 Real Level of free convection height (m asl).\n TLFC1 Real Level of free convection temperature (K). 
\"\"\"\n \n lfcReturn = zeros((1, 6), 'float32')\n TOLER = 0.05\n npar = p.shape[0]\n print \"npar=\", npar\n # Find the location in the parcel arrays that corresponds to the LCL\n i = 0\n for ii in range(npar) :\n i = ii\n if math.fabs(p1[i] - pcb) < 0.1 :\n break\n else :\n continue\n print \"found pressure at \", i\n # Initially assign flag values to the LFC in case no buoyancy exists.\n plfc1 = meteo.TOP_FLG\n hlfc1 = meteo.TOP_FLG\n tlfc1 = meteo.TOP_FLG\n plfc2 = meteo.TOP_FLG\n hlfc2 = meteo.TOP_FLG\n tlfc2 = meteo.TOP_FLG\n \n if i == npar :\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Check and see if parcel is positively buoyant at the LCL already. If\n # this is true, then the LFC is coincident with the LCL. This may be\n # the case in 00Z soundings when a super-adiabatic layer exists near\n # the surface.\n \n if t1[i] >= t2[i] :\n plfc1 = pcb\n hlfc1 = hcb\n tlfc1 = tcb\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n # Loop upward from the LCL until the parcel temperature becomes warmer\n # than the environment. If this does not occur, no positive buoyancy\n # exists and the routine exits with whatever flag value was assigned to\n # the level of free convection.\n # To prevent a stack out of bounds error when I=1, set it equal to the\n # next level if I=1.\n \n if i == 0 : \n i = 1\n \n runLoop = True\n print \"entering loop1 with i=\", i\n for j in range(i, npar) :\n if t1[j] >= t2[j] :\n pt = p1[j]\n pb = p1[j - 1]\n plog1 = math.log(p1[j])\n plog3 = math.log(p1[j - 1])\n \n print \"entering inner loop1 j=\", j\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow((pm / 1000.0), 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[j], t2[j - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc1 = pm\n hlfc1 = self.interp1(ht1[j], ht1[j - 1], plog1, math.log(plfc1), plog3)\n tlfc1 = t1m\n runLoop = False;\n print \"attempting to break out of loop 1\"\n break\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n if runLoop != True :\n break\n else :\n continue\n\n # Continue looping to find a possible second LFC per conditions\n # above rules.\n j = j + 1\n print \"entering loop2 with j=\", j\n for k in range(j, npar) :\n if t1[k] >= t2[k] :\n pt = p1[k]\n pb = p1[k - 1]\n plog1 = math.log(p1[k])\n plog3 = math.log(p1[k - 1])\n \n print \"entering inner loop2 k=\", k\n for count in range(100) :\n pm = 0.5 * (pb + pt)\n plog2 = math.log(pm)\n etpar = eptpar * math.pow(pm / 1000.0, 0.286)\n t1m = self.temp_of_te(etpar, pm)\n t2m = self.interp1(t2[k], t2[k - 1], plog1, plog2, plog3)\n if math.fabs(t1m - t2m) <= TOLER :\n plfc2 = pm\n hlfc2 = self.interp1(ht1[k], ht1[k - 1], plog1, math.log(plfc2, plog3))\n tlfc2 = t1m\n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n print \"exiting loop2 k=\", k\n return lfcReturn\n if (t1m - t2m) > TOLER :\n pt = pm\n if (t2m - t1m) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plfc1\n lfcReturn[0][1] = hlfc1\n lfcReturn[0][2] = tlfc1\n lfcReturn[0][3] = plfc2\n lfcReturn[0][4] = hlfc2\n lfcReturn[0][5] = tlfc2\n return lfcReturn\n \n def richno(self, ht, hw, uw, vw, rho, buoy):\n 
\"\"\" Statement of purpose.\n Compute the dimensionless bulk Richardson number as defined by\n Weisman and Klemp (1982).\n History.\n -------- \n Tom Schlatter Late 1982 Original code based on MWR article by \n Weisman and Klemp (1982).\n D. Baker 01 Jun 84 Removed computation of positive energy...\n made it an input argument.\n D. Baker 01 Jul 85 Updated code for documentation.\n J. Ramer 16 Jun 92 Added divide-by-zero prevention.\n D. Perry 10 Oct 96 Adapted code for WFO \n\n Description of input and output.\n --------------------------------\n On input:\n --------- \n HT Sounding heights (m asl).\n HW Heights of wind reports (m asl).\n UW Wind u-components (m/s).\n VW Wind v-components (m/s).\n RHO Air density at each sounding level (kg/m**3).\n BUOY Positive buoyant energy (J/kg).\n\n On output:\n ---------- \n RICHNUM Dimensionless bulk Richardson number. \"\"\"\n \n \n mnl = 500\n nlvls = ht.shape[0]\n nw = uw.shape[0]\n HALFKM = 500.0\n SIXKM = 6000.0\n richnum = meteo.MISSING\n rhow = rho\n # Interpolate an air density value to each reported wind level\n if nlvls != nw :\n rhow = self.wndrho(rho, ht, hw)\n else :\n for i in range(nlvls) :\n rhow[i] = rho[i]\n \n # QC\n qc = 1\n for i in range (2, nw) :\n if uw[i] != uw[0] and vw[i] != vw[0] :\n qc = 0\n \n if nlvls < 3 or nlvls > 500 :\n qc = 1\n \n for i in range(nw) :\n if rhow[i] <= 0.0 : \n qc = 1\n break\n \n for i in range(2, nw) :\n if (hw[i] - hw[i - 1]) <= 0.0 :\n qc = 1\n break\n \n for i in range(2, nlvls) :\n if (ht[i] - ht[i - 1]) <= 0.0 :\n qc = 1\n \n if qc == 1 :\n return richnum\n \n # initialize sums\n \n sumu = 0\n sumv = 0\n sumr = 0\n sumul = 0\n sumvl = 0\n sumrl = 0\n \n # define shear layer bounds (above ground level)\n hbl = hw[0] + HALFKM\n htop = hw[0] + SIXKM\n \n if hw[nw] < htop or hw[1] > htop :\n return richnum\n \n # Loop to calculate shear terms\n \n i = 0\n rulay = 0.5 * (rhow[i] * uw[i])\n rvlay = 0.5 * (rhow[i] * vw[i])\n rlay = 0.5 * rhow[i]\n dz = hw[i]\n \n for i in range(1, nw) :\n rulay = 0.5 * (rhow[i] * uw[i] + rhow[i - 1] * uw[i - 1])\n rvlay = 0.5 * (rhow[i] * vw[i] + rhow[i - 1] * vw[i - 1])\n rlay = 0.5 * (rhow[i] + rhow[i - 1])\n dz = hw[i] - hw[i - 1]\n if hw[i] > htop :\n break\n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n if hw[i] > hbl and i > 1 :\n sumul = sumul + rulay * dz\n sumvl = sumvl + rvlay * dz\n sumrl = sumrl + rlay * dz\n \n sumu = sumu + rulay * dz\n sumv = sumv + rvlay * dz\n sumr = sumr + rlay * dz\n \n if sumr <= 0.0 :\n u6 = 0.0\n v6 = 0.0\n else : \n u6 = sumu / sumr\n v6 = sumv / sumr\n \n if sumrl <= 0.0 :\n ul = 0.0\n vl = 0.0\n else :\n ul = sumul / sumrl\n vl = sumvl / sumrl\n \n # calculate one half the square of the shear vector in the lowest 6 km\n u6 = u6 - ul\n v6 = v6 - vl\n ske = 0.5 * (u6 * u6 + v6 * v6)\n \n # compute the bulk richardson number\n \n if ske > 0 :\n richnum = buoy / ske\n \n return richnum\n \n def wndrho(self, rho, ht, hw):\n \"\"\" PURPOSE:\n --------\n INTERPOLATE TO DETERMINE DENSITY AT WIND LEVELS GIVEN DENSITY AT\n PRESSURE LEVELS IN A SOUNDING. INTERPOLATION IS LINEAR BY HEIGHT.\n\n T. Schlatter late 82 Probable original author.\n D. Baker 17 Dec 85 Added doc and indentation (?)\n D. Baker (?) after Dec 85 Replaced 100 loop with 300 loop. It\n appears that the interpolation is out.\n J. Wakefield 17 Nov 92 Added parameter list documentation.\n D. 
Perry Sep 96 Adapted code to work with WFO.\n\n Argument I/O Description\n -------- --- -----------------------------------------------\n Rho I Density (kg m-3) at sounding levels.\n Ht I Heights (m) at sounding levels.\n NLvls I Number of sounding levels.\n HW I Heights (m) of wind obs.\n NW I Number of wind obs.\n RhoW O Density interpolated to wind obs heights. \"\"\"\n \n \n # Interpolate to derive density at wind heights\n j = 0\n nw = len(hw)\n skip = False\n for i in range(nw) :\n if skip == True :\n break\n k = j\n for j in range(k, nlvls - 1) :\n if hw[i] >= ht[j] and hw[i] <= ht[j + 1] :\n rhow[i] = self.interp1(rho[j], rho[j + 1], ht[j], hw[i], ht[j + 1])\n skip = True\n break\n \n rhow[0] = rho[0]\n k1 = 0\n k2 = 1\n \n for i in range(1, nw) :\n if ht[k2] < hw[i] :\n k1 = k2\n k2 = k2 + 1\n if k2 > nlvls :\n for j in range(i, nw) :\n rhow[j] = rho[k1]\n return rhow\n \n rhow[i] = self.interp1(rho[k1], rho[k2], ht[k1], hw[i], ht[k2])\n \n return rhow\n \n def lclpar(self, meanmix, ts, p, ht, t, td):\n \"\"\" Statement of purpose.\n ---------------------\n This routine computes the pressure, height, and temperature of the\n lifting condensation level (LCL) from a sounding.\n \n History.\n -------- \n Dale Perry 20 Sep 96 Bootlegged version of cclpar.f modified for\n determining the LCL.\n \n Description of input and output.\n --------------------------------\n On input:\n --------- \n MEANMIX Mixing ratio used to intersect the sounding (g/kg).\n TS Surface temp (12Z-forecast max temp;00Z-sfc temp) (K). \n P Sounding pressures (mb).\n HT Sounding heights (m asl).\n T Sounding temperatures (K).\n TD Sounding dewpoint temperatures (K).\n \n On output:\n ---------- \n PLCL Pressure of the lifting condensation level (mb).\n TLCL Temperature of the lifting condensation level (K).\n HTLCL Height of the lifting condensation level (m asl).\n \n User notes:\n -----------\n The low level mean mixing ratio is input to this routine...\n computed outside. \"\"\"\n\n TOLER = 0.5\n nlvls = len(p)\n lfcReturn = zeros((1, 3), 'float32')\n \n # Loop up through sounding until mixing ratio line corsses the dry \n # adiabat through the surface temperature. Initially set the LCL\n # parameters to MISSING values in case no LCL is found\n \n plcl = meteo.TOP_FLG\n hlcl = meteo.TOP_FLG\n tlcl = meteo.TOP_FLG\n t2 = ts * math.pow(1000.0 / p[0], 0.286)\n \n for i in range(nlvls) :\n t1 = self.temp_mixratio(p[i], meanmix)\n t1 = t1 * math.pow(1000.0 / p[i], 0.286)\n if t1 >= t2 :\n break\n \n if i == 1 : #LCL at the surface\n plcl = p[0]\n hlcl = ht[0]\n tlcl = t[0]\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn\n \n # We were at the top of the sounding, but 'I' got incremented one more\n # beyond. Reset it to the top of the sounding index 'NLVLS'\n if i > nlvls :\n i = nlvls - 1\n \n pt = p[i]\n pb = p[i - 1]\n plog1 = math.log(p[i])\n plog3 = math.log(p[i - 1])\n \n # Iterate to find the LCL. 
Keep cutting level in half until the point\n # of intersection is found\n \n for count in range(100) :\n pm = 0.5 * (pt + pb)\n plog2 = math.log(pm)\n t1 = self.temp_mixratio(pm, meanmix)\n t1 = t1 * math.pow(1000.0 / pm, 0.286)\n if math.fabs(t1 - t2) <= TOLER :\n plcl = pm\n tlcl = t1 * math.pow(plcl / 1000.0, 0.286) \n hlcl = self.interp1(ht[i], ht[i - 1], plog1, math.log(plcl), plog3)\n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn \n if (t1 - t2) > TOLER :\n pt = pm\n if (t2 - t1) > TOLER :\n pb = pm\n \n lfcReturn[0][0] = plcl\n lfcReturn[0][1] = hlcl\n lfcReturn[0][2] = tlcl\n return lfcReturn",
"def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we",
"def dT_SZ(nu, y, z=0, f_zc=1, gamma=3.59, nu0=0.31, T_cmb=2.725, T0=24.1, alpha=-2.59):\n T_radio = T_R(nu, z=z, nu0=nu0, T0=T0, alpha=alpha)\n return T_cmb*dT_T_tSZ(y) + T_radio*dT_T_R(y, gamma=gamma, f_zc=f_zc)",
"def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial",
"def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T",
"def flux_devel_test(itc_w,sst): \n # Check for nan's\n if np.isnan(sst) or np.isnan(itc_w):\n QC = np.nan\n elif itc_w < 0 or sst < 0:\n QC = 0\n elif itc_w <=75 and sst <=30:\n QC = 1\n elif itc_w <=100 and sst <=100:\n QC = 2\n elif itc_w <=1000 and sst <=1000:\n QC = 3\n else:\n QC = 0 \n return QC",
"def _chianti_temp_emiss(\n goes_ts, satellite_number, secondary=0, abundance=\"coronal\", remove_scaling=False\n):\n\n longflux = goes_ts.quantity(\"xrsb\").to(u.W / u.m**2)\n shortflux = goes_ts.quantity(\"xrsa\").to(u.W / u.m**2)\n\n if \"xrsb_quality\" in goes_ts.columns:\n longflux[goes_ts.to_dataframe()[\"xrsb_quality\"] != 0] = np.nan\n shortflux[goes_ts.to_dataframe()[\"xrsa_quality\"] != 0] = np.nan\n\n obsdate = parse_time(goes_ts._data.index[0])\n\n longflux_corrected = longflux\n # For some reason that I can't find documented anywhere other than in the IDL code,\n # the long channel needs to be scaled by this value for GOES-6 before 1983-06-28.\n if obsdate <= Time(\"1983-06-28\") and satellite_number == 6:\n longflux_corrected = longflux * (4.43 / 5.32)\n\n shortflux_corrected = shortflux\n # Remove the SWPC scaling factors if needed.\n # The SPWC scaling factors of 0.7 and 0.85 for the XRSA and XSRB channels\n # respectively are documented in the NOAA readme file linked in the docstring.\n if remove_scaling and satellite_number >= 8 and satellite_number < 16:\n longflux_corrected = longflux_corrected / 0.7\n shortflux_corrected = shortflux / 0.85\n\n # Measurements of short channel flux of less than 1e-10 W/m**2 or\n # long channel flux less than 3e-8 W/m**2 are not considered good.\n # Ratio values corresponding to such fluxes are set to 0.003.\n index = np.logical_or(\n shortflux_corrected < u.Quantity(1e-10 * u.W / u.m**2),\n longflux_corrected < u.Quantity(3e-8 * u.W / u.m**2),\n )\n fluxratio = shortflux_corrected / longflux_corrected\n fluxratio.value[index] = u.Quantity(0.003, unit=u.dimensionless_unscaled)\n\n # Work out detector index to use from the table response based on satellite number\n # The counting in the table starts at 0, and indexed in an odd way for the GOES-R\n # primary/secondary detectors.\n if satellite_number <= 15:\n sat = satellite_number - 1 # counting starts at 0\n else:\n sat = (\n 15 + 4 * (satellite_number - 16) + secondary\n ) # to figure out which detector response table to use (see notes)\n\n resp_file_name = manager.get(\"goes_chianti_response_table\")\n response_table = fits.getdata(resp_file_name, extension=1)\n rcor = response_table.FSHORT_COR / response_table.FLONG_COR # coronal\n rpho = response_table.FSHORT_PHO / response_table.FLONG_PHO # photospheric\n\n table_to_response_em = 10.0 ** (\n 49.0 - response_table[\"ALOG10EM\"][sat]\n ) # for some reason in units of 1e49 (which was to stop overflow errors since 10^49 was\n # too big to express as a standard float in IDL.)\n\n modeltemp = response_table[\"TEMP_MK\"][sat]\n modelratio = rcor[sat] if abundance == \"coronal\" else rpho[sat]\n\n # Calculate the temperature and emission measure:\n\n # get spline fit to model data to get temperatures given the input flux ratio.\n spline = interpolate.splrep(modelratio, modeltemp, s=0)\n temp = interpolate.splev(fluxratio, spline, der=0)\n\n modelflux = (\n response_table[\"FLONG_COR\"][sat]\n if abundance == \"coronal\"\n else response_table[\"FLONG_PHO\"][sat]\n )\n\n modeltemp = response_table[\"TEMP_MK\"][sat]\n\n spline = interpolate.splrep(modeltemp, modelflux * table_to_response_em, s=0)\n denom = interpolate.splev(temp, spline, der=0)\n\n emission_measure = longflux_corrected.value / denom\n\n goes_times = goes_ts._data.index\n df = pd.DataFrame(\n {\"temperature\": temp, \"emission_measure\": emission_measure * 1e49},\n index=goes_times,\n )\n\n units = {\"temperature\": u.MK, \"emission_measure\": u.cm ** (-3)}\n\n header = 
{\"Info\": \"Estimated temperature and emission measure\"}\n\n # return a new timeseries with temperature and emission measure\n temp_em = ts.TimeSeries(df, header, units)\n\n return temp_em",
"def initTvtk3Dold(self):\r\n nc = self.Nc\r\n nz = self.Nkmax+1\r\n nv = len(self.xp)\r\n \r\n # Create the index to vertices (nodes) and the coordinates of the vertices (verts) arrays\r\n nodes = np.zeros((nc*(nz-1),6))\r\n pt1=0\r\n for k in range(1,nz):\r\n nodes[pt1:nc*k,0] = self.cells[:,0]+(k-1)*nv\r\n nodes[pt1:nc*k,1] = self.cells[:,1]+(k-1)*nv\r\n nodes[pt1:nc*k,2] = self.cells[:,2]+(k-1)*nv\r\n nodes[pt1:nc*k,3] = self.cells[:,0]+k*nv\r\n nodes[pt1:nc*k,4] = self.cells[:,1]+k*nv\r\n nodes[pt1:nc*k,5] = self.cells[:,2]+k*nv\r\n pt1+=nc\r\n \r\n verts = np.zeros((nv*nz,3))\r\n pv1 = 0\r\n for k in range(0,nz):\r\n verts[pv1:pv1+nv,0] = self.xp\r\n verts[pv1:pv1+nv,1] = self.yp\r\n verts[pv1:pv1+nv,2] = -self.z_w[k] * self.zscale\r\n pv1 += nv\r\n \r\n wedge_type = tvtk.Wedge().cell_type\r\n self.ug = tvtk.UnstructuredGrid(points=verts)\r\n self.ug.set_cells(wedge_type, nodes)\r\n \r\n self.ug.cell_data.scalars = self.data\r\n self.ug.cell_data.scalars.name = 'suntans_scalar'",
"def adjust_surface_temperature( ds_data, ds_topo, debug=False, verbose=None ):\n if verbose is None : verbose = default_verbose\n if verbose: print('\\nAdjusting surface temperature...')\n if debug: print('adjust_surface_temperature: DEBUG MODE ENABLED')\n\n # Make sure to use PHIS_d if file contains both\n if 'PHIS_d' in ds_topo.variables : \n if 'ncol' in ds_topo.variables: ds_topo = ds_topo.drop(['ncol'])\n if 'PHIS' in ds_topo.data_vars: ds_topo = ds_topo.drop(['PHIS'])\n ds_topo = ds_topo.rename({'PHIS_d':'PHIS','ncol_d':'ncol'})\n\n # Check for required variables in input datasets\n if 'TS' not in ds_data.variables : \n raise KeyError('sfc temperature (TS) variable is missing from ds_data')\n if 'PHIS' not in ds_data.variables : \n raise KeyError(f'sfc geopotential (PHIS) variable is missing from ds_data')\n if 'PHIS' not in ds_topo.variables : \n raise KeyError(f'sfc geopotential (PHIS) variable is missing from ds_topo')\n if ds_data.sizes['ncol'] != ds_topo.sizes['ncol'] : \n topo_ncol = ds_topo.sizes['ncol']\n data_ncol = ds_data.sizes['ncol']\n raise IndexError(f'dimensions of input datasets do not match: data_ncol={data_ncol} / topo_ncol={topo_ncol} ')\n\n if debug :\n # Debugging print statements\n print('Before Adjustment:')\n print_stat(ds_data['PHIS'],name='PHIS (old)')\n print_stat(ds_topo['PHIS'],name='PHIS (new)')\n print_stat(ds_data['TS'],name='TS (old)')\n\n # save attributes to restore later\n ts_attrs = ds_data['TS'].attrs\n\n ds_data['TS'].values = ds_data['TS'] - ( ds_data['PHIS'] - ds_topo['PHIS'] )*lapse/gravit\n\n # restore attributes\n ds_data['TS'].attrs = ts_attrs\n\n if debug :\n # Debugging print statements\n print('After Adjustment:')\n print_stat(ds_data['TS'],name='TS (new)')\n\n return",
"def test_virtual_temperature():\n t = 288. * units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n tv = virtual_temperature(t, qv)\n assert_almost_equal(tv, 288.2796 * units.kelvin, 3)",
"def three_sat():\n # TODO",
"def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux",
"def initTvtk3D(self):\r\n nc = self.Nc\r\n nz = self.Nkmax+1\r\n nv = len(self.xp)\r\n \r\n self.returnMask3D()\r\n self.nActive = np.sum(self.mask3D) # Total number of active cells\r\n \r\n nodes = np.zeros((self.nActive,6))\r\n pt1=0\r\n for k in range(1,nz):\r\n masklayer = self.mask3D[k-1,:]\r\n nc = np.sum(masklayer)\r\n pt2 = pt1+nc \r\n nodes[pt1:pt2,0] = self.cells[masklayer,0]+(k-1)*nv\r\n nodes[pt1:pt2,1] = self.cells[masklayer,1]+(k-1)*nv\r\n nodes[pt1:pt2,2] = self.cells[masklayer,2]+(k-1)*nv\r\n nodes[pt1:pt2,3] = self.cells[masklayer,0]+k*nv\r\n nodes[pt1:pt2,4] = self.cells[masklayer,1]+k*nv\r\n nodes[pt1:pt2,5] = self.cells[masklayer,2]+k*nv\r\n pt1=pt2\r\n #print k, nc\r\n \r\n self.verts = np.zeros((nv*nz,3))\r\n pv1 = 0\r\n for k in range(0,nz):\r\n self.verts[pv1:pv1+nv,0] = self.xp\r\n self.verts[pv1:pv1+nv,1] = self.yp\r\n self.verts[pv1:pv1+nv,2] = -self.z_w[k] * self.zscale\r\n pv1 += nv\r\n \r\n wedge_type = tvtk.Wedge().cell_type\r\n self.ug = tvtk.UnstructuredGrid(points=self.verts)\r\n self.ug.set_cells(wedge_type, nodes)\r\n \r\n self.ug.cell_data.scalars = self.data\r\n self.ug.cell_data.scalars.name = 'suntans_scalar'",
"def test_mixed_layer():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n mixed_layer_temperature = mixed_layer(pressure, temperature, depth=250 * units.hPa)[0]\n assert_almost_equal(mixed_layer_temperature, 16.4024930 * units.degC, 6)",
"def testtemperature(self) -> None:\r\n assert round(abs(298.15 - self.data.temperature), 7) == 0",
"def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K",
"def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks a service's replication levels based on how the service's replication should be monitored (smartstack or mesos). | def check_service_replication(
instance_config,
all_tasks,
smartstack_replication_checker,
):
expected_count = instance_config.get_instances()
log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id))
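    # look up this instance's smartstack proxy port; it may be None if no proxy port is configured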
proxy_port = marathon_tools.get_proxy_port_for_instance(
name=instance_config.service,
instance=instance_config.instance,
cluster=instance_config.cluster,
soa_dir=instance_config.soa_dir,
)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
smartstack_replication_checker=smartstack_replication_checker,
)
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks,
) | [
"def ensure_service_level(self):\n try:\n self.session = SQLalchemyUtil.get_session()\n self.logger.info('Start to check service level.')\n current_tenant_mppdb_group_list = self.database_analyzer.get_current_tenant_mppdb_group_list()\n unsatisfied_tenant_mppdb_groups = self.find_unsatisfied_tenant_mppdb_groups(current_tenant_mppdb_group_list)\n\n if len(unsatisfied_tenant_mppdb_groups) != 0:\n self.logger.info('Find tenant MPPDB groups whose service level agreements are violated %s' % str(unsatisfied_tenant_mppdb_groups))\n for tenant_mppdb_group in unsatisfied_tenant_mppdb_groups:\n self.fix_unsatisfied_tenant_mppdb_group(tenant_mppdb_group, current_tenant_mppdb_group_list)\n ConsolidationLogDAO.update_consolidation_log_table(self.session, DateConverter.local_datetime_to_gmt_datetime(datetime.now()))\n self.logger.info('All unsatisfied tenant MPPDB groups are fixed.')\n else:\n self.logger.info('The service level of all tenant MPPDB groups are satisfied.')\n except:\n self.logger.exception(\"Fail to finish sla monitor\")",
"def cmd_all_service_check(self, arg):\n print(f'Web service is {\"\" if server_app.check_service() else \"not\"} running.')\n print(f'Database service is {\"\" if database.check_service() else \"not\"} running.')",
"def test_list_service_health_checks(self):\n with self.rbac_utils.override_role(self):\n self.service_client.list_service_health_checks()",
"def test_read_namespaced_replica_set_status(self):\n pass",
"def replication_status(self):\n psql = postgresql_svc.PSQL()\n try:\n query_out = psql.execute(self.replication_status_query)\n except PopenError, e:\n if 'function pg_last_xact_replay_timestamp() does not exist' in str(e):\n raise BaseException('This version of PostgreSQL server does not support replication status')\n else:\n raise e\n query_result = self._parse_query_out(query_out)\n\n is_master = int(__postgresql__[OPT_REPLICATION_MASTER])\n\n if query_result['xlog_delay'] is None:\n if is_master:\n return {'master': {'status': 'up'}}\n return {'slave': {'status': 'down',\n 'error': query_result['error']}}\n return {'slave': {'status': 'up',\n 'xlog_delay': query_result['xlog_delay']}}",
"def service_level(self, level=None):\n if level is None:\n flags = self._flags()\n if flags['auto_service_level']:\n return 0\n return flags['service_level']\n else:\n if self._request('SL', _service_levels[level])[0]:\n return level\n\n raise EvseError",
"def mmo_is_cfg_rs(self, mmo_connection):\n s = None\n if self.mmo_is_configsrv(mmo_connection):\n try:\n r = mmo_connection[\"admin\"].command(\"replSetGetStatus\")\n s = True\n except Exception as exception:\n if \"not running with --replSet\" in str(exception):\n s = False\n else:\n raise exception\n else:\n raise Exception(\"Not a config server\")\n return s",
"def check_consul_services(con):\n whitelist = get_whitelist(con)\n\n if whitelist:\n LOG.warning(\"Checks from the following hosts will be ignored, \" +\n \"because service/rebootmgr/ignore_failed_checks is set: {}\".format(\", \".join(whitelist)))\n\n local_checks = get_local_checks(con, tags=[\"rebootmgr\"])\n LOG.debug(\"relevant_checks: %s\" % local_checks)\n\n for name, check in get_failed_cluster_checks(con, local_checks).items():\n if check[\"Node\"] in whitelist:\n continue\n\n LOG.error(\"There were failed consul checks. Exit\")\n sys.exit(EXIT_CONSUL_CHECKS_FAILED)\n\n LOG.info(\"All checks passed\")",
"def check_service_running(cluster, name, role=None):\n if role is None:\n role = \"monitoring\"\n nodes = cluster.filter_by_role(role)\n for node in nodes:\n node.os.manage_service(name, \"status\")",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def check_service(check_service_job):\n\n try:\n service = check_service_job['service']\n tag = check_service_job['tag']\n consul_config = check_service_job['consul_config']\n sink_config = check_service_job['sink_config']\n except KeyError as e:\n log.error(\n \"check_service | Missing key {e} in check_service_job dict\".format(\n e=e))\n raise e\n\n consul = Consul(consul_config)\n sink = Sink(sink_config)\n\n dc = consul.get_dc()\n log.debug('check_service | Service:{service} Tag:{tag} DC:{dc}'.format(\n service=service, tag=tag, dc=dc))\n\n consul_health_service = consul.get_health_service(service, tag)\n ok, critical = get_node_status(consul_health_service)\n\n sink.ok_count(ok, service, dc, tag)\n sink.critical_count(critical, service, dc, tag)\n\n if ok + critical > 0:\n sink.ok_percent((ok / (ok + critical)) * 100, service, dc, tag)\n sink.critical_percent((critical / (ok + critical)) * 100,\n service, dc, tag)\n\n return 0",
"def _check_rac_srv(cfg, warning=None, critical=None):\n regex = re.compile(\"Instance .* is running on node .*\")\n bin_name = \"srvctl\"\n _check_attrs(cfg, [\"sid\", \"oh\"])\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n try:\n args = bin_name + \" status database -d {sid}\".format(sid=cfg.sid)\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from crsctl\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n running, not_running = 0, 0\n for l in out.split(os.linesep):\n if l.lstrip().rstrip() == \"\":\n continue\n if regex.search(l.lstrip().rstrip()):\n running += 1\n else:\n not_running += 1\n\n if not_running >= running:\n print(\"you got {0} nodes was not running\".format(not_running))\n return CRITICAL\n if not_running > 0:\n print(\"you got {0} nodes was not running\".format(not_running))\n return WARNING\n\n print(\"all {0} nodes is running\".format(running))\n return OK\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN",
"def test_create_service_health(self):\n with self.rbac_utils.override_role(self):\n self._create_service_health_check()",
"def test_show_service_health(self):\n new_health_check = self._create_service_health_check()\n with self.rbac_utils.override_role(self):\n self.service_client.show_service_health_check(\n new_health_check['uuid'])",
"def check_replication_restarted(self):\n repl_restart_fail = self._input.param(\"fail_repl_restart\", False)\n restarted = False\n if not self.__is_cluster_run():\n goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\\\n + '/goxdcr.log*'\n for node in self._input.servers:\n if self.__is_cluster_run():\n goxdcr_log = NodeHelper.get_goxdcr_log_dir(node)\\\n + '/goxdcr.log*'\n reasons, new_repl_res_count = NodeHelper.check_goxdcr_log(node,\n \"Try to fix Pipeline\",\n goxdcr_log=goxdcr_log,\n print_matches=True)\n self.log.info(\"Initial replication restart count on {0} :{1}, now :{2}\".\n format(node.ip,\n self._repl_restart_count_dict[node.ip],\n new_repl_res_count))\n if (new_repl_res_count > self._repl_restart_count_dict[node.ip]):\n new_count = new_repl_res_count - \\\n self._repl_restart_count_dict[node.ip]\n restarted = True\n self.log.info(\"Number of new replication restarts this run: %s\"\n % new_count)\n for reason in reasons[-new_count:]:\n self.log.info(reason)\n if repl_restart_fail and restarted:\n self.fail(\"Replication restarted on one of the nodes, scroll above\"\n \"for reason\")",
"def test_replication(self):\n self.webHDFS.create(TEST_DIR_PATH + '/foo.txt', \"foobar\", True)\n self.webHDFS.set_replication(TEST_DIR_PATH + '/foo.txt', 2)\n file_status = self.webHDFS.status(TEST_DIR_PATH + '/foo.txt')\n self.assertEqual(file_status['replication'], 2)",
"def test_read_namespaced_replication_controller_dummy_scale(self):\n pass",
"def test_patch_namespaced_replica_set_status(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Traverses backwards through predecessors from end | def _deconstruct_path(predecessors, end):
if end not in predecessors:
return None
current = end
path = []
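    # Walk backwards along the predecessor links until a node with no recorded predecessor (the start) is reached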
while current:
path.append(current)
current = predecessors.get(current)
return list(reversed(path)) | [
"def reverse_iterative(self):\n \"\"\"O(n) / O(1) solution.\"\"\"\n pre = None\n current = self.head\n while current is not None:\n next = current.next\n current.next = pre\n pre = current\n current = next\n self.head = pre",
"def forward_and_backward(sorted_nodes):\n for node in sorted_nodes:\n node.forward()\n\n for node in reversed(sorted_nodes):\n # another way to call reversed iteration on a list is [::-1]\n node.backward()",
"def __reversed__(self): \n yield from self._traverse_backward(self.root)",
"def backtrack(start, end, prev):\n backtracked = False\n curr_node = end\n # instantiate path as list with destination only\n path = [curr_node]\n while not backtracked:\n # retrieve previous node\n prev_node = prev[curr_node]\n # insert it at beginning of path\n path.insert(0, prev_node)\n # move onto previous node as current node for next iteration\n curr_node = prev_node\n # break loop if we reached start\n if curr_node == start:\n backtracked = True\n return path",
"def reverse(self):\n\n previous = None \n current = self.head\n\n while current:\n temp_buffer = current.next_node\n current.next_node = previous\n\n previous = current\n current = temp_buffer\n\n self.head = previous",
"def iter_reverse(self):\n node = self.tail\n while node is not None:\n yield node\n node = node.last_node",
"def reverse(self):\n current = self.head\n previous = None \n while current is not None:\n next_node = current.next_node \n current.next_node = previous\n current, previous = next_node, current \n self.head = previous",
"def iter_backward(self):\n element = self.last_element # вызов последнего элемента списка\n while element is not None: # выполнение цикла пока существует вызываемый элемент\n yield element # возвратить элемент\n element = element.link_previous # вызвать предыдущий элемент",
"def traverser(self, preload, nodeStart, nodeEnd, *args):\n dummy = lambda *args: None\n if not nodeStart:\n nodeStart = dummy\n if not nodeEnd:\n nodeEnd = dummy\n stack = [self]\n nodeStart(self, *args)\n for i in self.descendantsIter(preload):\n while(i.parent != stack[-1].idNum):\n nodeEnd(stack[-1], *args)\n stack.pop()\n stack.append(i)\n nodeStart(i, *args)\n while(stack):\n nodeEnd(stack[-1], *args)\n stack.pop()",
"def _traverse_backward(self, node):\n if node is not None:\n yield from self._traverse_backward(node.right)\n yield node.data\n yield from self._traverse_backward(node.left)",
"def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))",
"def reverse_iterative(self):\n # Create the new LinkedList.\n new_list = LinkedList()\n\n # Set the initial node to reverse from.\n node = self.first_node\n\n # iterate over each node and stop when node is None\n while node:\n next = node.next\n # Prepend the node to the new list.\n new_list.prepend(node)\n\n # Update the node reference.\n node = next\n return new_list",
"def __reversed__(self):\n yv = self.last\n while yv:\n yield yv\n yv = yv.prev",
"def backward(self):\n raise NotImplementedError",
"def iterate_ll_backwards(head):\n\tif head is None:\n\t\treturn\n\n\titerate_ll_backwards(head.next)\n\tprint head.data",
"def reverse(self):\n\n current = self.head\n prev = None\n\n while current is not None:\n tmp = current.next\n current.next = prev\n\n prev = current\n current = tmp\n\n # Update the head\n self.head = prev",
"def forward(self):\n for edge in self.outgoing:\n edge.forward()",
"def forwarding(predecessor, source):\r\n pass # TODO\r",
"def instrsreversed(self):\n x = self._lastInstr\n while x is not None:\n # now we can remove x and continue iterating :)\n x_prev = x.prev\n yield x\n x = x_prev"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
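
The _deconstruct_path document in the row above assumes a predecessor map produced by some earlier search; a minimal, self-contained sketch of how such a map is typically built with a BFS and then unwound the same way (the bfs_predecessors helper and the toy graph are illustrative assumptions, not part of the dataset):

from collections import deque

def bfs_predecessors(graph, start):
    # Map each reached node to the node it was discovered from; the start
    # node maps to None so the backward walk below knows where to stop.
    predecessors = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in graph.get(node, ()):
            if neighbour not in predecessors:
                predecessors[neighbour] = node
                queue.append(neighbour)
    return predecessors

def deconstruct_path(predecessors, end):
    # Same backward traversal as the row's document: follow predecessor
    # links from end until a node with no recorded predecessor is reached.
    if end not in predecessors:
        return None
    current = end
    path = []
    while current is not None:
        path.append(current)
        current = predecessors.get(current)
    return list(reversed(path))

graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
preds = bfs_predecessors(graph, "A")
print(deconstruct_path(preds, "D"))  # ['A', 'B', 'D'] for this toy graph
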
Recursively creates Level_Pair nodes from start up to end. Assumes that end's attack and strength are greater than start's. Neighbours for a node are stored in graph[node]. Distances between neighbours are stored in graph[nodeA][nodeB]. | def populate_graph(
graph, start, end, attack_bonus, strength_bonus):
# Check if already created
if start in graph:
return
graph[start] = dict()
# Recursively create neighbouring level pairs
if start.attack < end.attack:
inc_attack = Level_Pair(start.attack + 1, start.strength)
# Store level-up time
graph[start][inc_attack] = level_time_average(
start, Attack_Style.ATTACK, attack_bonus, strength_bonus)
# Continue at next node
populate_graph(graph, inc_attack, end,
attack_bonus, strength_bonus)
if start.strength < end.strength:
inc_strength = Level_Pair(start.attack, start.strength + 1)
# Store level-up time
graph[start][inc_strength] = level_time_average(
start, Attack_Style.STRENGTH, attack_bonus, strength_bonus)
# Continue at next node
populate_graph(graph, inc_strength, end,
attack_bonus, strength_bonus) | [
"def generate_adjacents(node):\n # Makes a dictionary where keys are current upper token positions and\n # values are the list of positions attainable from one slide move\n slide_moves = {}\n for key, value in node.boardstate.items():\n if value.isupper() and value != \"B\":\n slide_moves[key] = get_slide_moves(key, node.boardstate)\n\n # Append list of swing moves to get all moves\n moves_dict = {}\n #relevant_pieces = [name ]\n for key in slide_moves:\n all_moves = set(slide_moves[key] + get_swing_moves(key, slide_moves))\n moves_dict[key] = list(all_moves)\n\n # Convert from dictionary to list of list of tuples of the form:\n #[[(curr_move, next_move)...]...] where each tokens moves occupy a list\n moves_list = []\n for curr, news in moves_dict.items():\n moves_list.append([(curr, new) for new in news])\n\n # Get all combinations of moves and for each combo construct a new board state\n adjacent_states = []\n turns = list(product(*moves_list))\n\n for turn in turns:\n new_board = apply_turn(node, turn)\n if new_board:\n adjacent_states.append((turn, new_board))\n return adjacent_states",
"def a_star(start_node: Node, end_node: Node):\n open_nodes= PriorityQueue()\n closed_nodes = []\n solved = False\n open_nodes.put((1,start_node))\n loops = 0\n while not open_nodes.empty():\n loops += 1\n current_node = open_nodes.get()[1]\n closed_nodes.append(current_node)\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.state)\n current = current.parent_node\n return path[::-1]\n\n child_nodes = []\n left_state = move_left(current_node.state)\n right_state = move_right(current_node.state)\n up_state = move_up(current_node.state)\n down_state = move_down(current_node.state)\n if left_state != None:\n left_node = Node(current_node, left_state)\n child_nodes.append(left_node)\n if right_state != None:\n right_node = Node(current_node, right_state)\n child_nodes.append(right_node)\n if up_state != None:\n up_node = Node(current_node, up_state)\n child_nodes.append(up_node)\n if down_state != None:\n down_node = Node(current_node, down_state)\n child_nodes.append(down_node)\n for child_node in child_nodes:\n already_open = False\n if child_node not in closed_nodes and current_node != child_node:\n child_node.cost = current_node.cost + 1\n child_node.heuristic = misplaced_tiles_heuristic(child_node.state, end_node.state)\n child_node.estimate = child_node.cost + child_node.heuristic\n child_node.depth = current_node.depth + 1\n if not already_open: \n open_nodes.put((child_node.estimate, child_node))\n if len(closed_nodes) > 362880:\n print(\"Error\")",
"def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return",
"def build_tree(self, depth=2):\n total_nodes = np.sum([2 ** x for x in range(depth)])\n nodes = list(range(total_nodes))\n nodes_per_level = np.cumsum([2 ** x for x in range(depth - 1)])\n nodes_level = [x.tolist() for x in np.array_split(nodes, nodes_per_level)]\n\n adj_list = dict((idx, {}) for idx in nodes)\n for fr in nodes_level[:-1]:\n for i in fr:\n i_list = adj_list.get(i, {})\n # the connected nodes always follows this pattern\n i_list[\"left\"] = i * 2 + 1\n i_list[\"right\"] = i * 2 + 2\n adj_list[i] = i_list.copy()\n return adj_list",
"def _create_new_nodes(self, level, n):\n if (level + 1) == len(self._node_list):\n self._node_list.append([])\n\n split_val = self._node_list[level][n].get_split()\n idx = self._node_list[level][n].get_col()\n\n # Split data\n lower_x_data, lower_y_data, upper_x_data, upper_y_data = self._split_data(level, n, idx, split_val)\n\n # Now check if all the same in lower/upper\n # Do not change y_data to average over all values\n if (lower_x_data.shape[0] > 1) and ((lower_x_data - lower_x_data[0, :]) == 0).all():\n lower_x_data = lower_x_data[[0], :]\n if (upper_x_data.shape[0] > 1) and ((upper_x_data - upper_x_data[0, :]) == 0).all():\n upper_x_data = upper_x_data[[0], :]\n # Make lower node if one can\n if lower_x_data.shape[0] > 0:\n lower_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(lower_x_data, lower_y_data))\n self._node_list[level][n].set_lower_split_index(lower_curr_index)\n else:\n lower_curr_index = None\n # Make upper node\n if upper_x_data.shape[0] > 0:\n upper_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(upper_x_data, upper_y_data))\n self._node_list[level][n].set_upper_split_index(upper_curr_index)\n else:\n upper_curr_index = None\n\n return [level + 1, lower_curr_index], [level + 1, upper_curr_index]",
"def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1",
"def shortest_path(start, end):\n\n\tmoves = rubik.quarter_twists\n\n\t# Parent nodes: (Parent_State, move)\n\tstartParents = {}\n\tstartParents[start] = None # Start state has no parent\n\n\t# Parent nodes: (Parent_State, move)\n\tendParents = {}\n\tendParents[end] = None # End state has no parent\n\n\tstartFrontier = [] # Current frontier in start BFS\n\tendFrontier = [] # Current frontier in end BFS\n\n\tstartFrontier.append(start) # Add start state as first and only node to generate next frontier\n\tendFrontier.append(end) # Add end state as first and only node to generate next frontier\n\n\tif end in startParents:\n\t\treturn [] # Start == End : No moves required\n\n\t# We only have to search at most 14 levels in BFS\n\t# Two-way BFS therefore requires 7 concurrent levels from both states\n\tfor i in range(7):\n\n\t\tstartNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in startFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in startParents:\n\t\t\t\t\tstartParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tstartNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in endParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tstartFrontier = startNextFrontier # Make the next frontier the current one\n\n\t\tendNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in endFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in endParents:\n\t\t\t\t\tendParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tendNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in startParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tendFrontier = endNextFrontier # Make the next frontier the current one\n\n\treturn None",
"def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]",
"def segtreeRangeCoveringNodes(n: int, start: int, end: int) -> List[int]:\r\n res, revRes = [], []\r\n start, end = start + n, end + n\r\n while start < end:\r\n if start & 1:\r\n res.append(start)\r\n start += 1\r\n if end & 1:\r\n end -= 1\r\n revRes.append(end)\r\n start >>= 1\r\n end >>= 1\r\n res += revRes[::-1]\r\n return res",
"def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph",
"def __init__(self,startNodes,endNode,nodeList):\n self.startNodes = startNodes\n self.endNode = endNode\n self.nodeList = nodeList #list of nodes from end to start\n endNode.graph = self",
"def get_all_simple_paths_from_node(graph:nx.Graph, start_node_id, depth_limit, max_num_paths_per_node:int, max_length_of_each_path_leading_to_node:int, bar:progressbar.ProgressBar):\n \n all_reachable_nodes = list(single_source_shortest_path(graph, start_node_id, depth_limit))\n all_reachable_nodes = list(filter(lambda a: a != start_node_id, all_reachable_nodes)) # Get rid of self from list\n random.shuffle(all_reachable_nodes) # This is so random nodes get selected as they are in a depth flow out pattern.\n\n sequence = [] # formatted as [0] = y/predict, [1] = pivot node id, [2] = x/train\n\n for i in range(len(all_reachable_nodes)):\n bar.update()\n\n # 1. get path\n paths_from_start_node_id_to_reachable_node_iterator = all_simple_paths(graph, start_node_id, all_reachable_nodes[i], depth_limit)\n paths_from_start_node_id_to_reachable_node = []\n \n num_simple_paths_added = 0\n for simple_path in paths_from_start_node_id_to_reachable_node_iterator:\n paths_from_start_node_id_to_reachable_node.append(simple_path)\n num_simple_paths_added += 1\n if num_simple_paths_added > max_length_of_each_path_leading_to_node:\n break\n \n for path in paths_from_start_node_id_to_reachable_node:\n # 2. get surrounding nodes\n surrounding_nodes = get_all_paths_to_node_depth_limited(graph, start_node_id, 1)\n surrounding_nodes = list(filter(lambda n_id: n_id != start_node_id, surrounding_nodes)) # remove self\n\n # 3.1 remove any paths that cross over surrounding nodes twice.\n if len(set(surrounding_nodes) & set(path)) > 1:\n continue;\n\n # 3. remove surrounding nodes contained in paths\n surrounding_nodes = list(filter(lambda a: a not in path, surrounding_nodes))\n\n sequence.append((surrounding_nodes, start_node_id, path))\n\n if len(sequence) > max_num_paths_per_node:\n break\n\n return sequence",
"def buildGraph(self,angle_dict,node,node_limit,link_limit,direc_constr=[]):\n #The list of unattached directions on the current node\n open_direc = [x for x in node.directions if node.directions[x] is None]\n angle = None\n #The number of links that will be attempted to be created this round\n if min(link_limit-self.num_links,len(open_direc))>0:\n num_links = random.randint(1,min(link_limit-self.num_links,len(open_direc)))\n else:\n #In the case there are no more links to add randint passes an error so handle\n # separately. Also since num_links = 0 then you never enter the for loop\n # where false is initialised, so intialise here to allow exit from the loop\n num_links = 0\n fail = False\n if self.debug:\n print(\"LABEL: {}\\tAVAIL: {}\".format(node.label,open_direc))\n print(\"CONSTRAINTS: {}\".format(direc_constr))\n print(\"TRYING TO PRINT {} LINKS\".format(num_links))\n #At the back end of the recursion the node might not exist\n if node not in self.node_list:\n node = random.choice(self.node_list)\n direc_constr = []\n #Coordinates of the current node\n node_coords = self.coords[self.node_list.index(node)]\n new_links = []\n new_direcs = []\n for i in range(num_links):\n #getAngle chooses an angle from angle_dict that corresponds to an available side\n # of the current node. If getAngle fails to find any such angle it leaves angle\n # as none and sets direc to be the set of directions for which there are angles\n # yet unassigned\n angle,direc = getAngle(open_direc,direc_constr,angle_dict,self.debug)\n #If angle is none then getAngle failed to find an angle that can be linked to\n # the node\n if angle is None:\n #Here we go over every node to find ones that still have openings on\n # sides corresponding to the angles we have\n open_nodes = []\n for entry in self.node_list:\n #direc is now a list of directions corresponding to the actions available\n for avail in direc:\n # Only add the node to the list of nodes to be considered \n if entry.directions[avail] is None:\n open_nodes.append(entry)\n #Don't need to iterate over all angles if node works for any one\n break\n #Keep checking while there are still nodes to check and angle is not assigned\n while angle is None and open_nodes != []:\n #These are the same steps that occur at the start of the program\n if node in open_nodes: open_nodes.remove(node)\n node = random.choice(open_nodes)\n if self.debug:\n print(\"Moved to Node {} ({})\".format(node.label,node.coordinates))\n new_direcs = []\n node_coords = self.coords[self.node_list.index(node)]\n open_direc = [x for x in node.directions if node.directions[x] is None]\n angle,direc = getAngle(open_direc,[],angle_dict,self.debug)\n if open_nodes == []:\n print(\"ERROR: No Suitable Nodes Available. 
Terminating\")\n exit(-1)\n #By this point we have chosen a direction, we add it to the list of directions\n # being used in this round\n new_direcs.append(direc)\n #Create the angle being inserted this round\n link = Link(angle)\n fail = True #Fail is true if we fail to insert the link by the end of the round\n #Copy the coordinates of the current node that will be modified to be the \n # coordinates for the node the link will connect to\n\n #Index is the index of the non-zero entry in the link.change list\n if math.fabs(link.change[0]) == 1:index = 0\n else: index = 1\n\n #We only consider nodes whose unchanged coord matches the current coord\n # in the direction of the change\n potentials = []\n if self.debug: print(\"Beginning this fix\")\n pot_coords = None\n for pot_to_node in self.node_list:\n pot_coords = self.coords[self.node_list.index(pot_to_node)]\n change = [pot_coords[i]-node_coords[i] for i in range(2)]\n for i in range(len(change)):\n if change[i]!=0: change[i]/=math.fabs(change[i])\n if change[1-index] == 0 and (change[index]/link.change[index])>0:\n if change == [1,0]: pot_direc = \"right\"\n elif change == [-1,0]: pot_direc = \"left\"\n elif change == [0,1]: pot_direc = \"top\"\n elif change == [0,-1]: pot_direc = \"bottom\"\n \n if pot_to_node.directions[pot_direc] is None:\n potentials.append(pot_coords)\n\n #Looking for the closest node to our current coordinate\n node_exists = False #indicates if there is a preexisting node to link to\n min_dist = -1 #initialise the minimal distance between nodes\n for coord in potentials:\n if min_dist == -1 or math.fabs(coord[index]-node_coords[index])<min_dist:\n min_dist = math.fabs(coord[index] - node_coords[index])\n min_coords = coord\n node_exists = True\n\n #There already exists a node in the graph with the same coordinates as the \n # node the link will connect to\n if node_exists:\n #set the target node to the the corresponding entry in the list of nodes\n new_node = self.node_list[self.coords.index(min_coords)]\n if self.debug:\n print(\"Found a node to connect to: {} ({})\".format(new_node.label,new_node.coordinates))\n #Identified target node is suitable for connection\n # In this case we add link to graph, remove angle from available angles\n # and remove direction from list of available directions on the node\n if new_node.directions[REVERSE_DICT[direc]] is None:\n self.addLink(link,node,new_node,direc)\n if self.debug:\n print(\"Created New Link: {}\\t{}\\t{}\".format(link.from_node.coordinates,link.to_node.coordinates,link.angle))\n angle_dict[direc].remove(angle)\n open_direc.remove(direc)\n #Add this link to the list of links created this round\n new_links.append(link)\n #We have not failed to create a new link\n fail = False\n #If the above clause does not trigger then we have failed to make a link\n else:\n #If a node with the same coordinates does not exist then the only\n # option is to create a new node. 
To do this we must check that we\n # are below the node limit\n if self.debug: print(\"There is no node available to link to\")\n new_coords = list(node_coords)\n for i in range(2): new_coords[i] += link.change[i]\n if self.num_nodes<node_limit:\n #Build a new node with the suitable coordinates\n new_node = Node(new_coords[0],new_coords[1],self.num_nodes)\n #Add the node and link to the graph\n self.addNode(new_node)\n self.addLink(link,node,new_node,direc)\n #Add this link to the list of links created this round\n new_links.append(link)\n #Removre direction and angle from lists of consideration\n angle_dict[direc].remove(angle)\n open_direc.remove(direc)\n if self.debug:\n print(\"Created New Node {}: {}\\t({})\".format(new_node.label,direc, new_coords))\n print(\"Created New Link: {}\\t{}\\t{}\".format(link.from_node.coordinates,link.to_node.coordinates,link.angle))\n #We have not failed to create a new link\n fail = False\n #If the above does not trigger we have faile to make a link\n if fail:\n if self.debug:\n print(\"Tried these directions: {} and failed\".format(new_direcs))\n else:\n if self.debug:\n print(\"Succeeded in creating a link: {}\".format(direc))\n #exit(-1)\n if fail:\n #If fail is true we have not made any new links this round\n if self.debug: print(\"Still in same node\")\n #We remove the directions we have previously tried this round from consideration\n complete = True\n for entry in new_direcs:\n if entry not in direc_constr:\n direc_constr.append(entry)\n complete = False\n if complete:\n if self.debug:\n print(\"There's something wrong here\")\n print(\"REMAINING ANGLES: {}\".format(angle_dict))\n self.try_count += 1\n expendable = [x for x in self.node_list if len([y for y in x.directions if x.directions[y] is not None])==1]\n if expendable == [] or self.try_count>2*node_limit:\n if self.debug:\n if self.try_count>2*node_limit: print(\"Giving Up\")\n print(\"Cannot make full graph\")\n #exit(-1)\n else:\n spare_node = random.choice(expendable)\n for entry in spare_node.directions:\n if spare_node.directions[entry] is not None:\n spare_link = spare_node.directions[entry]\n if spare_node is spare_link.from_node:\n angle_dict[entry].append(spare_link.angle)\n else:\n angle_dict[REVERSE_DICT[entry]].append(spare_link.angle)\n self.removeLink(spare_node.directions[entry])\n break #by definition there is only one filled direction on this node.\n if self.debug: print(\"Removing node at ({})\".format(spare_node.coordinates))\n self.removeNode(spare_node)\n if spare_node is node:\n node = self.node_list[0]\n if self.debug: \n print(\"Trying to build graph from node {} ({}) with no constraints\".format(node.label,node.coordinates))\n self.buildGraph(angle_dict,node,node_limit,link_limit,direc_constr=[])\n if self.debug: \n print(\"Back in {} ({}) after trying to fix what was wrong\".format(node.label,node.coordinates))\n #exit(-1)\n else:\n #Recursively try again building on the same node, but with the new constraints\n if self.debug:\n print(\"Trying to build graph from node {} ({}) with constraints {}\".format(node.label,node.coordinates,direc_constr))\n self.buildGraph(angle_dict,node,node_limit,link_limit,direc_constr)\n if self.debug:\n print(\"Back in {} ({}) after trying to put a link on this node with extra constraints\".format(node.label,node.coordinates))\n\n else:\n #We have successfully constructed at least one link. 
We move onto the new nodes\n # and try make new links from there (if we need to make more links)\n if self.debug: print(\"Going onto other nodes\")\n for link in new_links:\n if self.num_links<link_limit:\n #If more links to be made make them off the new connections\n self.buildGraph(angle_dict,link.to_node,node_limit,link_limit)\n if self.debug:\n print(\"Back in {} ({}) after making other nodes still in loop\".format(node.label,node.coordinates))\n else:\n #This is the end condition for the loop that does not exit out of the\n # program\n pass",
"def Dijkstra2(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n \n # Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: (previous_node, iteration, cost)}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k, 0)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the nodes that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + graph.weights[(current_node, e)]\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n min_dist = sub_dist_value.min()\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n min_dist = sub_dist_value.min()\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k, min_dist)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n #print('current_node : {}'.format(current_node))\n #print(dict_fixed_node)\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = int(dict_fixed_node[node_end][2])\n \n return cost, shortest_path, no_path",
"def complete_graph(players):\n for player1_index, _ in enumerate(players):\n for player2_index in range(player1_index, len(players)):\n yield (player1_index, player2_index)",
"def create(self, range_value):\n adjacency_list = []\n for idx, node in enumerate(range_value):\n if idx == len(range_value):\n break\n for node2 in range_value[idx + 1:]:\n adjacency_list.append([node, node2])\n return adjacency_list",
"def make_undirected_graph(num_nodes, propability):\r\n if num_nodes == 0:\r\n return {0: set([])}\r\n complete_dir_graph = {}\r\n for start_node in range(num_nodes):\r\n temp_list = []\r\n for edge_node in range(num_nodes):\r\n if edge_node > start_node:\r\n random_var = random.random()\r\n if random_var < propability:\r\n temp_list.append(edge_node)\r\n if edge_node in complete_dir_graph:\r\n complete_dir_graph[edge_node] = complete_dir_graph[edge_node].union(set([start_node]))\r\n else:\r\n complete_dir_graph[edge_node] = set([start_node])\r\n if start_node not in complete_dir_graph:\r\n complete_dir_graph[start_node] = set(temp_list)\r\n else:\r\n complete_dir_graph[start_node] = complete_dir_graph[start_node].union(set(temp_list))\r\n return complete_dir_graph",
"def _connect_nodes(from_nodes, to_nodes, weight=0):\n for to_node in to_nodes:\n for from_node in from_nodes:\n Connection(from_node, to_node, weight)",
"def _traverse_level_order_iterative(self, start_node, visit):\n # Create queue to store nodes not yet traversed in level-order\n queue = LinkedQueue()\n # Enqueue given starting node\n queue.enqueue(start_node)\n # Loop until queue is empty\n while queue.is_empty() == False:\n # Dequeue node at front of queue\n node = queue.dequeue()\n # Visit this node's data with given function\n visit(node.data)\n # Enqueue this node's left child, if it exists\n if node.left_child is not None:\n queue.enqueue(node.left_child)\n # Enqueue this node's right child, if it exists\n if node.right_child is not None:\n queue.enqueue(node.right_child)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
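
The populate_graph document in the row above produces a weighted graph in which graph[nodeA][nodeB] holds the average ticks to level from nodeA to nodeB; a minimal sketch of how that nested-dict layout could then be searched with Dijkstra's algorithm to pick the cheapest training order (the Level_Pair stand-in and the tick weights below are illustrative assumptions, not values level_time_average would actually return):

import heapq
from collections import namedtuple

Level_Pair = namedtuple("Level_Pair", ["attack", "strength"])

def dijkstra(graph, start, end):
    # graph[node] is a dict of neighbour -> edge cost, matching the
    # graph[nodeA][nodeB] layout described by the row's query.
    dist = {start: 0}
    heap = [(0, start)]
    while heap:
        cost, node = heapq.heappop(heap)
        if node == end:
            return cost
        if cost > dist.get(node, float("inf")):
            continue  # stale heap entry
        for neighbour, weight in graph.get(node, {}).items():
            new_cost = cost + weight
            if new_cost < dist.get(neighbour, float("inf")):
                dist[neighbour] = new_cost
                heapq.heappush(heap, (new_cost, neighbour))
    return None

# Hypothetical 2x2 corner of the level graph with made-up tick weights.
start = Level_Pair(60, 60)
end = Level_Pair(61, 61)
graph = {
    start: {Level_Pair(61, 60): 900, Level_Pair(60, 61): 850},
    Level_Pair(61, 60): {end: 840},
    Level_Pair(60, 61): {end: 880},
}
print(dijkstra(graph, start, end))  # 1730: training strength first is cheaper here
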
Runs simulations to determine the time (ticks) to level up attack or strength. The enemy is set as a sand crab (60 hp, 1 def, 0 def bonus) and the weapon is the best available scimitar. | def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus):
ticks_per_attack = 4 # Scimitar attack speed
enemy_health = 60 # Sand crab health
max_hit, accuracy = get_max_hit_and_accuracy(
start_levels, attack_style, attack_bonus, strength_bonus)
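    # Pick the experience boundaries for whichever skill is being trained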
if attack_style == Attack_Style.ATTACK:
start_exp = osrs.experience[start_levels.attack]
end_exp = osrs.experience[start_levels.attack+1]
elif attack_style == Attack_Style.STRENGTH:
start_exp = osrs.experience[start_levels.strength]
end_exp = osrs.experience[start_levels.strength+1]
experience = end_exp - start_exp
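    # Experience needed to advance exactly one level in the trained skill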
avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy,
ticks_per_attack, enemy_health, experience,
osrs.BASE_EXP_PER_DAMAGE, ITERATIONS)
return avg_ticks | [
"def doMonsterAttack(self):\n #random monster attacks option\n #amoveNum = random.choices([0, 1, 2], [0.08, 0.46, 0.46])[0]\n weighted_choices = [(0, 8), (1, 46), (2, 46)]\n moveNum = random.choice([val for val, cnt in weighted_choices for i in range(cnt)])\n\n\n ########## Frame 1 ###########\n # -> Monster prepares attack\n # -> Player is Neutral\n # -> 0.5 time scale\n\n # Monster starts attack.\n self.monster.sprite = self.monster.startAttackSprite\n self.monster.moveInArena(self.monsterNeutralX, self.monsterY)\n\n # Player stays neutral.\n self.player.moveInArena(self.playerNeutralX, self.playerY)\n\n self.drawEverything(healthBar=True, textBox=True, controls=True, player=True, monster=True, playerDamage=False, monsterDamage=False)\n\n # Print frame.\n self.universe.screen.print()\n sleep(1/self.animationSpeedScale * 0.5)\n\n\n ########## Frame 2 ###########\n # -> Monster Moves to Attack Location\n # -> Monster attacks player\n # -> Player flinches (we don't have a flinching art tho)\n # -> Damage is calculated and saved in player (applied next frame)\n # -> Player's Damage Numbers Appear\n # -> 1.0 time scale\n\n # Calculate attack to be applied during the next frame.\n damageRecStat = self.monster.calcAttack(self.monster.moveset[moveNum], self.player)\n self.universe.damageReceived += damageRecStat\n\n # Player flinches.\n self.player.sprite = self.player.flinchSprite\n self.player.moveInArena(self.playerNeutralX, self.playerY)\n\n # Monster attacks\n self.monster.sprite = self.monster.endAttackSprite\n self.monster.moveInArena(self.monsterAttackX, self.monsterY)\n\n self.drawEverything(healthBar=True, textBox=True, controls=True, player=True, monster=True, playerDamage=True, monsterDamage=False)\n\n # Print frame.\n self.universe.screen.print()\n sleep(1 / self.animationSpeedScale)\n\n\n ########## Frame 3 ###########\n # -> Both Player and Monster in neutral\n # -> Damage is applied to player and health bar moves\n # -> 0.5 time scale\n\n # Apply attack to player and draws changed health bar\n self.monster.applyAttack(self.monster.moveset[moveNum], self.player)\n\n # Reset Monster to neutral position and neutral sprite\n self.monster.sprite = self.monster.neutralSprite\n self.monster.moveInArena(self.monsterNeutralX, self.monsterY)\n\n # Reset Player to neutral position and neutral sprite\n self.player.sprite = self.player.neutralSprite\n self.player.moveInArena(self.playerNeutralX, self.playerY)\n\n self.drawEverything(healthBar=True, textBox=True, controls=True, player=True, monster=True, playerDamage=False, monsterDamage=False)\n\n # Print frame at 0.5 time scale\n self.universe.screen.print()\n sleep(1 / self.animationSpeedScale * 0.5)\n\n\n ########## Frame 3.5 ###########\n # -> This frame only occurs if the player has status effects\n # -> Player Flinches\n # -> Status damage is applied to player\n # -> Player's damage text is drawn\n # -> 1.0 time scale\n\n player_healthPreEffect = self.player.currentHealth\n\n self.doPlayerStatusEffects()\n\n player_healthPostEffect = self.player.currentHealth\n\n #adds damage from effect to damageReceived in Universe for stats\n self.universe.damageReceived = self.universe.damageReceived + ( player_healthPreEffect - player_healthPostEffect)",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"async def attack(self, ctx):\r\n def simulate_battle(player1, player2):\r\n \"\"\"Simulate a battle between two players based solely off ATK and Crit.\r\n Each side has a small chance to land a \"crit\" (based off crit) and win.\r\n Otherwise it will base the victor off the proportions of the attack.\r\n Return the winner and loser in that order.\"\"\"\r\n #See if one side lands a critical hit - Highest crit possible is theoretically ~70%.\r\n p1vict = player1['Crit']\r\n p2vict = p1vict + player2['Crit'] #This should theoretically be <=140\r\n random_crit = random.randint(0,500)\r\n if random_crit < p1vict:\r\n return player1, player2 #player1 wins; Winner is returned first\r\n elif random_crit < p2vict:\r\n return player2, player1\r\n \r\n #If no victory occurs, then base it off proportion of ATK\r\n victory_number = random.randint(0, player1['ATK'] + player2['ATK'])\r\n if victory_number < player1['ATK']:\r\n return player1, player2\r\n else:\r\n return player2, player1\r\n\r\n #Only one attack per area every 3 hours. Check to see if attacking is available\r\n guild = await AssetCreation.getGuildFromPlayer(self.client.pg_con, ctx.author.id)\r\n last_attack = await AssetCreation.get_most_recent_area_attack(self.client.pg_con, guild['Base'])\r\n\r\n if last_attack is not None:\r\n if (datetime.now() - last_attack).total_seconds() < 10800:\r\n return await ctx.reply(f'This area has already suffered a recent attack. Please try again in `{time.strftime(\"%H:%M:%S\", time.gmtime(10800 - (datetime.now() - last_attack).total_seconds()))}`.')\r\n\r\n #If available, load the champions for both brotherhoods\r\n #If <3 champions, recycle the first with nerfed stats\r\n #If defender has no champions, attacker automatically wins\r\n attacker = await AssetCreation.get_brotherhood_champions(self.client.pg_con, guild['ID'])\r\n defending_guild_id = await AssetCreation.get_area_controller(self.client.pg_con, guild['Base'])\r\n\r\n if defending_guild_id is None: #No one currently holds the area, so attacker assumes control\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n defending_guild = await AssetCreation.getGuildByID(self.client.pg_con, defending_guild_id)\r\n\r\n if defending_guild['Base'] != guild['Base']: #then the defending guild has since moved. Give freely\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n if guild['ID'] == defending_guild_id:\r\n return await ctx.reply(f\"Your brotherhood is already in control of {guild['Base']}\")\r\n\r\n defender = await AssetCreation.get_brotherhood_champions(self.client.pg_con, defending_guild_id)\r\n\r\n if attacker[0] is None and attacker[1] is None and attacker[2] is None:\r\n return await ctx.reply(f'Your brotherhood has no champions. 
Set some with `{ctx.prefix}bh champion`!')\r\n \r\n if defender[0] is None and defender[1] is None and defender[2] is None: #If defender has no champs, give it up\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n for i in range(0,3): #Replace their IDs with a dict containing battle info\r\n if attacker[i] is not None:\r\n name = await AssetCreation.getPlayerName(self.client.pg_con, attacker[i])\r\n # attack, crit = await AssetCreation.getAttack(self.client.pg_con, attacker[i])\r\n battle_stats = await AssetCreation.get_attack_crit_hp(self.client.pg_con, attacker[i])\r\n \r\n attacker[i] = {\r\n 'ID' : attacker[i],\r\n 'Name' : name,\r\n 'ATK' : battle_stats['Attack'],\r\n 'Crit' : battle_stats['Crit']\r\n }\r\n if defender[i] is not None:\r\n name = await AssetCreation.getPlayerName(self.client.pg_con, defender[i])\r\n # attack, crit = await AssetCreation.getAttack(self.client.pg_con, defender[i])\r\n battle_stats = await AssetCreation.get_attack_crit_hp(self.client.pg_con, defender[i])\r\n \r\n defender[i] = {\r\n 'ID' : defender[i],\r\n 'Name' : name,\r\n 'ATK' : battle_stats['Attack'],\r\n 'Crit' : battle_stats['Crit']\r\n }\r\n\r\n for i in range(1,3): #Sort the teams so that the first slot is always a person (and not empty)\r\n if attacker[0] is None and attacker[i] is not None:\r\n attacker[0] = attacker[i]\r\n if defender[0] is None and defender[i] is not None:\r\n defender[0] = defender[i]\r\n\r\n for i in range(1,3): #Now fill \"None\"s with the first champion. The above operation made sure the first is always a person\r\n if attacker[i] is None:\r\n attacker[i] = attacker[0]\r\n if defender[i] is None:\r\n defender[i] = defender[0]\r\n\r\n #Now check for repeats, nerfing stats for the second or third appearance. 
This can probably be optimized.\r\n if attacker[0]['ID'] == attacker[1]['ID']:\r\n attacker[1]['ATK'] = int(attacker[1]['ATK'] * .9)\r\n attacker[1]['Crit'] = int(attacker[1]['Crit'] * .9)\r\n\r\n if attacker[0]['ID'] == attacker[2]['ID']:\r\n attacker[2]['ATK'] = int(attacker[2]['ATK'] * .9)\r\n attacker[2]['Crit'] = int(attacker[2]['Crit'] * .9)\r\n\r\n if attacker[1]['ID'] == attacker[2]['ID']:\r\n attacker[2]['ATK'] = int(attacker[2]['ATK'] * .9)\r\n attacker[2]['Crit'] = int(attacker[2]['Crit'] * .9)\r\n\r\n if defender[0]['ID'] == defender[1]['ID']:\r\n defender[1]['ATK'] = int(defender[1]['ATK'] * .9)\r\n defender[1]['Crit'] = int(defender[1]['Crit'] * .9)\r\n\r\n if defender[0]['ID'] == defender[2]['ID']:\r\n defender[2]['ATK'] = int(defender[2]['ATK'] * .9)\r\n defender[2]['Crit'] = int(defender[2]['Crit'] * .9)\r\n\r\n if defender[1]['ID'] == defender[2]['ID']:\r\n defender[2]['ATK'] = int(defender[2]['ATK'] * .9)\r\n defender[2]['Crit'] = int(defender[2]['Crit'] * .9)\r\n\r\n #Conduct PvP operations between the brotherhoods to determine the winner\r\n attacker_wins = 0\r\n defender_wins = 0\r\n battle_info = ''\r\n\r\n for i in range(0,3):\r\n winner, loser = simulate_battle(attacker[i], defender[i]) #Same from PvP.tournament\r\n if attacker[i]['ID'] == winner['ID']:\r\n attacker_wins += 1\r\n battle_info += f\"{guild['Name']}'s {attacker[i]['Name']} defeated {defending_guild['Name']}'s {defender[i]['Name']}.\\n\"\r\n else:\r\n defender_wins += 1\r\n battle_info += f\"{defending_guild['Name']}'s {defender[i]['Name']} defeated {guild['Name']}'s {attacker[i]['Name']}.\\n\"\r\n\r\n #Log battle, change controller if applicable, return output\r\n if attacker_wins > defender_wins:\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await AssetCreation.log_area_attack(self.client.pg_con, guild['Base'], guild['ID'], defending_guild['ID'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has defeated **{defending_guild['Name']}**, seizing control over {guild['Base']}.\")\r\n await ctx.reply(f\"{battle_info}{guild['Name']} has seized control over {guild['Base']}!\")\r\n else:\r\n await AssetCreation.log_area_attack(self.client.pg_con, guild['Base'], defending_guild['ID'], guild['ID'], defending_guild['ID'])\r\n await ctx.reply(f\"{battle_info}Your attack on {guild['Base']} was put down by the champions of {defending_guild['Name']}.\")",
"def attack(self):\n assert self.enemy, \"Attack failed: No enemy facing now.\"\n\n injured_unit = weighted_random_selection(self, self.enemy)\n if injured_unit:\n injury = random.randint(10, 15)\n injured_unit.health_meter = max(injured_unit.health_meter - injury, 0)\n print(\"攻击! \", end='\\n')\n self.show_health_comparison(bold=True)",
"def run(self, agent_host):\r\n roundTimeStart = time.time()\r\n kill = 0\r\n round_enemy = None\r\n self.agent = agent_host\r\n max_score = 0\r\n min_score = 100\r\n S, A, R = deque(), deque(), deque()\r\n world_state = self.agent.getWorldState()\r\n if world_state.number_of_observations_since_last_state > 0:\r\n obs = json.loads(world_state.observations[-1].text)\r\n\r\n t = 0\r\n enemyHealth = -1\r\n agentHealth = -1\r\n state = (\"\",)\r\n action = \"\"\r\n lastActionTime = 0\r\n while world_state.is_mission_running and state != (\"Finished\",):\r\n time.sleep(0.01)\r\n currentTime = time.time()\r\n world_state = self.agent.getWorldState()\r\n if world_state.number_of_observations_since_last_state > 0:\r\n obs = json.loads(world_state.observations[-1].text)\r\n if state == (\"last check\",):\r\n state = (\"Finished\",)\r\n agentHealth = obs['Life']\r\n break\r\n if \"Name\" not in obs: #Edge case where we observe before load.\r\n continue\r\n enemy = None\r\n for e in obs['entities']:\r\n if e['name'] != obs['Name'] and 'life' in e:\r\n round_enemy = e['name']\r\n enemy = e\r\n break\r\n if enemy == None:\r\n state = (\"last check\",)\r\n continue\r\n self.track_target(obs, enemy)\r\n if currentTime - lastActionTime >= 200:\r\n state = self.get_curr_state(obs, enemy)\r\n self.clearAction(action)\r\n p_actions = self.get_possible_actions(self.weapon)\r\n action = self.choose_action(state, p_actions, self.epsilon)\r\n damageDelta = 0\r\n healthDelta = 0\r\n if enemyHealth == -1:\r\n enemyHealth = enemy['life']\r\n elif enemy['life'] != enemyHealth:\r\n damageDelta = enemyHealth - enemy['life']\r\n enemyHealth = enemy['life']\r\n if agentHealth == -1:\r\n agentHealth = obs['Life']\r\n elif obs['Life'] != agentHealth:\r\n healthDelta = obs['Life'] - agentHealth\r\n agentHealth = obs['Life']\r\n score = self.calc_reward(30, healthDelta, damageDelta)\r\n if score > max_score:\r\n max_score = score\r\n if score < min_score:\r\n min_score = score\r\n R.append(score)\r\n T = t - self.n + 1\r\n if T >= 0:\r\n self.update_q_table(t, S, A, R, T)\r\n S.append(state)\r\n A.append(action)\r\n t += 1\r\n self.act(action)\r\n if state == (\"Finished\",):\r\n break\r\n\r\n timeInRound = time.time() - roundTimeStart\r\n if state == (\"Finished\",) and agentHealth != 0:\r\n kill = 1\r\n self.agent.sendCommand(\"quit\")\r\n else:\r\n kill = 0\r\n if abs((Arena.TIMELIMIT/1000)-timeInRound) > 1:\r\n agentHealth = 0\r\n print ('max_score = {}, min_score = {}'.format(max_score, min_score))\r\n self.update_history(round_enemy, agentHealth, timeInRound, kill)\r\n return",
"def simulate_battle(player1, player2):\r\n #See if one side lands a critical hit - Highest crit possible is theoretically ~70%.\r\n p1vict = player1['Crit']\r\n p2vict = p1vict + player2['Crit'] #This should theoretically be <=140\r\n random_crit = random.randint(0,500)\r\n if random_crit < p1vict:\r\n return player1, player2 #player1 wins; Winner is returned first\r\n elif random_crit < p2vict:\r\n return player2, player1\r\n \r\n #If no victory occurs, then base it off proportion of ATK\r\n victory_number = random.randint(0, player1['ATK'] + player2['ATK'])\r\n if victory_number < player1['ATK']:\r\n return player1, player2\r\n else:\r\n return player2, player1",
"def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line",
"def sleep(self):\n # change the pet to 'sleeping'\n self.sprite = pygame.image.load(self.format_image(self.animal + \"spritesleep\"))\n # set the pet's energy to 10\n self.set_stat(\"energy\", 10)\n # change the background to night time\n self.game_inst.get_menus()[0].get_decorations()[0].set_sprite(pygame.image.load(self.format_image(\"backgroundnight\")))\n deco = self.game_inst.get_menus()[0].get_decorations()[0].get_sprite()\n self.game_inst.get_menus()[0].get_decorations()[0].set_sprite(pygame.transform.scale(deco, (int(deco.get_width() * 0.75), int(deco.get_height() * 0.75))))\n # create a dark cover to dim the display\n dark = pygame.Surface((WIDTH, HEIGHT))\n dark.fill(0)\n dark.set_alpha(100)\n self.game_inst.redraw()\n self.surface.blit(dark, (0, 0))\n pygame.display.update()\n # wait 3.5 seconds\n pygame.time.delay(3500)\n # change sprite and background back to normal and redraw (also gets rid of tint)\n self.sprite = pygame.image.load(self.format_image(self.animal + \"sprite\"))\n self.game_inst.get_menus()[0].get_decorations()[0].set_sprite(pygame.image.load(self.format_image(\"background\")))\n deco = self.game_inst.get_menus()[0].get_decorations()[0].get_sprite()\n self.game_inst.get_menus()[0].get_decorations()[0].set_sprite(pygame.transform.scale(deco, (int(deco.get_width() * 0.75), int(deco.get_height() * 0.75))))\n # set the time to 7:00 am\n self.game_inst.set_time([6, 59, 980])\n self.game_inst.redraw()\n pygame.display.update()",
"def test_h_decreases_speed_in_direction_opposite_to_banana(self):\n # using y and h keys basically let you switch from 2.3 to 2.4\n # in a continuous fashion. Works only from 2.3\n training = 2.3\n self.tb.set_level_variables(training)\n self.tb.restart_bananas()\n # check initial speed\n #print('wrong speed', self.tb.wrong_speed)\n # first two frames get messed up for timing, so go two steps\n #print self.tb.free_move\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n camera_h = self.tb.base.camera.getH()\n #print camera_h\n # go a few steps, see how long it takes\n start = time.time()\n for i in range(30):\n #print self.tb.x_mag\n #print self.tb.speed\n taskMgr.step()\n first_time = time.time() - start\n #print('time', first_time)\n first_dist = camera_h - self.tb.base.camera.getH()\n #print('dist', first_dist)\n first_speed = abs(first_dist/first_time)\n # now change speed\n messenger.send('h')\n # have to reset for it to go into effect\n self.tb.restart_bananas()\n #print('wrong speed', self.tb.wrong_speed)\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n avatar_h = self.tb.base.camera.getH()\n #print avatar_h\n start = time.time()\n for i in range(30):\n #print self.tb.x_mag\n #print self.tb.speed\n taskMgr.step()\n second_time = time.time() - start\n #print('time', second_time)\n #print self.tb.base.camera.getH()\n second_dist = avatar_h - self.tb.base.camera.getH()\n #print('dist', second_dist)\n second_speed = abs(second_dist / second_time)\n #print('first', first_speed)\n #print('second', second_speed)\n self.assertTrue(first_speed > second_speed)",
"def run_sim(self):\n for _ in range(self.steps): \n lever = self.bandit.get_lever_to_pull()\n reward, optimal = self.slots.get_reward(lever)\n self.bandit.update_lever_rewards(reward, lever, optimal)\n self.game_summary()",
"def run_time(self, speed: int, time: int, then: Stop = Stop.HOLD, wait: bool = True):\n ...",
"def testrandom(self):\n for i in range(100):\n AmuletAbility()",
"def test_strength_assignment(self):\n for trial in range(0, 100):\n self.set_up()\n self.assertBetween(self.team_gaussian.get_points(), 0, 1)\n self.assertBetween(self.team_random.get_points(), 0, 1)\n self.assertBetween(self.team_uniform.get_points(), 0, 1)",
"def run():\n number_of_trials = 100\n print 'RUNNING SIMULATION FOR {} TRIALS...'.format(number_of_trials)\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=.0001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=number_of_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n a.performace_report(number_of_trials)",
"def npc_shoot(self):\r\n if npc.fold_status == True:\r\n if npc.debuff_status == \"double\":\r\n print(\"Since \"+self.name+\" is allowed to fold, he only needs to take 1 shot.\")\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \"+self.name+\" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \"+self.name+\" luckily survived!\")\r\n elif npc.debuff_status == \"triple\":\r\n print(\"Since \"+self.name+\" is allowed to fold, he only needs to take 2 shots.\")\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \"+self.name+\" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \"+self.name+\" luckily survived!\")\r\n time.sleep(1)\r\n print(self.name+\" got 1 shot left. \"+self.name+\" respins and prepares to shoot.\")\r\n npc.firing_chamber = random.choice(CHAMBER_LIST)\r\n time.sleep(1)\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \" + self.name + \" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \" + self.name + \" luckily survived!\")\r\n else:\r\n print(\"Since \"+self.name+\" is allowed to fold, he does not need to take any shot.\")\r\n elif npc.fold_status == False:\r\n print(self.name+\" is preparing to shoot.\")\r\n if npc.debuff_status == \"double\":\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \"+self.name+\" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \"+self.name+\" luckily survived!\")\r\n time.sleep(1)\r\n print(self.name+\" got 1 shot left. \"+self.name+\" re-spins and prepares to shoot.\")\r\n npc.firing_chamber = random.choice(CHAMBER_LIST)\r\n time.sleep(1)\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \" + self.name + \" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \" + self.name + \" luckily survived!\")\r\n elif npc.debuff_status == \"triple\":\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \"+self.name+\" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \"+self.name+\" luckily survived!\")\r\n time.sleep(1)\r\n print(self.name+\" got 2 shots left. \"+self.name+\" re-spins and prepares to shoot.\")\r\n npc.firing_chamber = random.choice(CHAMBER_LIST)\r\n time.sleep(1)\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \" + self.name + \" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \" + self.name + \" luckily survived!\")\r\n time.sleep(1)\r\n print(self.name + \" got 1 shot left. 
\" + self.name + \" re-spins and prepares to shoot.\")\r\n npc.firing_chamber = random.choice(CHAMBER_LIST)\r\n time.sleep(1)\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \" + self.name + \" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \" + self.name + \" luckily survived!\")\r\n else:\r\n if set(npc.firing_chamber) <= set(npc.bullet_chamber):\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"BOOM! \" + self.name + \" died!\")\r\n time.sleep(1)\r\n npc.alive = False\r\n else:\r\n printIt(\"---\")\r\n time.sleep(1)\r\n print(\"There is nothing happened. \" + self.name + \" luckily survived!\")",
"def monster_action(self):\n if random.randint(1,2) == 1:\n return \"attack\"\n else:\n return \"defend\"",
"def starter_strategy(self, game_state):\n # First, place basic defenses\n gamelib.debug_write(\"we have \",game_state.get_resource(1,0), \"MP\")\n #self.i_dont_think_so(game_state)\n #dirty play time\n if game_state.turn_number == 0:\n game_state.attempt_spawn(DEMOLISHER, [0,13])\n\n if (game_state.turn_number > 3 and (game_state.contains_stationary_unit([25,13]) and game_state.contains_stationary_unit([25,14]) and AlgoStrategy.opponent_walls(game_state) > BORDER_LENGTH-3)) or self.cannon:\n #self.bunker_bust(game_state)\n self.cannon = True\n self.build_defences(game_state)\n #if game_state.get_resource(1,0) >40:\n #self.prep_cannon(game_state)\n #self.fire_cannon(game_state)\n\n\n\n else:\n self.build_defences(game_state)\n # Now build reactive defenses based on where the enemy scored\n #self.build_reactive_defense(game_state)\n\n # If the turn is less than 5, stall with interceptors and wait to see enemy's base\n #if game_state.turn_number < 5:\n # self.stall_with_interceptors(game_state)\n # print(\"weak\")\n #else:\n # Now let's analyze the enemy base to see where their defenses are concentrated.\n # If they have many units in the front we can build a line for our demolishers to attack them at long range.\n #if self.detect_enemy_unit(game_state, unit_type=None, valid_x=#None, valid_y=[14, 15]) > 10:\n # #self.demolisher_line_strategy(game_state)\n # scout_spawn_location_options = [[13, 0], [14, 0]]\n # game_state.attempt_spawn(DEMOLISHER, [[13, 0]], 1000)\n #else:\n # They don't have many units in the front so lets figure out their least defended area and send Scouts there.\n\n # Only spawn Scouts every other turn\n # Sending more at once is better since attacks can only hit a single scout at a time\n # if game_state.turn_number % 2 == 1:\n # To simplify we will just check sending them from back left and right\n # scout_spawn_location_options = [[13, 0], [14, 0]]\n # best_location = self.least_damage_spawn_location(game_state, scout_spawn_location_options)\n # game_state.attempt_spawn(SCOUT, best_location, 1000)\n # else:\n scout_spawn_location_options = [[13, 0], [14, 0]]\n if game_state.turn_number < 2:\n game_state.attempt_spawn(INTERCEPTOR, [[20, 6]], 2)\n game_state.attempt_spawn(INTERCEPTOR, [[7, 6]], 1)\n game_state.attempt_spawn(INTERCEPTOR, [[9, 4]], 1)\n\n #if game_state.turn_number > 5:\n # game_state.attempt_spawn(INTERCEPTOR, [[13, 0]], 1)\n\n if game_state.turn_number == 2:\n game_state.attempt_spawn(SCOUT, scout_spawn_location_options, 1000)\n\n if game_state.turn_number % 3:\n game_state.attempt_spawn(SCOUT, scout_spawn_location_options, 1000)",
"def test_y_increases_speed_in_direction_opposite_to_banana(self):\n # using y and h keys basically let you switch from 2.3 to 2.4\n # in a continuous fashion. Works only from 2.3\n training = 2.3\n self.tb.set_level_variables(training)\n self.tb.restart_bananas()\n # check initial speed\n initial_speed = self.tb.wrong_speed\n # first two frames get messed up for timing, so go two steps\n #print self.tb.free_move\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n camera_h = self.tb.base.camera.getH()\n #print('wrong speed', self.tb.wrong_speed)\n #print camera_h\n # go a few steps, see how long it takes\n start = time.time()\n for i in range(30):\n #print self.tb.speed\n taskMgr.step()\n first_time = time.time() - start\n first_dist = camera_h - self.tb.base.camera.getH()\n #print('dist', first_dist)\n first_speed = abs(first_dist/first_time)\n # now change speed\n messenger.send('y')\n # have to reset for it to go into effect\n self.tb.restart_bananas()\n taskMgr.step()\n taskMgr.step()\n messenger.send('x_axis', [2 * -self.tb.multiplier])\n avatar_h = self.tb.base.camera.getH()\n #print avatar_h\n start = time.time()\n for i in range(30):\n #print self.tb.speed\n taskMgr.step()\n second_time = time.time() - start\n #print('time', second_time)\n #print('wrong speed', self.tb.wrong_speed)\n #print self.tb.base.camera.getH()\n second_dist = avatar_h - self.tb.base.camera.getH()\n #print('dist', second_dist)\n second_speed = abs(second_dist / second_time)\n #print('first', first_speed)\n #print('second', second_speed)\n # if wrong_speed was already the same as the speed in the opposite direction,\n # verify speed is actually the same\n if initial_speed == 1:\n self.assertAlmostEqual(first_speed, second_speed)\n else:\n self.assertTrue(first_speed < second_speed)",
"def run_simulation(num_robots, speed, capacity, width, height, dirt_amount, min_coverage, num_trials,\n robot_type):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns tuple of the form (max_hit, accuracy) for the given levels after factoring in the weapons available and the selected attack style. Assumes enemy has level 1 defence and 0 defence bonus | def get_max_hit_and_accuracy(
levels, attack_style, attack_bonus, strength_bonus):
weapon_attack, weapon_strength = get_weapon_stats(levels.attack)
attack_bonus += weapon_attack
strength_bonus += weapon_strength
if attack_style == Attack_Style.ATTACK:
effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)
effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)
elif attack_style == Attack_Style.STRENGTH:
effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)
effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)
enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)
max_hit = osrs.max_hit(effective_strength, strength_bonus)
accuracy = osrs.accuracy(effective_attack, attack_bonus,
enemy_effective_defence, 0)
return (max_hit, accuracy) | [
"def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)",
"def calculate_base_hp(classes_and_levels, con_modifier):\n is_level_1 = True\n current_hp = 0\n total_level = 0\n\n for class_and_level in classes_and_levels:\n dnd_class = class_and_level[0]\n level = class_and_level[1]\n\n avg_hit_dice = math.floor(dnd_class.hit_die / 2) + 1\n\n # For the base class\n if is_level_1:\n # On 1st level: max_hit_die + con_modifier\n current_hp = current_hp + dnd_class.hit_die + con_modifier\n # On every level above 1st: avg_hit_dice + con_modifier\n current_hp = current_hp + ((avg_hit_dice + con_modifier) * (level - 1))\n is_level_1 = False\n\n # On every level above 1st: avg_hit_dice + con_modifier\n else:\n current_hp = current_hp + ((avg_hit_dice + con_modifier) * level)\n\n # If matched_dnd_class is a Draconic Sorcerer\n if dnd_class.name == \"Draconic Sorcerer\":\n current_hp = current_hp + level\n\n total_level = total_level + level\n\n return (current_hp, total_level)",
"def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]",
"def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus",
"def set_game_level(user_level_input):\n if user_level_input == \"easy\":\n return sample_1, answer_sample_1\n elif user_level_input == \"medium\":\n return sample_2, answer_sample_2\n elif user_level_input == \"medium\":\n return sample_3, answer_sample_3\n else:\n return \"That level does not exist\"",
"async def max_lvl_lookup(self, ctx, *username):\n async with ctx.typing():\n url_safe_name = '+'.join(username)\n safe_name = ' '.join(username)\n user = Hiscore(url_safe_name)\n embed = discord.Embed(title=\"99s\", description=f'{safe_name}\\n*{user.overall_level}* total level')\n count = 0\n msg = f''\n for (name, level, xp, rank) in user.levels:\n if int(level) == 99:\n msg += f'**{name}** {int(xp) / 1000000:.2f}M xp\\n'\n count += 1\n if count == 0:\n embed.add_field(name=\"**0 / 23**\", value=\"User doesn't have any level 99s\")\n else:\n embed.add_field(name=f\"**{count} / 23**\", value=msg)\n await ctx.send(f'{ctx.message.author.mention}', embed=embed)\n return",
"def choose_target(self):\n\n scores = list()\n for enemy in self.enemies:\n\n target_id = None\n for k, v in enemy.duel_params: # list of tuple (k,v)\n if k == 'target_id':\n target_id = v\n fbids = (self.get_fbid(), target_id)\n cursor = self.database_connection.execute('SELECT win, lose FROM arena_duel_result WHERE player_fbid = ? AND enemy_fbid = ?', fbids)\n history = cursor.fetchone()\n cursor.close()\n if history is not None:\n victory_ratio = float(history[0]) / float(history[0] + history[1])\n else:\n victory_ratio = 0.5\n\n # 0, 1, 2, 3, 4, 5, 6, 7, -7, -6, -5, -4, -3, -2, -1\n score_gain = [100, 105, 110, 115, 120, 125, 130, 135, 65, 70, 75, 80, 85, 90, 95][enemy.rank - self.arena_rank] * victory_ratio\n score_lost = [-10, -10, -10, -5, -5, 0, 0, 0, -35, -30, -25, -20, -15, -10, -5][enemy.rank - self.arena_rank] * (1.0 - victory_ratio)\n score_level_diff = float(enemy.level - self.get_level()) / float(self.get_level()) * -20.0\n score = score_gain + score_lost + score_level_diff\n\n scores.append(score)\n\n\n #print scores\n #print self.enemies\n\n score_enemy = sorted(zip(scores, self.enemies), reverse=True)\n\n for s, e in score_enemy:\n enemy_down = False\n #if self.target_arena_level_minus <= (e.rank - self.arena_rank) <= self.target_arena_level_plus:\n if self.rankn <= (e.rank - self.arena_rank) <= self.rankp:\n\n # skip known dead target\n for k, v in e.duel_params:\n if k == 'target_id':\n if v in self.known_dead_target_ids:\n enemy_down = True\n break\n\n if enemy_down:\n continue\n\n target = e\n break\n else:\n target = None\n print 'OMG! No target...'\n\n return target",
"def get_max_hit_increases(\n start_strength_level, end_strength_level,\n strength_bonus, stance_adder):\n greatest_max_hit = 0\n max_hit_increases = []\n cur_strength_level = start_strength_level\n while cur_strength_level < end_strength_level:\n effective_strength = osrs.effective_level(\n cur_strength_level, 1, stance_adder, 1)\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n\n if max_hit > greatest_max_hit:\n greatest_max_hit = max_hit\n max_hit_increases.append((cur_strength_level, max_hit))\n\n cur_strength_level += 1",
"def get_bonus_spells(casting_ability_modifier: AbilityModifier) \\\n -> Tuple[int, int, int, int, int, int, int, int, int, int]:\n if casting_ability_modifier < -5:\n raise ValueError(f'Ability score modifier must be at least -5: {casting_ability_modifier}')\n\n return 0, \\\n max(0, (casting_ability_modifier + 3) // 4), \\\n max(0, (casting_ability_modifier + 2) // 4), \\\n max(0, (casting_ability_modifier + 1) // 4), \\\n max(0, (casting_ability_modifier + 0) // 4), \\\n max(0, (casting_ability_modifier - 1) // 4), \\\n max(0, (casting_ability_modifier - 2) // 4), \\\n max(0, (casting_ability_modifier - 3) // 4), \\\n max(0, (casting_ability_modifier - 4) // 4), \\\n max(0, (casting_ability_modifier - 5) // 4)",
"def get_stats(level: int, vocation: str):\r\n try:\r\n level = int(level)\r\n except ValueError:\r\n return \"bad level\"\r\n if level <= 0:\r\n return \"low level\"\r\n elif level > 2000:\r\n return \"high level\"\r\n\r\n vocation = vocation.lower().strip()\r\n if vocation in KNIGHT:\r\n hp = (level - 8) * 15 + 185\r\n mp = (level - 0) * 5 + 50\r\n cap = (level - 8) * 25 + 470\r\n vocation = \"knight\"\r\n elif vocation in PALADIN:\r\n hp = (level - 8) * 10 + 185\r\n mp = (level - 8) * 15 + 90\r\n cap = (level - 8) * 20 + 470\r\n vocation = \"paladin\"\r\n elif vocation in MAGE:\r\n hp = (level - 0) * 5 + 145\r\n mp = (level - 8) * 30 + 90\r\n cap = (level - 0) * 10 + 390\r\n vocation = \"mage\"\r\n elif vocation in NO_VOCATION:\r\n vocation = \"no vocation\"\r\n else:\r\n return \"bad vocation\"\r\n\r\n if level < 8 or vocation == \"no vocation\":\r\n hp = (level - 0) * 5 + 145\r\n mp = (level - 0) * 5 + 50\r\n\r\n cap = (level - 0) * 10 + 390\r\n\r\n exp = (50*pow(level, 3)/3) - 100*pow(level, 2) + (850*level/3) - 200\r\n exp_tnl = 50*level*level - 150 * level + 200\r\n\r\n return {\"vocation\": vocation, \"hp\": hp, \"mp\": mp, \"cap\": cap, \"exp\": int(exp), \"exp_tnl\": exp_tnl}",
"def get_defense(self):\n return max(0, self.get_combat() - 2)",
"def find_ability(abilities: list, character_class: str, attack_type: str) -> Dict:\n # Find the ability to use\n ability_to_use = {\"effects\": [], \"enhancements\": []}\n for ability in abilities:\n if (ability[\"class\"] == character_class) and (ability[\"type\"] == attack_type):\n ability_to_use = ability\n break\n\n return ability_to_use",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def determine_enemy_strategy(game):",
"def getAbilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n print \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])\n print \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])",
"def attack_bonus_on_level(self, level):\n raise NotImplementedError",
"def get_best_levels(levels: List[LevelType], scores: List[int], amount: int = 2) -> List[LevelType]:\n assert len(levels) == len(scores)\n assert len(levels) >= amount\n\n sorted_levels = sorted(zip(scores, levels), reverse=True)\n return [sorted_levels[i][1] for i in range(amount)]",
"def calc_loss_mult(average_lvl, player_lvl):\n return LOSS_BASE + (LOSS_MULT * (player_lvl / average_lvl))",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
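The record above relies on an `osrs` helper module (`effective_level`, `max_hit`, `accuracy`) that is not included in this dataset. The stand-ins below are a minimal sketch based on the commonly published OSRS melee formulas; the argument meanings (prayer multiplier, stance adder, trailing multiplier) are inferred from the call sites and are assumptions, not the real module's code.

import math

def effective_level(level, prayer_multiplier, stance_adder, other_multiplier):
    # Assumed form: floor(level * prayer bonus) + stance bonus + 8, times a
    # final multiplier. Whether the real module folds the +8 in here or
    # inside max_hit is unknown.
    return int((math.floor(level * prayer_multiplier) + stance_adder + 8) * other_multiplier)

def max_hit(effective_strength, strength_bonus):
    # Published melee max-hit formula.
    return math.floor(0.5 + effective_strength * (strength_bonus + 64) / 640)

def accuracy(effective_attack, attack_bonus, effective_defence, defence_bonus):
    # Attack/defence rolls and the standard hit-chance split.
    attack_roll = effective_attack * (attack_bonus + 64)
    defence_roll = effective_defence * (defence_bonus + 64)
    if attack_roll > defence_roll:
        return 1 - (defence_roll + 2) / (2 * (attack_roll + 1))
    return attack_roll / (2 * (defence_roll + 1))

# e.g. 60 strength, aggressive stance (+3), +66 strength bonus (dragon scimitar):
# max_hit(effective_level(60, 1, 3, 1), 66) -> 14 under these assumptions.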
Returns tuple of the form (attack_bonus, strength_bonus) for the best scimitar (weapon) at a given attack level. Scimitars are almost always the most efficient weapon. | def get_weapon_stats(attack_level):
if attack_level >= 60:
# Dragon scimitar
return (67, 66)
elif attack_level >= 40:
# Rune scimitar
return (45, 44)
elif attack_level >= 30:
# Adamant scimitar
return (29, 28)
elif attack_level >= 20:
# Mithril scimitar
return (21, 20)
elif attack_level >= 10:
# Black scimitar
return (19, 14)
elif attack_level >= 5:
# Steel scimitar
return (15, 14)
else:
# Iron scimitar
return (10, 9) | [
"def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.weaponinventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon",
"def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value",
"def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus",
"def getEquippedWeaponToHit(self):\n weapon = self.getEquippedWeapon()\n if weapon.getName() == \"fist\":\n return 0\n return weapon.getToHitBonus()",
"def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)",
"def Attack_Weapon(self, bonus=0):\n bonus = str(bonus);\n if (bonus == \"0\"):\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"]] vs \", self.Attribute_Power(\"def\")));\n else:\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"+\", bonus, \"]] vs \", self.Attribute_Power(\"def\")));",
"def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int",
"def get_weapon(self):\n item = Weapon()\n weapon_item_damage = item.get_weapon_damage()\n self.weapon_type = weapon_item_damage[0]\n self.weapon_bonus = weapon_item_damage[1]\n print('gladiator.get_weapon() called; weapon selected')\n self.determine_attack()\n print('gladiator.determine_attack() called; attack determined')",
"def attack(self):\n\n lowest_attack = int(self.attack_strength)// 2\n attack_strength = random.randint(lowest_attack, int(self.attack_strength))\n return attack_strength",
"def get_attack_bonus(self, stat_name):\n logger.debug(f\"Getting attack bonus for {stat_name}\")\n stat = self.get_stat_by_name(stat_name)\n if not stat:\n logger.error(f'Could not find stat:{stat_name}')\n return None\n else:\n # TODO: Add functionality to look for additives from items and spells\n if stat.prof:\n return self.get_prof_bonus() + stat.modifier\n else:\n return stat.modifier",
"def get_weapon(self):\n\n return self.suggestion_set[1]",
"def attack(self):\n damage = randint((self.attackStrenght / 2), self.attackStrenght)\n print(\"Weapon damage\",damage)\n return damage",
"def get_best_action(self, strategy, player):\n actions = self.game.get_actions(player)\n action = None\n if not actions:\n action = (player, None)\n elif strategy == \"q\":\n action = actions[np.argmax([self.weights @ extractor(self.game, a) for a in actions])]\n elif strategy == \"random\":\n action = actions[random.randint(0, len(actions) - 1)]\n feature = extractor(self.game.copy(), action)\n return feature, action",
"def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = 
int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n 
), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == 
\"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 
50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n 
overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating",
"def calculate_hit_damage(weapon_power_value):\n return random.randrange(int(weapon_power_value / 2), (weapon_power_value + 1))",
"def getDamage(self):\n \n weapon_dmg = self.weapon.getDamage()\n cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,\n \"attacking\")\n true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()\n return true_dmg, att_cats"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
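A brief usage sketch for the lookup in the record above, exercising each attack-level threshold; nothing beyond the function defined in that record is assumed.

# Tier boundaries map to the (attack_bonus, strength_bonus) of the best scimitar.
for lvl in (1, 5, 10, 20, 30, 40, 60, 99):
    print(lvl, get_weapon_stats(lvl))
# Expected output, per the branches above:
# 1 (10, 9)    5 (15, 14)   10 (19, 14)  20 (21, 20)
# 30 (29, 28)  40 (45, 44)  60 (67, 66)  99 (67, 66)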
Returns list of tuples of the form (level, max_hit) for levels between start_strength_level and end_strength_level that increase max_hit. Assumes start_strength_level < end_strength_level and no multipliers | def get_max_hit_increases(
start_strength_level, end_strength_level,
strength_bonus, stance_adder):
greatest_max_hit = 0
max_hit_increases = []
cur_strength_level = start_strength_level
while cur_strength_level < end_strength_level:
effective_strength = osrs.effective_level(
cur_strength_level, 1, stance_adder, 1)
max_hit = osrs.max_hit(effective_strength, strength_bonus)
if max_hit > greatest_max_hit:
greatest_max_hit = max_hit
max_hit_increases.append((cur_strength_level, max_hit))
        cur_strength_level += 1

    return max_hit_increases | [
"def calculateGagLimits():\r\n \"\"\"\r\n (may be different here with corporate clash, as this is from ttr)\r\n Limit from a maxed gagtrack: 30,25,20,15,7,3,1\r\n when level is 8 (max) is 30,25,20,15,7,3,1\r\n when level is 7 it is 30,25,20,15,7,3\r\n 6 is ?? (probably 25,20,15,10,7,3)\r\n 5 is 25,20,15,10,3\r\n 4 is 20,15,10,5\r\n 3 is 15,10,5\r\n 2 is 10,5\r\n 1 is 10\r\n \"\"\"\r\n \r\n for i in range(0,8):\r\n if MyGagLevel[i] > 0:\r\n if MyGagLevel[i] == 1:\r\n GagLevelLimit[i] = [10]\r\n elif MyGagLevel[i] == 2:\r\n GagLevelLimit[i] = [10,5]\r\n elif MyGagLevel[i] == 3:\r\n GagLevelLimit[i] = [15,10,5]\r\n elif MyGagLevel[i] == 4:\r\n GagLevelLimit[i] = [20,15,10,5]\r\n elif MyGagLevel[i] == 5:\r\n GagLevelLimit[i] = [25,20,15,10,3]\r\n elif MyGagLevel[i] == 6:\r\n GagLevelLimit[i] = [25,20,15,10,7,3]\r\n elif MyGagLevel[i] == 7:\r\n GagLevelLimit[i] = [30,25,20,15,7,3]\r\n elif MyGagLevel[i] == 8:\r\n GagLevelLimit[i] = [30,25,20,15,7,3,1]\r\n else:\r\n GagLevelLimit[i] = [None]",
"def _filter_level(self, min_level=1, max_level=20):\n low_level = self._data['level'] >= min_level\n high_level = self._data['level'] <= max_level\n return self._data[low_level & high_level]",
"def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)",
"def filter_battle_search(user_level):\n level_range = 5 # range of level for the battle (+-)\n\n max_level_range = level_range + user_level\n min_level_range = user_level - level_range\n\n # the min level must be 1\n if min_level_range < 1:\n min_level_range = 1\n\n return min_level_range, max_level_range",
"def get_gain_limits(self):\n return 0, 100, 1",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def _find_index_limits(dimension, start, end, method='lower'):\n if start == end:\n array = dimension - start\n if method == 'lower':\n useful_index = np.array([1, 1]) * np.argmax(array[array <= 0])\n elif method == 'higher':\n useful_index = np.array(\n [1, 1]) * (np.argmax(array[array <= 0]) + 1)\n else:\n useful_index = np.array([1, 1]) * np.argmin(np.fabs(array))\n else:\n useful_index = np.nonzero((dimension >= start) &\n (dimension <= end))[0]\n lims = useful_index[0], useful_index[-1] + 1\n return lims",
"def _hit_range_get(self):\n return (self.hit_start, self.hit_end)",
"def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)",
"def get_effort_levels(self, *args):\n def geteffort(a, b=self.get_num() - 1):\n return CPX_PROC.getmipstarts_effort(self._env._e, self._cplex._lp, a, b)\n return apply_freeform_two_args(geteffort, self._conv, args)",
"def range_by_score(self, min, max):\r\n data = self.items()\r\n keys = [r[1] for r in data] \r\n start = bisect.bisect_left(keys, min)\r\n end = bisect.bisect_right(keys, max, start)\r\n return self._as_set()[start:end]",
"def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])",
"def get_best_levels(levels: List[LevelType], scores: List[int], amount: int = 2) -> List[LevelType]:\n assert len(levels) == len(scores)\n assert len(levels) >= amount\n\n sorted_levels = sorted(zip(scores, levels), reverse=True)\n return [sorted_levels[i][1] for i in range(amount)]",
"def calc_level(self):\n if self.xp < 3:\n xp_potential = 1\n if self.xp >= 3 and self.xp < 6:\n xp_potential = 2\n if self.xp >= 6 and self.xp < 12:\n xp_potential = 3\n if self.xp >= 12 and self.xp < 24:\n xp_potential = 4\n if self.xp >= 24 and self.xp < 48:\n xp_potential = 5\n if self.xp >= 48 and self.xp < 72:\n xp_potential = 6\n if self.xp >= 72 and self.xp < 96:\n xp_potential = 7\n if self.xp >= 96 and self.xp < 130:\n xp_potential = 8\n if self.xp >= 130 and self.xp < 170:\n xp_potential = 9\n if self.xp >= 170:\n xp_potential = 10\n if self.dominion < 2:\n dom_potential = 1\n if self.dominion >= 2 and self.dominion < 4:\n dom_potential = 2\n if self.dominion >= 4 and self.dominion < 10:\n dom_potential = 3\n if self.dominion >= 10 and self.dominion < 22:\n dom_potential = 4\n if self.dominion >= 22 and self.dominion < 38:\n dom_potential = 5\n if self.dominion >= 38 and self.dominion < 57:\n dom_potential = 6\n if self.dominion >= 57 and self.dominion < 76:\n dom_potential = 7\n if self.dominion >= 76 and self.dominion < 95:\n dom_potential = 8\n if self.dominion >= 95 and self.dominion < 124:\n dom_potential = 9\n if self.dominion >= 124:\n dom_potential = 10\n self.level = min(xp_potential, dom_potential)",
"def get_power_levels(self):",
"def bounds_slope(regressions):\n max_up_slope = 0\n min_down_slope = 0\n for regression in regressions.itervalues():\n min_slope = regression.find_min_slope()\n max_up_slope = max(max_up_slope, min_slope)\n min_down_slope = min(min_down_slope, min_slope)\n \n return (max_up_slope, min_down_slope)",
"def bites(hit_dict):\n x = [(qstart,qend) for qstart,qend,sstart,send,evalue in hit_dict]\n #print >>sys.stderr,x\n x.sort()\n mid = [abs(hstop-x[n+1][0]) for n,(hstart,hstop) in enumerate(x[:-1])]\n start = min(x)[0]\n stop= max(x)[1]\n return mid,start,stop",
"def get_level_list(cls, score):\n if 0 <= score <= 10:\n return cls.levels.get('1')\n elif 10 < score <= 20:\n return cls.levels.get('2')\n return cls.levels.get('3')",
"def __calculateSupportResistenceLevels(self):\n\n for i in range(2, self.df.shape[0] - 2):\n if self.__isSupport(self.df, i):\n l = self.df['low'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n elif self.__isResistance(self.df, i):\n l = self.df['high'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n return self.levels"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
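For illustration, the same level-by-level scan can be run against the stand-in `effective_level`/`max_hit` helpers sketched after the earlier max-hit record; those are assumed formulas, not the dataset's real `osrs` module.

# Strength levels from 1-49 at which the max hit first increases, assuming a
# +44 strength bonus (rune scimitar) and an aggressive stance (+3).
greatest_max_hit = 0
max_hit_increases = []
for lvl in range(1, 50):
    hit = max_hit(effective_level(lvl, 1, 3, 1), 44)
    if hit > greatest_max_hit:
        greatest_max_hit = hit
        max_hit_increases.append((lvl, hit))
print(max_hit_increases)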
Generates steric beads required for checking steric clashes between motifs. Each residue has three beads, modeled after the typical three-bead models used in coarse-grained modeling. The three beads are Phosphate (P, OP1, OP2), Sugar (O5',C5',C4',O4',C3',O3',C1',C2',O2'), and Base (all remaining atoms). | def get_beads(self):
phos_atoms,sugar_atoms,base_atoms = [],[],[]
for i,a in enumerate(self.atoms):
if a is None:
continue
if i < 3:
phos_atoms.append(a)
elif i < 12:
sugar_atoms.append(a)
else:
base_atoms.append(a)
beads = []
types = [residue.BeadType.PHOS, residue.BeadType.SUGAR, residue.BeadType.BASE]
for i,alist in enumerate([phos_atoms,sugar_atoms,base_atoms]):
if len(alist) > 0:
beads.append(residue.Bead(util.center(alist), types[i]))
return beads | [
"def rec_events_scaffold_protein_binding_shc():\n \n # SHC binds to ErbB dimers\n for i in ['1', '2', '3', '4']:\n bind_complex(erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None),'b', SHC(batp=None, st='U', bgrb=None), 'bgap', par['GAP_bind_SHC'], m1=erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None))\n \n for i in ['2', '3', '4']:\n bind_complex(erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None),'b', SHC(batp=None, st='U', bgrb=None), 'bgap', par['GAP_bind_SHC'], m1=erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None))\n\n # Bound and unbound SHC phosphorylation:\n Rule('SHC_phos',\n erbb(bd=ANY, st='P', b=1) % SHC(bgap=1, bgrb=None, batp=None, st='U') >>\n erbb(bd=ANY, st='P', b=1) % SHC(bgap=1, bgrb=None, batp=None, st='P'),\n par['SHC_phos'])\n\n #SHC:P binds/unbinds ErbB dimers\n for i in ['1', '2', '3', '4']:\n bind_complex(erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), 'b', SHC(batp=None, st='P', bgrb=None), 'bgap', par['GAP_bind_SHCP'], m1=erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None))\n\n for i in ['2', '3', '4']:\n bind_complex(erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), 'b', SHC(batp=None, st='P', bgrb=None), 'bgap', par['GAP_bind_SHCP'], m1=erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None))\n\n #SHC:P-GRB2 binds ErbB dimers\n for i in ['1', '2', '3', '4']:\n bind_complex(erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), 'b', SHC(batp=None, st='P', bgrb=2, bgap=None) % GRB2(bgap=None, bgab1=None, bsos=None, bcbl=None, b=2), 'bgap', par['GAP_bind_SHCP'], m1=erbb(ty='1', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), m2=SHC(batp=None, st='P', bgrb=2, bgap=None))\n\n for i in ['2', '3', '4']:\n bind_complex(erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None) % erbb(ty=i, bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), 'b', SHC(batp=None, st='P', bgrb=2, bgap=None) % GRB2(bgap=None, bgab1=None, bsos=None, bcbl=None, b=2), 'bgap', par['GAP_bind_SHCP'], m1=erbb(ty='2', bd=1, st='P', b=None, pi3k1=None, pi3k2=None, pi3k3=None, pi3k4=None, pi3k5=None, pi3k6=None), m2=SHC(batp=None, st='P', bgrb=2, bgap=None))\n\n # Unbound SHC dephosphorylation \n Rule('SHC_unbound_dephos',\n SHC(bgap=None, bgrb=None, batp=None, st='P') >>\n SHC(bgap=None, bgrb=None, batp=None, st='U'),\n par['SHC_unbound_dephos'])",
"def test_bespoke_bond_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n bond_smirks = gen._get_bespoke_bond_smirks(molecule=mol)\n # there should be 2 unique bond smirks\n assert len(bond_smirks) == 2\n all_bonds = []\n for smirk in bond_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_bonds.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all bonds are covered\n for bond in mol.bonds:\n assert (bond.atom1_index, bond.atom2_index) in all_bonds",
"def test_bespoke_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol)\n # there should be 5 unique torsions\n assert len(torsion_smirks) == 5\n\n all_torsions = []\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_torsions.extend(atoms)\n assert compare_matches(atoms, smirk.atoms) is True\n\n for torsion in mol.propers:\n dihedral = tuple([atom.molecule_atom_index for atom in torsion])\n assert dihedral in all_torsions or tuple(reversed(dihedral)) in all_torsions",
"def test_bespoke_target_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol, central_bonds=[(1, 2)])\n # there should be 3 unique smirks for this molecule\n # H-C-C-H, H-C-C-O, O-C-C-O\n assert len(torsion_smirks) == 3\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True",
"def guess_potentialisation(self, sysargs):\n\n print(\"Guessing potentialisation...\")\n print(\"Copying reference basis...\")\n shutil.copyfile(self.reference_guess_basis_path, os.path.join(os.getcwd(), 'basis'))\n\n sp2_replacement_list = []\n sp2_deletion_list = []\n sp2_carbon_list = []\n sp3_replacement_list = []\n sp3_deletion_list = []\n sp3_carbon_list =[]\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n\n # Sort through carbons to decide what needs potentialising. Find atoms bonded to each carbon\n for atom in carbon_atoms:\n distanced_atoms = self.order_atoms_by_distance_from(atom['#'])\n nearest_4_distances = [self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) for distanced_atom in\n distanced_atoms[1:5]]\n bonded_distances = [less_than_distance for less_than_distance in nearest_4_distances if\n less_than_distance < self.bond_deciding_distance]\n\n # if 3 bonded atoms, may be sp2, check if they're hydrogens\n if len(bonded_distances) == 3:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n sp2_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp2_replacement_list.append(str(atom['#']))\n sp2_carbon_list.append(atom)\n\n # if 4 bonded atoms, may be sp3, check if they're hydrogens\n elif len(bonded_distances) == 4:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n if len(hydrogens_bonded_to_this_atom) == 3:\n sp3_replacement_list.extend([str(hydrogen['#']) for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_carbon_list.append(atom)\n\n log_file = open('pseudification.log', 'w+')\n log_file.writelines(\n 'sp2 carbon indices: %s \\nsp3 carbon indices: %s \\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_carbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_carbon_list)\n ))\n\n sp2_coord_command = 'mn sp2 %s' % (','.join(sp2_replacement_list))\n print(\"sp2 command: %s\" % sp2_coord_command)\n sp3_coord_command = 'mn sp3 %s' % (','.join(sp3_replacement_list))\n print(\"sp3 command: %s\" % sp3_coord_command)\n\n if 'nosp3' not in sysargs:\n self.pseudopotentialise_ethane_like_molecule(sp3_coord_command.split(), execute_deletion=False)\n self.pseudopotentialise_molecule(sp2_coord_command.split(), execute_deletion=False)\n\n self.delete_specified_atoms(sp2_deletion_list + sp3_deletion_list)\n\n print(\"Identifying 2-electron sp2 carbons...\")\n # Now need to work out where the 2e sp2 carbons are\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n if len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n print(\"Re-discovered %s sp2 carbons.\" % str(len(sp2_pseudocarbon_list)))\n\n # Now check for ncore=4 sp2 pseudocarbons\n pseudopotential_hashes_to_delete = []\n for atom in sp2_pseudocarbon_list:\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n 
carbons_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_carbon_list[1:5] if\n self.measure_atom_atom_dist(atom['#'],\n distanced_atom[\n '#']) < self.bond_deciding_distance]\n print(\"Carbons bonded to atom %s: %s\" % (str(atom['#']),\n str([carbon['#'] for carbon in carbons_bonded_to_this_atom])))\n\n for carbon_bonded_to_this_atom in carbons_bonded_to_this_atom:\n if carbon_bonded_to_this_atom not in sp2_pseudocarbon_list:\n def distance_from(list_atom):\n return self.measure_atom_atom_dist(carbon_bonded_to_this_atom['#'], list_atom['#'])\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # find pseudos closest to the other carbon\n pseudos_distanced_from_sp2_2e = sorted(carbon_pseudos, key=distance_from)\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[0]['#'])\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[1]['#'])\n\n self.delete_specified_atoms(pseudopotential_hashes_to_delete)\n\n # Read final coordinates\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n sp2_2e_pseudocarbon_list = []\n sp2_2e_pseudohydrogen_list = []\n sp3_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(carbon_pseudos) == 3:\n sp3_pseudocarbon_list.append(atom)\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(carbon_pseudos) == 4:\n sp2_2e_pseudocarbon_list.append(atom)\n sp2_2e_pseudohydrogen_list.extend(carbon_pseudos)\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n\n\n log_file.writelines(\n 'sp2 pseudocarbon indices: %s \\nsp3 pseudocarbon indices: %s\\nsp2 2e pseudocarbon indices: %s\\nsp2 2e pseudohydrogen indices: %s\\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudohydrogen_list)\n ))\n\n # Need to supply potentials to atoms\n define_cmds_path = 'define_add_pseudos'\n with open(os.path.join(define_cmds_path), 'w') as var_file:\n var_file.writelines(define_cmds % (\n # sp2 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'ecp', self.sp2_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'ecp', self.sp2_hydrogen_ecp),\n # sp3 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'ecp', self.sp3_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'ecp', self.sp3_hydrogen_ecp),\n # sp2 2e potentials\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define([hydrogen['#'] for hydrogen in sp2_2e_pseudohydrogen_list], 'ecp', self.sp2_2e_hydrogen_ecp),\n 
self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'ecp', self.sp2_2e_carbon_ecp),\n ))\n\n self.run_define('define_add_pseudos')",
"def load_coolgas(self):\n llist = LineList('ISM')\n # Ricther+17\n print('Loading Richter+17 for CII, CIV, SiII, SiIII')\n r17_a1_file = resource_filename('pyigm','/data/CGM/Galaxy/richter17_A1.fits')\n r17_a1 = Table.read(r17_a1_file)\n r17_a2_file = resource_filename('pyigm','/data/CGM/Galaxy/richter17_A2.fits')\n r17_a2 = Table.read(r17_a2_file)\n # Coords\n coords = SkyCoord(ra=r17_a1['Simbad_RA(ICRS)'], dec=r17_a1['Simbad_DEC(ICRS)'], unit='deg')\n gc = coords.transform_to('galactic')\n ra = np.zeros((len(r17_a2)))\n dec = np.zeros((len(r17_a2)))\n\n # Loop on Sightlines\n for kk,row in enumerate(r17_a1):\n if self.debug and (kk == 5):\n break\n a2_idx = np.where(r17_a2['Name'] == row['Name'])[0]\n if len(a2_idx) == 0:\n continue\n ra[a2_idx] = row['Simbad_RA(ICRS)']\n dec[a2_idx] = row['Simbad_DEC(ICRS)']\n # Generate the components\n icoord = gc[kk]\n alines = []\n for jj,idx in enumerate(a2_idx):\n # Transition\n trans = '{:s} {:d}'.format(r17_a2['Ion'][idx].strip(), int(r17_a2['lambda0'][idx]))\n try:\n aline = AbsLine(trans, linelist=llist)\n except ValueError:\n pdb.set_trace()\n aline.attrib['coord'] = icoord\n\n # Skip EW=0 lines\n if r17_a2['e_W'][idx] == 0:\n continue\n # Velocity\n z = 0.\n aline.setz(z)\n vlim = np.array([r17_a2['vmin'][idx], r17_a2['vmax'][idx]]) * u.km / u.s\n\n aline.limits.set(vlim) # These are v_LSR\n # EW\n aline.attrib['flag_EW'] = 1\n aline.attrib['EW'] = r17_a2['W'][idx] / 1e3 * u.AA\n aline.attrib['sig_EW'] = r17_a2['e_W'][idx] / 1e3 * u.AA\n # Column\n\n if np.isnan(r17_a2['logN'][idx]): # Odd that some lines had an error but no value\n aline.attrib['flag_N'] = 0\n elif r17_a2['l_logN'][idx] == '>':\n aline.attrib['flag_N'] = 2\n aline.attrib['sig_logN'] = 99.99\n else:\n aline.attrib['flag_N'] = 1\n aline.attrib['sig_logN'] = r17_a2['e_logN'][idx]\n aline.attrib['logN'] = r17_a2['logN'][idx]\n # Fill linear\n _, _ = linear_clm(aline.attrib)\n alines.append(aline)\n #if row['Name'] == 'ESO-031--G-008':\n # debug=True\n #else:\n # debug=False\n # Generate components from abslines\n comps = ltiu.build_components_from_abslines(alines, chk_sep=False, chk_vel=False)#, debug=debug)\n # Limits\n vmin = np.min([icomp.limits.vmin.value for icomp in comps])\n vmax = np.max([icomp.limits.vmax.value for icomp in comps])\n # Instantiate\n s_kwargs = dict(name=row['Name'] + '_z0')\n c_kwargs = dict(chk_sep=False, chk_z=False)\n abssys = IGMSystem.from_components(comps, vlim=[vmin,vmax]*u.km/u.s, s_kwargs=s_kwargs, c_kwargs=c_kwargs)\n # CGM Abs\n rho, ang_sep = calc_Galactic_rho(abssys.coord)\n cgmabs = CGMAbsSys(self.galaxy, abssys, rho=rho, ang_sep=ang_sep, cosmo=self.cosmo)\n # Add to cgm_abs\n self.abs.cgm_abs.append(cgmabs)\n # Finish\n r17_a2['RA'] = ra\n r17_a2['DEC'] = dec\n self.richter17 = r17_a2\n # Reference\n if len(self.refs) > 0:\n self.refs += ','\n self.refs += 'Richter+17'",
"def bell_gen(modes, photons, bell=1):\n\n assert modes>= 4, \"Need at least 4 modes for Bell state generation\"\n assert photons >=2, \"Need at least two photons for Bell state generation\"\n\n \n # number of ancilla photons\n aphotons = photons - 2\n # number of ancilla modes\n amodes = modes - 4\n if amodes == 0 and photons > 2:\n aphotons = 0\n photons = 2\n print(\"Warning: Must have ancilla modes for ancilla photons, truncating\")\n\n # prelims\n nstates = number_states(modes,photons)\n # dimension of space\n dim = comb(modes+photons-1,photons, exact=True)\n # basis set for complete space\n basis = np.eye(dim)\n # output projector\n proj = np.zeros([dim,])\n\n # ancilla output state\n if amodes>0:\n aout = [0]*amodes\n aout[0] = aphotons\n else:\n aout = []\n\n # generate psi-\n if bell==2:\n # basis states |1001> and -|0110>\n s1 = aout + [1, 0, 0, 1]\n s2 = aout + [0, 1, 1, 0]\n \n \n # this is bad even for me\n for i in range(dim):\n if (s1 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n\n if (s2 == nstates[i,:]).all():\n proj = proj - basis[:,i]\n\n \n # generate phi+\n elif bell==3:\n # states |1010> and |0101>\n s1 = aout + [1, 0, 1, 0]\n s2 = aout + [0, 1, 0, 1]\n \n for i in range(dim):\n if (s1 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n\n if (s2 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n\n # generate phi-\n elif bell==4:\n \n # states |1010> and -|0101>\n s1 = aout + [1, 0, 1, 0]\n s2 = aout + [0, 1, 0, 1]\n \n for i in range(dim):\n if (s1 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n \n if (s2 == nstates[i,:]).all():\n proj = proj - basis[:,i]\n \n \n # default to psi+\n else:\n # basis states |1001> and |0110>\n s1 = aout + [1, 0, 0, 1]\n s2 = aout + [0, 1, 1, 0]\n \n for i in range(dim):\n if (s1 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n\n if (s2 == nstates[i,:]).all():\n proj = proj + basis[:,i]\n\n proj = proj.reshape(dim,1)\n return np.kron(proj, dagger(proj))/2",
"def test_bespoke_angle_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n angle_smirks = gen._get_bespoke_angle_smirks(molecule=mol)\n # there should be 2 unique smirks\n assert len(angle_smirks) == 2\n all_angles = []\n for smirk in angle_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_angles.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all angles are covered\n for angle in mol.angles:\n assert tuple([atom.molecule_atom_index for atom in angle]) in all_angles",
"def generate_SBB_representation (nffg, add_sg_hops=False,\n log=logging.getLogger(\"SBB\")):\n if nffg is None:\n log.error(\"Missing global resource info! Skip OneBisBis generation!\")\n return None\n # Create Single BiSBiS NFFG\n log.debug(\"Generate trivial SingleBiSBiS NFFG based on %s:\" % nffg)\n log.debug(\"START SBB generation...\")\n sbb = NFFG(id=\"SingleBiSBiS\", name=\"Single-BiSBiS-View\")\n # Create the single BiSBiS infra\n sbb_infra = sbb.add_infra(id=\"SingleBiSBiS\",\n name=\"SingleBiSBiS\",\n domain=NFFG.DEFAULT_DOMAIN,\n infra_type=NFFG.TYPE_INFRA_BISBIS)\n # Compute and add resources\n # Sum of available CPU\n try:\n sbb_infra.resources.cpu = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.cpu for n in nffg.infras if\n n.resources.cpu is not None) or None)\n except TypeError:\n sbb_infra.resources.cpu = None\n # Sum of available memory\n try:\n sbb_infra.resources.mem = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.mem for n in nffg.infras if\n n.resources.mem is not None) or None)\n except TypeError:\n sbb_infra.resources.mem = None\n # Sum of available storage\n try:\n sbb_infra.resources.storage = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.storage for n in nffg.infras if\n n.resources.storage is not None) or None)\n except TypeError:\n sbb_infra.resources.storage = None\n # Minimal available delay value of infras and links in DoV\n try:\n # Get the minimum delay in Dov to avoid false negative mapping result\n sbb_infra.resources.delay = min(itertools.chain(\n # If the chained iterators is empty --> ValueError thrown by sum\n (n.resources.delay for n in nffg.infras if\n n.resources.delay is not None),\n (l.delay for l in nffg.links if l.delay is not None)))\n except ValueError:\n sbb_infra.resources.delay = None\n # Maximum available bandwidth value of infras and links in DoV\n try:\n max_bw = max(itertools.chain(\n (n.resources.bandwidth for n in nffg.infras if\n n.resources.bandwidth is not None),\n (l.bandwidth for l in nffg.links if l.bandwidth is not None)))\n # Number of infras and links in DoV\n sum_infra_link = sum(1 for _ in itertools.chain(nffg.infras, nffg.links))\n # Overestimate switching capacity to avoid false positive mapping result\n sbb_infra.resources.bandwidth = max_bw * sum_infra_link\n except ValueError:\n sbb_infra.resources.bandwidth = None\n log.debug(\"Computed SingleBiBBiS resources: %s\" % sbb_infra.resources)\n # Add supported types\n s_types = set()\n for infra in nffg.infras:\n s_types = s_types.union(infra.supported)\n sbb_infra.add_supported_type(s_types)\n log.debug(\"Added supported types: %s\" % s_types)\n log.debug(\"Added Infra BiSBiS: %s\" % sbb_infra)\n log.log(5, \"SBB:\\n%s\" % sbb_infra.dump())\n # Add existing NFs\n for nf in nffg.nfs:\n c_nf = sbb.add_nf(nf=nf.copy())\n log.debug(\"Added NF: %s\" % c_nf)\n log.log(5, \"NF:\\n%s\" % nf.dump())\n # Discover and add NF connections\n for u, v, l in nffg.real_out_edges_iter(nf.id):\n if l.type != NFFG.TYPE_LINK_DYNAMIC:\n continue\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_nf.ports[l.src.id],\n port2=sbb_infra.add_port(\n id=l.dst.id),\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n dynamic=True,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n # Use SAP id --> SBB port id cache for delay matrix calculation\n 
delay_matrix_cache = {}\n # Add existing SAPs and their connections to the SingleBiSBiS infra\n for sap in nffg.saps:\n c_sap = sbb.add_sap(sap_obj=sap.copy())\n log.debug(\"Added SAP: %s\" % c_sap)\n log.log(5, \"SAP:\\n%s\" % c_sap.dump())\n # Discover and add SAP connections\n for u, v, l in nffg.real_out_edges_iter(sap.id):\n if len(sap.ports) > 1:\n log.warning(\"SAP contains multiple port!\")\n sbb_infra_port = sbb_infra.add_port(id=str(c_sap.id),\n sap=sap.ports.container[0].sap)\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_sap.ports[l.src.id],\n port2=sbb_infra_port,\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n delay_matrix_cache[c_sap.id] = sbb_infra_port.id\n # Shortest paths in format of dict in dict keyed with node ids\n # e.g. SAP2 --> EE1 --> 4.9\n latency_paths = NFFGToolBox.shortestPathsInLatency(G=nffg.network)\n log.log(5, \"Calculated latency paths for delay matrix:\\n%s\"\n % pprint.pformat(latency_paths))\n log.log(5, \"Collected SAP ports for delay matrix:\\n%s\"\n % pprint.pformat(delay_matrix_cache))\n dm_elements = itertools.permutations(delay_matrix_cache.keys(), 2)\n for src, dst in dm_elements:\n if src not in latency_paths:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n continue\n if dst not in latency_paths[src]:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n else:\n sbb_infra.delay_matrix.add_delay(src=src,\n dst=dst,\n delay=latency_paths[src][dst])\n log.debug(\"Added delay matrix element [%s --> %s]: %s\"\n % (src, dst, latency_paths[src][dst]))\n # Recreate flowrules based on NBalazs functions\n sg_hop_info = NFFGToolBox.get_all_sghop_info(nffg=nffg)\n log.debug(\"Detected SG hop info:\\n%s\" % pprint.pformat(sg_hop_info))\n log.debug(\"Recreate flowrules...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_node = value[0].node.id\n sg_src_port = value[0].id\n sg_dst_node = value[1].node.id\n sg_dst_port = value[1].id\n flowclass = value[2]\n fr_bw = value[3]\n fr_delay = value[4]\n fr_hop = sg_id\n sbb_src_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_src_node, data=True) if\n l.src.id == sg_src_port and l.src.node.id == sg_src_node]\n if len(sbb_src_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_src_node, sg_src_port, fr_hop))\n continue\n if len(sbb_src_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_src_node, sg_src_port, fr_hop, sbb_src_port))\n continue\n sbb_src_port = sbb_src_port.pop()\n sbb_dst_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_dst_node, data=True) if\n l.src.id == sg_dst_port and l.src.node.id == sg_dst_node]\n if len(sbb_dst_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_dst_node, sg_dst_port, fr_hop))\n continue\n if len(sbb_dst_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_dst_node, sg_dst_port, fr_hop, sbb_dst_port))\n continue\n sbb_dst_port = sbb_dst_port.pop()\n if flowclass:\n fr_match = \"in_port=%s;flowclass=%s\" % (sbb_src_port.id, flowclass)\n else:\n fr_match = \"in_port=%s\" % sbb_src_port.id\n 
fr_action = \"output=%s\" % sbb_dst_port.id\n if value[0].node.type == NFFG.TYPE_SAP and \\\n value[1].node.type == NFFG.TYPE_NF and \\\n value[0].sap is not None:\n # Update action for flowrule connecting inter-domain SAP to NF\n fr_action += \";UNTAG\"\n fr = sbb_src_port.add_flowrule(id=fr_hop,\n match=fr_match,\n action=fr_action,\n bandwidth=fr_bw,\n delay=fr_delay, )\n log.debug(\"Added flowrule: %s\" % fr)\n if add_sg_hops:\n log.debug(\"Recreate SG hops...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_port = value[0]\n sg_dst_port = value[1]\n hop_fc = value[2]\n hop_bw = value[3]\n hop_delay = value[4]\n sg = sbb.add_sglink(id=sg_id,\n src_port=sg_src_port,\n dst_port=sg_dst_port,\n flowclass=hop_fc,\n delay=hop_delay,\n bandwidth=hop_bw)\n log.debug(\"Added SG hop: %s\" % sg)\n else:\n log.debug(\"Skip SG hop recreation for the SingleBiSBiS!\")\n NFFGToolBox.rewrite_interdomain_tags([(sbb.id, sbb)])\n log.debug(\"END SBB generation...\")\n # Return with Single BiSBiS infra\n return sbb",
"def herbrand_model(clauses: Sequence[Clause]) -> Sequence[Clause]:\n i = 1\n m = {0: []}\n # Find a fact in the theory (i.e. no body literals)\n facts = list(filter(lambda c: len(c.get_body().get_literals()) == 0, clauses))\n if len(facts) == 0:\n raise AssertionError(\n \"Theory does not contain ground facts, which necessary to compute a minimal Herbrand model!\"\n )\n # print(\"Finished iteration 0\")\n\n # If all clauses are just facts, there is nothing to be done.\n if len(facts) == len(clauses):\n return clauses\n\n #BUG: doesn't work properly after pylo update...\n\n m[1] = list(facts)\n while Counter(m[i]) != Counter(m[i - 1]):\n model_constants = _flatten(\n [fact.get_head().get_arguments() for fact in m[i]]\n )\n\n m[i + 1] = []\n rules = list(\n filter(lambda c: len(c.get_body().get_literals()) > 0, clauses)\n )\n\n for rule in rules:\n # if there is a substition theta such that\n # all literals in rule._body are true in the previous model\n body = rule.get_body()\n body_vars = body.get_variables()\n # Build all substitutions body_vars -> model_constants\n substitutions = _all_maps(body_vars, model_constants)\n\n for theta in substitutions:\n # add_rule is True unless there is some literal that never\n # occurs in m[i]\n add_fact = True\n for body_lit in body.get_literals():\n candidate = body_lit.substitute(theta)\n facts = list(map(lambda x: x.get_head(), m[i]))\n # print(\"Does {} occur in {}?\".format(candidate,facts))\n if candidate in facts:\n pass\n # print(\"Yes\")\n else:\n add_fact = False\n\n new_fact = Clause(rule.get_head().substitute(theta), [])\n\n if add_fact and not new_fact in m[i + 1] and not new_fact in m[i]:\n m[i + 1].append(new_fact)\n # print(\"Added fact {} to m[{}]\".format(str(new_fact),i+1))\n # print(m[i+1])\n\n # print(f\"Finished iteration {i}\")\n m[i + 1] = list(set(m[i + 1] + m[i]))\n # print(\"New model: \"+str(m[i+1]))\n i += 1\n return m[i]",
"def test_noise_model_basis_gates(self):\n basis_gates = [\"u1\", \"u2\", \"u3\", \"cx\"]\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n self.assertEqual(model.basis_gates, target)\n\n # Check adding readout errors doesn't add to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_readout_error([[0.9, 0.1], [0, 1]], False)\n self.assertEqual(model.basis_gates, target)\n model.add_readout_error([[0.9, 0.1], [0, 1]], [2], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a reset instruction error isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), [\"reset\"], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a non-standard gate isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), [\"label\"], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a standard gate is added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates + [\"h\"])\n model.add_all_qubit_quantum_error(reset_error(0.2), [\"h\"], False)\n self.assertEqual(model.basis_gates, target)",
"def test_noise_model_basis_gates(self):\n basis_gates = ['u1', 'u2', 'u3', 'cx']\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n self.assertEqual(model.basis_gates, target)\n\n # Check adding readout errors doesn't add to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_readout_error([[0.9, 0.1], [0, 1]], False)\n self.assertEqual(model.basis_gates, target)\n model.add_readout_error([[0.9, 0.1], [0, 1]], [2], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a reset instruction error isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['reset'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a non-standard gate isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['label'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a standard gate is added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates + ['h'])\n model.add_all_qubit_quantum_error(reset_error(0.2), ['h'], False)\n self.assertEqual(model.basis_gates, target)",
"def addPiecewiseCosts(model, true_gens, prod_vars):\n #First generate all the variables so only need one call to model.update()\n ys_all, cost_vars = {}, {}\n for name, gen in true_gens.items():\n #There is a nicety that all the true generators pertain H1*H36\n assert gen.offerBlocks.keys() == ['H1*H36']\n offerBlocks = gen.offerBlocks['H1*H36'] \n #For numerical stability, makes sense to clip these at eco_max.\n eco_max = max( gen.eco_max.values() )\n for numKnots, b in enumerate(offerBlocks):\n if b.size >= eco_max:\n break\n numKnots += 1\n for iHr in xrange(HORIZON_LENGTH):\n ys_all[name, iHr] = [model.addVar(ub=1.0) for ix in xrange(numKnots ) ] \n cost_vars[name, iHr] = model.addVar(obj=1.0)\n model.update()\n\n #now go back and add relevant constraints against the variables\n for name, gen in true_gens.items():\n offerBlocks = gen.offerBlocks['H1*H36'] \n #clip these at eco-max.\n #VG in reality, could clip them also at eco min and do this by hour.\n eco_max = max( gen.eco_max.values() )\n for numKnots, b in enumerate(offerBlocks):\n if b.size >= eco_max:\n break\n sizes = [b.size for b in offerBlocks[:numKnots] ] + [ eco_max ] \n prices = [b.price for b in offerBlocks[:numKnots] ] + [ offerBlocks[numKnots].price ] \n size_diffs = [s - sm for s, sm in zip(sizes, [0] + sizes) ] \n \n for iHr in xrange(HORIZON_LENGTH): \n model.addConstr( cost_vars[name, iHr] == \n grb.quicksum( y * p * s for y, p, s in zip(ys_all[name, iHr], prices, size_diffs) ) ) \n model.addConstr( prod_vars[name, iHr] == \n grb.quicksum( y * s for y, s in zip(ys_all[name, iHr], size_diffs) ) )\n return cost_vars",
"def create_GO(init_file, no_COOH, no_epoxy, no_OH, filename1):\n global atoms\n global bond_list\n bond_list = bond_list_1\n atoms = read_in_graphene(init_file)\n global anywhere_map\n anywhere_map = get_map_anywhere(atoms)\n global edge_map\n edge_map = get_map_edge(atoms)\n \n list_residue_numbers = [x.residue_number for x in atoms]\n added_functional_groups = max(list_residue_numbers)\n \n must_add = no_COOH + no_epoxy + no_OH\n while (must_add > 0):\n print(\"Left to add: \", \"cooh: \", no_COOH, \"epoxy: \", no_epoxy, \"hydroxyl: \", no_OH)\n chosen = random.choice(pick_to_add(no_COOH, no_epoxy, no_OH))\n if (chosen == \"carboxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_carboxyl(random_pick_spot(\"carboxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_COOH -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"epoxy\"): \n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_epoxy(random_pick_spot(\"epoxy\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_epoxy -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"hydroxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_hydroxyl(random_pick_spot(\"hydroxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_OH -=1\n attempt = 1888 \n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n atno = 1\n new_list = []\n for atom in atoms:\n if (atom.atom_name == \"CX\"):\n New_CX = Atom(atno, \"CX\", \"GGG\", atno, atom.x, atom.y, atom.z)\n new_list.append(New_CX)\n atno += 1 \n \n for atom in atoms:\n if (atom.atom_name == \"C4\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"C1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_OJ in atoms:\n if ((atom_OJ.atom_name == \"OJ\") and (atom_OJ.residue_name == \"C1A\") and (atom_OJ.residue_number == atom.residue_number)):\n for atom_OK in atoms:\n if ((atom_OK.atom_name == \"OK\") and (atom_OK.residue_name == \"C1A\") and (atom_OK.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"C1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"C1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z )\n New_C4 = Atom(atno + 1, \"C4\", \"C1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_OJ = Atom(atno + 2, \"OJ\", \"C1A\", atom.residue_number, atom_OJ.x, atom_OJ.y, atom_OJ.z)\n New_OK = Atom(atno + 3, \"OK\", \"C1A\", atom.residue_number, atom_OK.x, atom_OK.y, atom_OK.z)\n New_HK = Atom(atno + 4, \"HK\", \"C1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 5\n new_list.append(New_CY); new_list.append(New_C4); new_list.append(New_OJ); new_list.append(New_OK); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n if (check == True):\n break\n if (check == True):\n break \n \n elif (atom.atom_name == \"OE\"): \n check = False\n for atom_CY in 
atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"E1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_CY2 in atoms: \n if ((atom_CY2.atom_name == \"CZ\") and (atom_CY2.residue_name == \"E1A\") and (atom_CY2.residue_number == atom.residue_number) and (atom_CY2 != atom_CY)):\n New_CY = Atom( atno + 0, \"CY\", \"E1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_CY2 = Atom(atno + 1, \"CZ\", \"E1A\", atom.residue_number, atom_CY2.x, atom_CY2.y, atom_CY2.z)\n New_OE = Atom( atno + 2, \"OE\", \"E1A\", atom.residue_number, atom.x, atom.y, atom.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_CY2); new_list.append(New_OE);\n check = True\n break\n if (check == True):\n break\n elif (atom.atom_name == \"OL\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"H1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"H1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"H1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_OL = Atom(atno + 1, \"OL\", \"H1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_HK = Atom(atno + 2, \"HK\", \"H1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_OL); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n \n atoms = new_list.copy()\n writepdb(atoms, filename1)\n sum_c1a = 0; sum_e1a = 0; sum_h1a = 0; sum_ggg = 0\n for atom in atoms:\n if (atom.residue_name == \"C1A\"):\n sum_c1a += 1\n elif (atom.residue_name == \"E1A\"):\n sum_e1a += 1\n elif (atom.residue_name == \"H1A\"):\n sum_h1a += 1\n elif (atom.residue_name == \"GGG\"):\n sum_ggg += 1\n print(\"Placed:\")\n print(\"carboxyl: \", sum_c1a/5)\n print(\"epoxy: \", sum_e1a/3)\n print(\"hydroxyl: \", sum_h1a/3)\n print(\"graphene atoms (CX - GGG) left: \", sum_ggg)\n return 'done.'",
"def generate_bnd(cli_file, geo_file, slf_file, bnd_file, varnames, varunits):\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(cli_file):\n raise TelemacException(\\\n '... the provided cli_file does not seem to exist:'\n ' {}\\n\\n'.format(cli_file))\n if not path.exists(geo_file):\n raise TelemacException(\\\n '... the provided geo_file does not seem to exist: '\n '{}\\n\\n'.format(geo_file))\n\n if len(varnames) != len(varunits):\n raise TelemacException(\\\n 'Not the same number of variables and units\\nvarnames: {}\\nvarunits: {}'\n '{}\\n\\n'.format(varnames, varunits))\n\n\n # Read the new CLI file to get boundary node numbers\n print(' +> getting hold of the Conlim file and of its liquid boundaries')\n cli = Conlim(cli_file)\n # Keeping only open boundary nodes\n bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])\n\n # Find corresponding (x,y) in corresponding new mesh\n print(' +> getting hold of the GEO file and of its bathymetry')\n geo = Selafin(geo_file)\n xys = np.vstack((geo.meshx[bor-1], geo.meshy[bor-1])).T\n _ = geo.get_variables_at(0,\\\n subset_variables_slf(\"BOTTOM: \", geo.varnames)[0])[0]\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ slf existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(slf_file):\n raise TelemacException(\\\n '... the provided slf_file does not seem to exist: '\n '{}\\n\\n'.format(slf_file))\n slf = Selafin(slf_file)\n slf.set_kd_tree()\n slf.set_mpl_tri()\n\n print(' +> support extraction')\n # Extract triangles and weigths in 2D\n support2d = []\n ibar = 0\n pbar = ProgressBar(maxval=len(xys)).start()\n for xyi in xys:\n support2d.append(xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy,\n slf.tree, slf.neighbours))\n ibar += 1\n pbar.update(ibar)\n pbar.finish()\n # Extract support in 3D\n support3d = list(zip(support2d, len(xys)*[range(slf.nplan)]))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n bnd = Selafin('')\n bnd.fole = {}\n bnd.fole.update({'hook':open(bnd_file, 'wb')})\n bnd.fole.update({'name':bnd_file})\n bnd.fole.update({'endian':\">\"}) # big endian\n bnd.fole.update({'float':('f', 4)}) # single precision\n\n # Meta data and variable names\n bnd.title = ''\n bnd.nbv1 = len(varnames)\n # /!\\ ELEVATION has to be the first variable\n # (for possible vertical re-interpolation within TELEMAC)\n\n bnd.varnames = []\n bnd.varunits = []\n for var, unit in zip(varnames, varunits):\n new_var = var + (16-len(var))*\" \"\n new_unit = unit + (16-len(unit))*\" \"\n bnd.varnames.append(new_var)\n bnd.varunits.append(new_unit)\n\n bnd.nvar = bnd.nbv1\n bnd.varindex = range(bnd.nvar)\n\n # Sizes and mesh connectivity\n bnd.nplan = slf.nplan\n # Number of nodes per boundary element (ndp2 in 2D and ndp3 in 3D)\n bnd.ndp2 = 2\n bnd.ndp3 = 4\n bnd.npoin2 = len(bor)\n bnd.npoin3 = bnd.npoin2*slf.nplan\n bnd.iparam = [0, 0, 0, 0, 0, 0, bnd.nplan, 0, 0, 1]\n bnd.ipob2 = bor # /!\\ note that ipobo keeps the original numbering\n print(' +> masking and setting connectivity')\n # Set the array that only includes elements of geo.ikle2\n # with at least two nodes in bor\n array_1d = np.in1d(geo.ikle2, np.sort(bor-1))\n mask = geo.ikle2[np.where(np.sum(array_1d.reshape(geo.nelem2, geo.ndp2),\n axis=1) == 2)]\n # this ikle2 keeps the original numbering\n ikle2 = 
np.ravel(mask)[np.in1d(mask, np.sort(bor-1))].reshape(len(mask), 2)\n # ~~> re-numbering ikle2 as a local connectivity matrix\n knolg, _ = np.unique(np.ravel(ikle2), return_index=True)\n knogl = dict(zip(knolg, range(len(knolg))))\n bnd.ikle2 = - np.ones_like(ikle2, dtype=np.int)\n for k in range(len(ikle2)):\n # /!\\ bnd.ikle2 has a local numbering, fit to the boundary elements\n bnd.ikle2[k] = [knogl[ikle2[k][0]], knogl[ikle2[k][1]]]\n # Last few numbers\n bnd.nelem2 = len(bnd.ikle2)\n if slf.nplan > 1:\n bnd.nelem3 = bnd.nelem2*(slf.nplan-1)\n else:\n bnd.nelem3 = bnd.nelem2\n bnd.ndp3 = bnd.ndp2\n # 3D structures\n if slf.nplan > 1:\n bnd.ipob3 = np.ravel(np.add(np.repeat(bnd.ipob2, slf.nplan)\\\n .reshape((bnd.npoin2, slf.nplan)),\n bnd.npoin2*np.arange(slf.nplan)).T)\n bnd.ikle3 = \\\n np.repeat(bnd.npoin2*np.arange(slf.nplan-1),\n bnd.nelem2*bnd.ndp3)\\\n .reshape((bnd.nelem2*(slf.nplan-1), bnd.ndp3)) + \\\n np.tile(np.add(np.tile(bnd.ikle2, 2),\n np.repeat(bnd.npoin2*np.arange(2), bnd.ndp2)),\n (slf.nplan-1, 1))\n else:\n bnd.ipob3 = bnd.ipob2\n bnd.ikle3 = bnd.ikle2\n # Mesh coordinates\n bnd.meshx = geo.meshx[bor-1]\n bnd.meshy = geo.meshy[bor-1]\n\n print(' +> writing header')\n # Write header\n bnd.append_header_slf()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n print(' +> setting variables')\n # TIME and DATE extraction\n bnd.datetime = slf.datetime\n bnd.tags['times'] = slf.tags['times']\n # VARIABLE extraction\n list_var = varnames[0]+\": \"\n for var in varnames[1:]:\n list_var += \";\"+var+\": \"\n\n vrs = subset_variables_slf(list_var, slf.varnames)\n\n # Read / Write data, one time step at a time to support large files\n print(' +> reading / writing variables')\n pbar = ProgressBar(maxval=len(slf.tags['times'])).start()\n zeros = np.zeros((bnd.npoin3, 1), dtype=np.float)\n for itime in range(len(slf.tags['times'])):\n data = get_value_history_slf(slf.file, slf.tags, [itime], support3d,\n slf.nvar, slf.npoin3, slf.nplan, vrs)\n data = np.reshape(np.transpose(np.reshape(np.ravel(data),\n (bnd.nvar, bnd.npoin2,\n bnd.nplan)),\n (0, 2, 1)),\n (bnd.nvar, bnd.npoin3))\n bnd.append_core_time_slf(itime)\n bnd.append_core_vars_slf(data)\n pbar.update(itime)\n pbar.finish()\n\n # Close bnd_file\n bnd.fole['hook'].close()",
"def twostr_func(wavelength, F_s, solarzenithangle,albedo_dif, \n\t\t\talbedo_dir, temp_ground, w_0, g, tau_n, temp_c):\n\t\n\t########################\n\t###Import useful libraries\n\t########################\n\timport numpy as np\n\timport pdb\n\timport scipy.linalg\n\n\n\n\n\t########################\n\t###Define model parameters\n\t########################\n\t#Properties of the ground\n\temissivity_ground=1.-albedo_dif #emissivity of ground. 1=perfect BB emitter.\n\n\t#Optical depth structure\n\tNlayer=len(tau_n) #number of layers in the atmospheric model.\n\t\n\ttau_c=np.zeros(Nlayer+1)# tau_c[n] is the cumulative optical depth at the upper edge of layer n. So tau_c[0]=0, and tau_c[N] is the maximum possible.\n\tfor n in range(0, Nlayer):\n\t\ttau_c[n+1]=tau_c[n]+tau_n[n] \n\n\t#In the Toon formalism, j=0 corresponds to space, and j=N+1 corresponds to the planet surface.\n\t#These points in wavelength space define the edges of the bins in tau space. \n\t#Other terminology:\n\t#\ttau_c=cumulative optical depth of layers *above* layer n. \n\t#\ttau_n=total optical depth of the layer n\n\t#\ttau=total optical depth at any point within a layer n, hence satisfying 0<tau<tau_n\n\n\tmu_0=np.cos(solarzenithangle) #\"incident direction of solar beam\"\n\n\n\t########################\n\t###Determine the two-stream approximation coefficients.\n\t########################\n\t#Eddington and quadrature are good at solar wavelengths (i.e., not thermal blackbody dominated). delta scalings of Joseph et al (1976) recommended to replace w_0, g, tau in this case. However, when dominated by internal isotropic sources like the Planck function, hemispheric mean approximation is preferable. When w_0=0, quadrature case has problems. This happens esp at thermal wavelengths. Again this favors using hemispheric mean at these wavelengths\n\t\n\t#We use quadrature because 1) we are at solar wavelengths for this UV work and 2) that's what twostr.f does (which is our comparison case)\n\tgamma_1= np.sqrt(3.)*(2.-w_0*(1.+g))/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_1\n\tgamma_2=np.sqrt(3.)*w_0*(1.-g)/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_2\n\tgamma_3=(1.-np.sqrt(3.)*g*mu_0)/2. #consistent with Toon et al; equal to the Pierrehumbert gamma_plus/w_0\n\tgamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\tmu_1=1./np.sqrt(3.)+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\t##Eddington\n\t#gamma_1= (7.-w_0*(4.+3.*g))/4.\n\t#gamma_2=-1.*(1.-w_0*(4.-3.*g))/4.\n\t#gamma_3=(2.-3.*g*mu_0)/4.\n\t#gamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\t#mu_1=1./2.+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\talambda=np.sqrt(np.abs(gamma_1*gamma_1-gamma_2*gamma_2)) #this is the lower-case lambda, from eqn 21 of Toon et al\n\t\t\t\t\t\t\t\t #The absolute value was added based on the code Toon just sent us. This corresponds to his AK(L,J) parameter. 
But it should not matter since gamma_1>gamma_2 for w_0<1.\n\tclambda=(gamma_1-alambda)/(gamma_2) #this is the upper-case lambda, from eqn 22 of Toon et al\n\n\tEMLT=np.exp(-alambda*tau_n) #this appears to be a prefactor used to facilitate computation of eqn 44 of Toon et al\n\te1=1.+clambda*EMLT\n\te2=1.-clambda*EMLT\n\te3=clambda+EMLT\n\te4=clambda-EMLT\n\n\t########################\n\t###Set up calculation\n\t########################\n\t\"\"\"\n\tThe fundamental equation we are solving is of form:\n\tA_{l}*Y_{l-1}+B_{l}*Y_{l}+D{l+1}=E_{l} (equation 39 of Toon et al)\n\tHere, A_l, B_l, D_l, E_l are quantities we determine, and the Y_l is what we solve for.\n\tHence, we can summarize that we are solving a matrix equation that takes form:\n\tPY=E\n\twhere Y[l]=Y_l\n\t E[l]=E_l\n\t P[l, l-1]=A_l [row, column]\n\t P[l, l]=B_l\n\t P[l, l+1]=D_l\n\t P[i,j]=0 else\n\tToon et al use 1-indexing. Hence n runs from 1 to N, l runs from 1 to 2N, where N is the number of layers, and they have:\n\tY_l=Y_{1n} for l=1,3,5,...2n-1...2N-1\n\tY_l=Y_{2n} for l=2,4,6,...2n...2N\n\n\tHowever, we use Python, which has 0-indexing. Hence *we* choose that n runs from 0 to N-1, l runs from 0 to 2N-1, and:\n\tY_l=Y_{1n} for l=0,2,4...2n...2N-2\n\tY_l=Y_{2n} for l=1,3,5...2n+1...2N-1\n\n\tThe Y_{1n} and Y_{2n} are related to F^+_n and F^-_n via equations 31 and 32 of Toon et al.\n\tThis parametrization has been done to remove exponentials with positive operands (ie ones that could grow large and lead to numerical instabilities) from the matrix.\n\n\tNote: The mapping of this PQ=R to the F+ and F- is unclear because of 1) this parametrization in terms of Y_l (done to eliminate numerical instabilities) and 2)further linear combinations done to convert a pentagiagonal matrix to an even simpler tridiagonal matrix. Hence intuitive checks are hard.\n\t\"\"\"\n\n\t########################\n\t###Set up surface flux\n\t########################\n\tS_sfc=albedo_dir*mu_0*np.exp(-tau_c[-1]/mu_0)*np.pi*F_s+emissivity_ground*np.pi*Planck(temp_ground, wavelength)\n\t#Surface emission. Formed by adding blackbody emission from the ground to the reflected energy from the direct beam. The direct beam's reflected energy is assumed to be purely diffuse. This corresponds to equations 37 and 38 of Toon et al. Note that this does NOT match equation 5.31 of Pierrehumbert because it does not include the reflected diffuse radiation. So, this implicitly assumes the diffuse albedo to be 0. \n\n\t########################\n\t###Set up C-values\n\t########################\n\t#In the reshuffled set of parameters used in this formalism, these seem analagous to the forcing term in Pierrehumbert. All the added radiation is contained in here.\n\n\tdef C_plus(n, tau): #implementation of superposition of eqns 23 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]-1./mu_0)*gamma_3[n]+gamma_4[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau+1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. 
But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\tdef C_minus(n, tau): #implementation of superposition of eqns 24 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]+1./mu_0)*gamma_4[n]+gamma_3[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau-1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\t########################\n\t###Calculate matrix coefficients\n\t#########################\n\t#initialize the A, B, D, and E.\n\tA=np.zeros(Nlayer*2)\n\tB=np.zeros(np.shape(A))\n\tD=np.zeros(np.shape(A))\n\tE=np.zeros(np.shape(A))\n\n\n\t#For l=0 (n=0) we have the boundary condition that the downward diffuse flux at the top of the first layer is equal to any incident diffuse downward flux. We set this to be zero.\n\tA[0]=0.\n\tB[0]=e1[0]\n\tD[0]=-1.*e2[0]\n\tE[0]=0.-1*C_minus(0,0) #This is really F_minus[0,0], i.e. we are assuming there is no downward diffuse flux from the top of the atmosphere.\n\n\t#for l=2N-1 (n=N-1), we have the boundary condition that the upward flux at the surface is the sume of the reflected downward diffuse flux and energy from any other sources (e.g. reflected direct beam, BB emission of the ground)/np.sqrt(3.)\n\tA[2*Nlayer-1]=e1[Nlayer-1]-albedo_dif*e3[Nlayer-1]\n\tB[2*Nlayer-1]=e2[Nlayer-1]-albedo_dif*e4[Nlayer-1]\n\tD[2*Nlayer-1]=0.\n\tE[2*Nlayer-1]=S_sfc-C_plus(Nlayer-1, tau_n[Nlayer-1])+albedo_dif*C_minus(Nlayer-1, tau_n[Nlayer-1])\n\n\t#There is a problem in the Toon paper. As written, the l=2n depends on e_n+1, running over the array edge. twostr.f resolves this by adopting a different mapping: their definition reduces to defining l=2(n+1) and running n from 0 to N-1. In this case, l=2 (The third value in the list of ls) depends on n=0 and n=1. This eliminates the overflow problem. We have implemented this below.\n\t\n\t##For n=1,2,3...N-1, l=2,4,6,...2N-2:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*(n+1)\n\t\tA[l]=e2[n]*e3[n]-e4[n]*e1[n]\n\t\tB[l]=e1[n]*e1[n+1]-e3[n]*e3[n+1]\n\t\tD[l]=e3[n]*e4[n+1]-e1[n]*e2[n+1]\n\t\t\n\t\tE[l]=e3[n]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))+e1[n]*(C_minus(n,tau_n[n])-C_minus(n+1,0.))\n\n\n\t#For n=0...N-2, l=1,3...2N-3:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*n+1\n\t\tA[l]=e2[n+1]*e1[n]-e3[n]*e4[n+1]\n\t\tB[l]=e2[n]*e2[n+1]-e4[n]*e4[n+1]\n\t\tD[l]=e1[n+1]*e4[n+1]-e2[n+1]*e3[n+1]\n\t\t\n\t\tE[l]=e2[n+1]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))-e4[n+1]*(C_minus(n+1, 0)-C_minus(n, tau_n[n])) #twostr.f has a -1*e_{4,n+1}. We have applied the same even though this is NOT what is written in the Toon et al paper. We have done this because Toon told us (6/26/2015) that there are some sign errors in the coefficients, and we currently trust the validated CLIMA code over the paper we know has errors in it. EDIT: Looking at the code Toon shared with us, he does the same. 
\n\n\n\t########################\n\t###Assemble matrix equation components\n\t#########################\n\tP=np.zeros([Nlayer*2,Nlayer*2])\n\n\t#l=0: no \"A\" coefficient b/c l-1 has no meaning\n\tP[0,0]=B[0]\n\tP[0,1]=D[0]\n\n\t#l=2N-1: no \"D\" coefficient b/c l+1 has no meaning\n\tP[2*Nlayer-1,2*Nlayer-1-1]=A[2*Nlayer-1]\n\tP[2*Nlayer-1,2*Nlayer-1]=B[2*Nlayer-1]\n\n\tfor l in range(1, Nlayer*2-1): #This populates the matrix P in PY=E. \n\t\tP[l, l-1]=A[l]\n\t\tP[l,l]=B[l]\n\t\tP[l,l+1]=D[l]\n\n\t########################\n\t###Invert matrix\n\t#########################\n\t#Y=np.linalg.solve(P, E) #this is the Y_l\n\t\n\t#try using a specialized solver\n\tab=np.zeros([3,2*Nlayer])\n\tab[0,:]=np.append(0.0, np.diag(P, k=1))\n\tab[1,:]=np.diag(P, k=0)\n\tab[2,:]=np.append(np.diag(P, k=-1),0.0)\n\t#pdb.set_trace()\n\tY=scipy.linalg.solve_banded((1,1), ab, E) #this is the Y_l\n\n\n\t########################\n\t###Convert from Y_l to Y_1n, Y_2n\n\t#########################\n\t#The Y_1n as defined in Toon et al correspond to l=1,3, 5...2N-1. Adjusting for the zero-indexing of Python as we have done, they instead correspond to l=0,2,...2N-2\n\t#The Y_2n as defined in Toon et al correspond to l=2,4,6...2N. Adjusting for Python zero-indexing as we have done, they instead correspond to l=1,3,5...2N-1.\n\t#For detail, see eq. 40.\n\tY_1=np.zeros(Nlayer)\n\tY_2=np.zeros(Nlayer)\n\tfor n in range(0, Nlayer):\n\t\tY_1[n]=Y[2*n]\n\t\tY_2[n]=Y[2*n+1] \n\t\t#last number called is Nlayer-1=N-1, so is consistent.\n\t\n\t########################\n\t###Convert from Y_1n, Y_2n to F_plus, F_minus\n\t#########################\n\tdef F_plus(n,tau): #defined from Eqn 31 of Toon et al.\n\t\tterm1=Y_1[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))+clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))-clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm3=C_plus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\n\tdef F_minus(n, tau): #defined from Eqn 32 of Toon et al.\n\t\tterm1=Y_1[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))+np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))-np.exp(-alambda[n]*tau))\n\t\tterm3=C_minus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\t\n\t########################\n\t###Evaluate F_plus, F_minus at boundary edges\n\t#########################\n\tF_plus_tau0=np.zeros(np.shape(tau_n))\n\tF_plus_taumax=np.zeros(np.shape(tau_n))\n\tF_minus_tau0=np.zeros(np.shape(tau_n))\n\tF_minus_taumax=np.zeros(np.shape(tau_n))\n\n\tfor n in range(0, Nlayer):\n\t\tF_plus_tau0[n]=F_plus(n, 0.)\n\t\tF_plus_taumax[n]=F_plus(n, tau_n[n])\n\t\tF_minus_tau0[n]=F_minus(n, 0.)\n\t\tF_minus_taumax[n]=F_minus(n, tau_n[n])\n\n\n\t########################\n\t###Convert from Y_1n, Y_2n to F_net, mean intensity.\n\t#########################\n\t#test if diffuse flux dominates over direct flux. If direct flux dominant, instead set mu_1=mu_0\n\t\n\t#if F_minus_taumax[-1]<mu_0*np.pi*F_s*np.exp(-tau_c[-1]/mu_0):\n\t\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t\n\tF_net=np.zeros(np.shape(tau_n)) #defined from Eqn 48 of Toon et al. 
This quantity is the net flux at the BASE of layer n.\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\n\t\tterm1=Y_1[n]*(e1[n]-e3[n])\n\t\tterm2=Y_2[n]*(e2[n]-e4[n])\n\t\tterm3=C_plus(n, tau_n[n])-C_minus(n, tau_n[n])\n\t\t\n\t\tF_net[n]=term1+term2+term3 -direct\n\n\tAMEAN=np.zeros(np.shape(tau_n)) #defined from Eqn 49 of Toon et al. This is the equivalent of the quantity AMEAN in the twostr.f code. It is equal to 4*np.pi*J_n, where J_n is the mean intensity at the base of layer n. Hence this quantity AMEAN should be equal to the total intensity received by a point at the base of layer n. \n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\t\n\t\tterm1=Y_1[n]*(e1[n]+e3[n])\n\t\tterm2=Y_2[n]*(e2[n]+e4[n])\n\t\tterm3=C_plus(n, tau_n[n])+C_minus(n, tau_n[n])\n\t\t\n\t\t#AMEAN[n]=(1./mu_1[n])*(term1+term2+term3)+direct/mu_0\t\n\t\tAMEAN[n]=(1./mu_1[n])*(F_plus_taumax[n]+F_minus_taumax[n])+direct/mu_0\t\n\t\n\t########################\n\t###Compute \"surface intensity\"\n\t#########################\t\n\t#\"Surface intensity\" refers to the total intensity that would be intercepted by a particle at the surface of the planet. Whereas the total intensity is equal to (F_plus[-1]+F_minus[-1])/mu_1+direct[-1]/mu_0, the surface intensity is instead equal to (F_minus[-1])/mu_1+direct[-1]/mu_0, i.e. the downwelling diffuse intensity (since the bottom intensity is cut out due to there being a planet there) plus the direct intensity\n\t\n\tsurface_intensity=(F_minus_taumax[-1]/mu_1[-1])+(np.pi*F_s)*np.exp(-(tau_c[-1])/mu_0)\n\t\n\t########################\n\t###Return Result\n\t#########################\n\t#F_minus_tau0\n\t#np.max(np.abs((F_minus_taumax[:-1]-F_minus_tau0[1:]))/F_minus_tau0[1:])\n\t#np.max(np.abs((F_plus_taumax[:-1]-F_plus_tau0[1:]))/F_plus_tau0[1:])\n\t\n\treturn (F_plus_tau0, F_plus_taumax, F_minus_tau0, F_minus_taumax, F_net, AMEAN, surface_intensity)",
"def genBC_calls(rhs):\n\n\tincPATH \t= rhs.incPATH\n\tdim \t\t\t= rhs.dim\n\twp \t\t\t= rhs.wp\n\tvarname \t= rhs.varname\n\tvarstored = rhs.varstored\n\tvarsolved = rhs.varsolved\n\tvarbc = rhs.varbc\n\t\n\thlo_rhs \t\t= rhs.hlo_rhs\n\tstencil_rhs = rhs.stencil\n\torder_rhs = rhs.order\n\n\tbc_info = rhs.bc_info[0]\n\n# Extract bcdir:\n\n\tbcdir_all = []\n\tfor bcname in bc_info:\n\t\tbcdir_all = bcdir_all + list(bc_info[bcname].keys())\n\n\tbcdir_all = sorted(bcdir_all)\n\n# Extract phybc details:\n\n\tbcphy_all = {}\n\n\tfor bcdir in bcdir_all:\n\t\tbcphy_all[bcdir] = {}\n\t\tfor bcname in bc_info:\n\t\t\tif bcdir in list(bc_info[bcname].keys()):\n\t\t\t\tbcphy_all[bcdir][bcname] = bc_info[bcname][bcdir] \n\n\tstaticstored = {}\n\tdynamicstored = {}\n\tstaticvarbc = {}\n\tdynamicvarbc = {}\n\n\tfor var in varstored:\n\t\tif varstored[var]['static']:\n\t\t\tstaticstored[var] = varstored[var]['symb']\n\t\telse:\n\t\t\tdynamicstored[var] = varstored[var]['symb']\t\n\n\tvar2process = {'storedstatic':staticstored, \n\t\t\t\t 'stored' :dynamicstored,\n\t\t\t\t 'varbcstatic' :staticvarbc,\n\t\t\t\t 'varbc' :dynamicvarbc}\n\n\tslcbc_stored = {} \n\tefname_stored = {}\n\n\tDirDic = {'i':{'dir':None,'indbc':None},\n\t 'j':{'dir':None,'indbc':None},\n\t 'k':{'dir':None,'indbc':None}}\n\n\tidrhs = {}\t\t\n\n\tidrhs['i1'] = 'idrhs(1)=idarray(1)\\n'\n\tidrhs['imax'] = 'idrhs(2)=idarray(2)\\n'\n\tidrhs['j1'] = 'idrhs(3)=idarray(3)\\n'\t\n\tidrhs['jmax'] = 'idrhs(4)=idarray(4)\\n'\t\t\t\t\t\t\n\tidrhs['k1'] = 'idrhs(5)=idarray(5)\\n'\t\n\tidrhs['kmax'] = 'idrhs(6)=idarray(6)\\n'\n\tindi = 'i'\n\tindj = 'j'\n\tindk = 'k'\n\n\tindiri = indi\n\tindirj = indj\n\tindirk = indk \n\t\n\n\tbcdone = {}\n\tbcnum = 0\n\n# # ADD EDGES :\n\n\tif dim == 3:\t\n\t\tdirlist = ['i1','imax','j1','jmax','k1','kmax']\n\telif dim == 2:\n\t\tdirlist = ['i1','imax','j1','jmax']\n\telif dim == 1:\n\t\tdirlist = ['i1','imax']\n\n\n\tedgeBCs = []\n\tfor d1 in dirlist:\n\t\tfor d2 in dirlist:\n\t\t\tif d1 != d2:\n\t\t\t\tedgeBCs.append(d1+d2)\n\n\tfrom itertools import permutations\n\n\tedone = []\n\tfor dir1 in bcdir_all:\n\t\tif dir1 not in edgeBCs:\n\t\t\tfor dir2 in bcdir_all:\n\t\t\t\tif dir2 not in edgeBCs:\n\t\t\t\t\tif dir2[0] != dir1[0]:\n\t\n\t\t\t\t\t\tif ((dir2+dir1 not in edone) and (dir1+dir2 not in edone)):\n\t\t\t\t\t\t\tif ((dir1+dir2 not in bcdone) and (dir2+dir1 not in bcdone)):\n\t\n\t\t\t\t\t\t\t\tbcnum = bcnum + 1\n\t\n\t\t\t\t\t\t\t\tbcdone[dir1+dir2] = []\n\t\t\t\t\t\t\t\tbcdone[dir1+dir2].append(bcnum)\t\t\n\t\n\t\t\t\t\t\t\t\tslcbc = {'rhs': open(incPATH+'select_phybc_rhs.f90' ,'a+')}\n\t\n\t\t\t\t\t\t\t\ttypebc = []\n\t\t\t\t\t\t\t\tfor bcname in bcphy_all[dir1]:\n\t\t\t\t\t\t\t\t\tfor bctype in bcphy_all[dir1][bcname]:\n\t\t\t\t\t\t\t\t\t\ttypebc.append(bctype)\t\n\t\n\t\t\t\t\t\t\t\tif 'rhs' not in typebc: # By default we extend interiror eqns for rhs at the edges\n\t\t\t\t\t\t\t\t\t\tslcbc['rhs'].write('CASE ('+str(bcnum)+')\\n')\t\n\t\n\t\t\t\t\t\t\t\tfor bctype in typebc:\n\t\t\t\t\t\t\t\t\tslcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t\t\t\t\tslcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\n\t\t\t\t\t\t\t\t# rhs bc edge:\n\t\n\t\t\t\t\t\t\t\tfor layer1 in range(0,hlo_rhs): #BC layers dir1\n\t\t\t\t\t\t\t\t\tefname = open(incPATH+'bcsrc_edgescall_'+dir1+'_'+dir2+'_'+str(layer1)+'.for','r')\n\t\t\t\t\t\t\t\t\tslcbc['rhs'].write(' call '+efname.readlines()[8][10:])\n\t\t\t\t\t\t\t\tslcbc['rhs'].write(' '+idrhs[dir1])\n\t\t\t\t\t\t\t\tslcbc['rhs'].write(' 
'+idrhs[dir2])\n\t\n\t\n\t\t\t\t\t\t\t\tfephy = {}\n\t\n\t\n\t\t\t\t\t\t\t\tif dir1 in bcphy_all:\n\t\t\t\t\t\t\t\t\t# slcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t\t\t\t\t# slcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\t\t\t\tfor bcname in bcphy_all[dir1]:\n\t\t\t\t\t\t\t\t\t\tfor bctype in bcphy_all[dir1][bcname]:\n\t\n\t\t\t\t\t\t\t\t\t\t\tfephy[bctype] = open(incPATH+'bcsrc_edgescall_PhyBC_'+bcname+'_'+bctype+'_'+dir1+'_'+dir2+'_0.for','r')\n\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype].write(' call '+fephy[bctype].readlines()[8][10:])\n\t\n\t\t\t\t\t\t\t\tif dir2 in bcphy_all:\n\t\t\t\t\t\t\t\t\tfor bcname in bcphy_all[dir2]:\n\t\t\t\t\t\t\t\t\t\tfor bctype in bcphy_all[dir2][bcname]:\n\t\t\t\t\t\t\t\t\t\t\tif bctype not in slcbc:\n\t\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\t\t\t\t\t\tfephy[bctype] = open(incPATH+'bcsrc_edgescall_PhyBC_'+bcname+'_'+bctype+'_'+dir2+'_'+dir1+'_0.for','r')\n\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype].write(' call '+fephy[bctype].readlines()[8][10:])\n\n\t\t\t\t\t\t\t\tperms = [''.join(p) for p in permutations(dir1+dir2)]\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif (dir1+dir2) in bcphy_all or (list(set(perms)&set(bcphy_all)) != [] ):\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tdir1dir2 = list(set(perms)&set(bcphy_all))[0]\n\n\t\t\t\t\t\t\t\t\tfor bcname in bcphy_all[dir1dir2]:\n\t\t\t\t\t\t\t\t\t\tfor bctype in bcphy_all[dir1dir2][bcname]:\n\t\t\t\t\t\t\t\t\t\t\tif bctype not in slcbc:\n\t\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\t\t\t\t\t\tfephy[bctype] = open(incPATH+'bcsrc_edges_PhyBC_'+bcname+'_'+bctype+'_'+dir1dir2+'_0_0.for','r')\n\t\t\t\t\t\t\t\t\t\t\tslcbc[bctype].write(' call '+fephy[bctype].readlines()[8][10:])\n\t\n\t\t\t\t\t\t\t\tedone.append(dir1+dir2)\n\t\t\t\t\t\t\t\t\n\n# # Stored OPEN select and static/dyn extraction\t\n\t\t\t\t\t\t\tvar2process['varbcstatic'] = {}\n\t\t\t\t\t\t\tvar2process['varbc'] = {}\t\n\t\t\t\t\t\t\tstaticvarbc = {}\n\t\t\t\t\t\t\tdynamicvarbc= {}\n\t\t\t\t\t\t\taddvarbc = False\n\t\t\t\t\t\t\tfor var in varbc:\n\t\t\t\t\t\t\t\tif 'face' in varbc[var]:\n\t\t\t\t\t\t\t\t\tif varbc[var]['face'] == dir1: addvarbc = True\n\t\t\t\t\t\t\t\telif 'edge' in varbc[var]:\n\t\t\t\t\t\t\t\t\tif varbc[var]['edge'] == dir1+dir2: addvarbc = True\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\taddvarbc = False\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif addvarbc:\t\n\t\t\t\t\t\t\t\t\tif varbc[var]['static']:\n\t\t\t\t\t\t\t\t\t\tstaticvarbc[var] = varbc[var]['symb']\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tdynamicvarbc[var] = varbc[var]['symb']\n\t\t\t\t\t\t\t\taddvarbc = False\t\t\t\n\t\n\t\t\t\t\t\t\tvar2process['varbcstatic'] = staticvarbc\n\t\t\t\t\t\t\tvar2process['varbc'] = dynamicvarbc\n\n\t\t\t\t\t\t\tfor k in var2process:\n\t\t\t\t\t\t\t\tif var2process[k] != {}:\t\n\n\t\t\t\t\t\t\t\t\tslcbc_stored[k] = open(incPATH+'select'+k+'bc.f90','a+')\n\t\t\t\t\t\t\t\t\tslcbc_stored[k].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\t\t\t\t# if k[0:5] == 'varbc': \t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# layerend = 1\n\t\t\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t\t\t # layerend = hlo_rhs \n\t\t\t\t\t\t\t\t\tlayerend = hlo_rhs \n\t\t\t\t\t\t\t\t\tfor layer1 in range(0,layerend): #BC layers dir1\n\t\t\t\t\t\t\t\t\t\tefname_stored[k] = 
open(incPATH+'bcsrc_'+k+'_edgescall_'+dir1+'_'+dir2+'_'+str(layer1)+'.for','r')\n\t\t\t\t\t\t\t\t\t\tslcbc_stored[k].write(' call '+efname_stored[k].readlines()[8][10:])\t\t\t\t\t\t\t\t\n\n# #\t\textends bc filtering along the edges:\t\t\n\n\t\t\t\t\t\t\tfltbnd = {'i1' :{'dir': 'x','bound': 'idloop(1) = idarray(1)\\n'},\n\t\t\t\t\t\t\t\t\t 'imax':{'dir': 'x','bound': 'idloop(2) = idarray(2)\\n'},\n\t\t\t\t\t\t\t\t\t 'j1' :{'dir': 'y','bound': 'idloop(3) = idarray(3)\\n'},\n\t\t\t\t\t\t\t\t\t 'jmax':{'dir': 'y','bound': 'idloop(4) = idarray(4)\\n'},\n\t\t\t\t\t\t\t\t\t 'k1' :{'dir': 'z','bound': 'idloop(5) = idarray(5)\\n'},\n\t\t\t\t\t\t\t\t\t 'kmax':{'dir': 'z','bound': 'idloop(6) = idarray(6)\\n'}}\n\t\t\t\t\t\t\ttry:\t\n\t\t\t\t\t\t\t\tslcbd.close()\t \t \n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tNone\n\n\t\t\t\t\t\t\tslcbd = open(incPATH+'selectfilterbc_'+fltbnd[dir1]['dir']+'.f90','a+')\n\t\t\t\t\t\t\tslcbd.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\t\t\t\t\tslcbd.write(' '+fltbnd[dir2]['bound'])\n\t\t\t\t\t\t\tslcbd.close()\n\n\t\t\t\t\t\t\tslcbd = open(incPATH+'selectfilterbc_'+fltbnd[dir2]['dir']+'.f90','a+')\n\t\t\t\t\t\t\tslcbd.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\t\t\t\t\tslcbd.write(' '+fltbnd[dir1]['bound'])\n\t\t\t\t\t\t\tslcbd.close()\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\tslcbd = open(incPATH+'selectupdate_filterbc_'+fltbnd[dir1]['dir']+'.f90','a+')\n\t\t\t\t\t\t\tslcbd.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\t\t\t\t\tslcbd.write(' '+fltbnd[dir2]['bound'])\n\t\t\t\t\t\t\tslcbd.close()\n\n\t\t\t\t\t\t\tslcbd = open(incPATH+'selectupdate_filterbc_'+fltbnd[dir2]['dir']+'.f90','a+')\n\t\t\t\t\t\t\tslcbd.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\t\t\t\t\tslcbd.write(' '+fltbnd[dir1]['bound'])\n\t\t\t\t\t\t\tslcbd.close()\t\t\t\t\t\t\n\n\n# # ADDS NORMAL-TO-BOUNDARY FILTERS FOR BC DIRECTION:\n\tfor dir1 in bcdir_all:\n\t\tif dir1 not in edgeBCs:\n\t\t\n\t\t\tbcnum = bcnum + 1\n\t\n\t\t\tbcdone[dir1] = []\n\t\t\tbcdone[dir1].append(bcnum)\n\t\t\t\t\t\t\n\t\t\taxes = {'i':'x','j':'y','k':'z'}\n\t\n\t\t\tslcbcflt = open(incPATH+'selectfilterbc_'+axes[dir1[0]]+'.f90' ,'a+')\n\t\t\tslcbcfltup = open(incPATH+'selectupdate_filterbc_'+axes[dir1[0]]+'.f90' ,'a+')\t\t\t\t\t\t\t\n\t\t\tslcbcflt.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\tslcbcfltup.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\n\t\t\tfor layer in range(0,hlo_rhs):\n\t\t\t\t genFilter(stencil_rhs,order_rhs,len(varsolved),dirBC=dir1,indbc=layer,fltbeg=0,rhs=rhs)\n\t\t\t\n\t\t\tup = open(incPATH+'update_filterbc_'+axes[dir1[0]]+'.f90','r') # set to empty\n\t\t\tfltbc = open(incPATH+'filterbc_'+axes[dir1[0]]+'.f90' ,'r') # set to empty\t\n\t\n\t\t\tfltbclines = fltbc.readlines()\n\t\t\tuplines = up.readlines()\n\t\n\t\t\tfor l in fltbclines:\n\t\t\t\tslcbcflt.write(' '+l)\n\t\t\tfor l in uplines:\n\t\t\t\tslcbcfltup.write(' '+l)\t\t\n\t\t\n# #\t\textends in-plane filtering along the bc:\n\t\t\t\n\t\t\tslcbcflt.close()\n\t\t\tslcbcfltup.close()\n\t\n\t\t\tif dir1[1:] == 'max':\n\t\t\t\tbndx = 'idflt(2) = idarray(2)\\n'\t\t\t\t\t\t\t\n\t\t\t\tbndy = 'idflt(4) = idarray(4)\\n'\n\t\t\t\tbndz = 'idflt(6) = idarray(6)\\n'\n\t\t\telse:\t\n\t\t\t\tbndx = 'idflt(1) = idarray(1)\\n'\t\t\t\t\t\t\t\n\t\t\t\tbndy = 'idflt(3) = idarray(3)\\n'\n\t\t\t\tbndz = 'idflt(5) = idarray(5)\\n'\t\t\t\t\t\t\t\t\t\n\t\n\t\t\tfltbnd = {'x':[{},{'y':bndx},{'y':bndx,'z':bndx}],\n\t\t\t\t\t 'y':[{},{'x':bndy},{'x':bndy,'z':bndy}],\n\t\t\t\t\t 'z':[{},{ },{'x':bndz,'y':bndz}]}\n\t\t\tbounds = 
fltbnd[axes[dir1[0]]][dim-1]\n\t\t\t\n\t\t\tfor axebd in bounds:\n\t\t\t\tslcbd = open(incPATH+'selectfilterbc_'+axebd+'.f90','a+')\n\t\t\t\tslcbd.write('\\n CASE ('+str(bcnum)+')\\n\\n')\n\t\t\t\tslcbd.write(' '+bounds[axebd])\t\t\n\t\t\t\t\t\t\t \t\n# # GENERATE STATIC/DYNAMIC STORED VARIABLES:\t\t\t\n\t\t\tvar2process['varbcstatic'] = {}\n\t\t\tvar2process['varbc'] = {}\t\n\t\t\tstaticvarbc = {}\n\t\t\tdynamicvarbc= {}\n\n\n\t\t\taddvarbc = False\n\t\t\tfor var in varbc:\n\t\t\t\tif 'face' in varbc[var]:\n\t\t\t\t\tif varbc[var]['face'] == dir1: addvarbc = True\n\t\t\t\telif 'edge' in varbc[var]:\n\t\t\t\t\tif varbc[var]['edge'] == dir1: addvarbc = True\n\t\t\t\telse:\n\t\t\t\t\taddvarbc = False\n\t\t\t\n\t\t\t\tif addvarbc:\t\n\t\t\t\t\tif varbc[var]['static']:\n\t\t\t\t\t\tstaticvarbc[var] = varbc[var]['symb']\n\t\t\t\t\telse:\n\t\t\t\t\t\tdynamicvarbc[var] = varbc[var]['symb']\t\n\t\t\t\taddvarbc = False\t\t\n\t\n\t\t\tvar2process['varbcstatic'] = staticvarbc\n\t\t\tvar2process['varbc'] = dynamicvarbc\n\t\t\t\t\t\n\t\t\tfor k in var2process:\n\t\t\t\tif var2process[k] != {}:\t\n\t\n\t\t\t\t\tslcbc = open(incPATH+'select'+k+'bc.f90','a+')\n\t\t\t\t\tst = open(incPATH+'bcsrc'+k+'_'+dir1+'.for','r') # set to empty\t\n\t\t\t\t\t\n\t\t\t\t\tslcbc.write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\tcallname = st.readlines()[8][10:]\n\t\t\t\t\tslcbc.write(' call '+ callname)\n\t\n# # ADD CALL TO NEW PHYSICAL BC :\t\t\t\t\t\n\t\t\t\n\t\t\tslcbc = {}\n\t\t\t# for bctype in ['rhs','q']:\n\t\t\t# \tslcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t# \tslcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\n\t\t\tif dir1 in bcphy_all:\n\t\t\t\tfor bcname in bcphy_all[dir1]:\n\t\t\t\t\tfor bctype in bcphy_all[dir1][bcname]:\n\t\n\t\t\t\t\t\tslcbc[bctype] = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t\tslcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t# slcbc = open(incPATH+'select_phybc_'+bctype+'.f90' ,'a+')\n\t\t\t\t\t# rhs_for.close()\n\t\t\t\t\t\n\t\t\t\t\t\tphyBCread = open(incPATH+'PhyBC'+bcname+'_'+dir1+'_'+bctype+'.for','r')\t\n\t\t\t\t\t\trhshlo = []\n\t\t\t\t\t\tfor layer in range(0,hlo_rhs):\n\t\t\t\t\t\t\trhshlo.append(open(incPATH+'bcsrc'+dir1+'_'+str(layer)+'.for','r'))\n\t\t\t\n\t\t\t\t\t\t# slcbc[bctype].write('CASE ('+str(bcnum)+')\\n')\n\t\t\t\t\t\tif(bctype=='rhs'):\n\t\t\t\t\t\t\tfor layer in range(0,hlo_rhs):\n\t\t\t\t\t\t\t\t\tslcbc[bctype].write(' call '+rhshlo[layer].readlines()[8][10:])\n\t\t\t\t\t\t\tslcbc[bctype].write(' '+idrhs[dir1])\t\n\t\t\t\t\t\tslcbc[bctype].write(' call '+phyBCread.readlines()[9][10:])\t\t\t\t\t\t\t\t\n\t\t\t\tif slcbc != {}:\n\t\t\t\t\tif 'rhs' not in slcbc:\n\t\t\t\t\t\t\tslcbc['rhs'] = open(incPATH+'select_phybc_rhs.f90' ,'a+')\n\t\t\t\t\t\t\tslcbc['rhs'].write('CASE ('+str(bcnum)+')\\n')\t\t\n\t\t\t\t\t\t\trhshlo = []\n\t\t\t\t\t\t\tfor layer in range(0,hlo_rhs):\n\t\t\t\t\t\t\t\trhshlo.append(open(incPATH+'bcsrc'+dir1+'_'+str(layer)+'.for','r'))\t\n\t\t\t\t\t\t\tfor layer in range(0,hlo_rhs):\n\t\t\t\t\t\t\t\t\tslcbc['rhs'].write(' call '+rhshlo[layer].readlines()[8][10:])\n\t\t\t\t\t\t\tslcbc['rhs'].write(' '+idrhs[dir1])\t\t\t\t\t\t\t\n\t\n\trhs.bc_info[1] = bcdone",
"def test_f1_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 1 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.RANDOM]\n F = 1\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.",
"def rec_events_scaffold_protein_binding_gab1(): \n \n #GAB1 binds ErbB:ErbB-GRB2. \n bind_complex(erbb(bd=1) % erbb(bd=1) % GRB2(b=None, bsos=None, bgap=ANY, bgab1=None, bcbl=None), 'bgab1', GAB1(bgrb2=None, bshp2=None, bpi3k=None, batp=None, bERKPP=None, bPase9t=None, S='U'), 'bgrb2', par['GRB2_bind_GAB1'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
messages1 and messages2 represent the encoded headlines from two news sources; corr represents the correlation between the two. Currently returns the average correlation. | def average_similarity(messages1, messages2):
if np.array_equal(messages2, messages1):
return 1
corr = np.corrcoef(messages1, messages2)
return np.average(corr) | [
"def compute_correlation(distribution1, distribution2):\n\taverage1 = compute_average(distribution1)\n\taverage2 = compute_average(distribution2)\n\tstd1 = compute_std(distribution1)\n\tstd2 = compute_std(distribution2)\n\tnewdistribution = []\n\tfor entry in range(len(distribution1)):\n\t\tnewdistribution.append((distribution1[entry]-average1)*(distribution2[entry]-average2))\n\tnewaverage = compute_average(newdistribution)\n\tcorrelation = newaverage/(std1*std2)\n\treturn correlation",
"def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum",
"def _compute_correlation(ts1, ts2, comparison_mode, correlation_type,\r\n tail_type, num_permutations, confidence_level,\r\n perform_detailed_comparisons=False,\r\n expected_sample_id=None):\r\n # Convert our notion of tail type into the format expected by PyCogent's\r\n # correlation_test().\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n if comparison_mode != 'paired' and comparison_mode != 'expected':\r\n raise ValueError(\"Invalid comparison mode '%s'. Must be one of %r.\" %\r\n (comparison_mode, comparison_modes))\r\n\r\n # Make sure that the second taxa summary has only one sample if we weren't\r\n # provided an expected sample ID to compare against.\r\n if (comparison_mode == 'expected' and expected_sample_id is None and\r\n len(ts2[0]) != 1):\r\n raise ValueError(\"The second taxa summary file must contain a single \"\r\n \"sample (column) to compare all samples in the first taxa \"\r\n \"summary file against when the comparison mode is 'expected' \"\r\n \"and an expected sample ID is not provided. You provided a \"\r\n \"file with %d samples.\"\r\n % len(ts2[0]))\r\n\r\n if comparison_mode == 'paired':\r\n # Make sure the number of samples match between the two files (the IDs\r\n # do not have to match because of the sample ID map).\r\n if len(ts1[0]) != len(ts2[0]):\r\n raise ValueError(\"The two taxa summaries are incompatible because \"\r\n \"they do not have the same number of sample IDs. \"\r\n \"The taxa summaries must be made compatible \"\r\n \"before attempting to perform \"\r\n \"pairwise-comparisons between samples.\")\r\n\r\n # Make sure the taxa information is the same (i.e. the summaries have been\r\n # sorted and filled).\r\n if ts1[1] != ts2[1]:\r\n raise ValueError(\"The taxa do not match exactly between the two taxa \"\r\n \"summary files. The taxa must be sorted and filled \"\r\n \"before attempting to compare them.\")\r\n\r\n # Find the index of the expected sample ID.\r\n if comparison_mode == 'expected':\r\n if expected_sample_id:\r\n try:\r\n expected_idx = ts2[0].index(expected_sample_id)\r\n except ValueError:\r\n raise ValueError(\"The expected sample ID '%s' is not in the \"\r\n \"taxa summary file.\" % expected_sample_id)\r\n else:\r\n # We know the 'expected' taxa summary has a single sample in it, so\r\n # this is the only possible index.\r\n expected_idx = 0\r\n\r\n # Compute the overall correlation between each sample and the expected\r\n # sample, or each of the paired samples, and optionally the correlation\r\n # between each pair of samples individually.\r\n corr_vec = None\r\n if perform_detailed_comparisons:\r\n corr_vec = []\r\n num_comparisons = len(ts1[0])\r\n\r\n all_ts1_data = []\r\n all_ts2_data = []\r\n for samp_idx, samp_id in enumerate(ts1[0]):\r\n if comparison_mode == 'paired':\r\n paired_idx = samp_idx\r\n elif comparison_mode == 'expected':\r\n paired_idx = expected_idx\r\n else:\r\n # Redundant check, but here for safety in case the one above is\r\n # changed or removed.\r\n raise ValueError(\"Invalid comparison mode '%s'. 
Must be one of \"\r\n \"%r.\" % (comparison_mode, comparison_modes))\r\n\r\n # Grab the columns of data for the current sample and its pair.\r\n ts1_data = ts1[2].T[samp_idx]\r\n ts2_data = ts2[2].T[paired_idx]\r\n all_ts1_data.extend(ts1_data)\r\n all_ts2_data.extend(ts2_data)\r\n\r\n if perform_detailed_comparisons:\r\n # Compare the current sample and its pair.\r\n corr_coeff, param_p_val, unused, nonparam_p_val, conf_interval = \\\r\n correlation_test(ts1_data, ts2_data,\r\n method=correlation_type,\r\n tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n\r\n # Compute the Bonferroni-corrected p-values.\r\n param_p_val_corr = min(param_p_val * num_comparisons, 1)\r\n nonparam_p_val_corr = None if nonparam_p_val is None else \\\r\n min(nonparam_p_val * num_comparisons, 1)\r\n\r\n corr_vec.append((samp_id, ts2[0][paired_idx], corr_coeff,\r\n param_p_val, param_p_val_corr, nonparam_p_val,\r\n nonparam_p_val_corr, conf_interval))\r\n\r\n # Compare all paired samples at once.\r\n results = correlation_test(all_ts1_data, all_ts2_data,\r\n method=correlation_type, tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n # We don't need to return all of the permuted correlation coefficients.\r\n overall_corr = (results[0], results[1], results[3], results[4])\r\n return overall_corr, corr_vec",
"def _merge_correlation(self, other):\n corr_mat1 = self.correlation_matrix\n corr_mat2 = other.correlation_matrix\n n1 = self.total_samples - self.row_is_null_count\n n2 = other.total_samples - other.row_is_null_count\n if n1 == 0:\n return corr_mat2\n if n2 == 0:\n return corr_mat1\n\n if corr_mat1 is None or corr_mat2 is None:\n return None\n\n # get column indices without nan\n col_ids1 = np.where(~np.isnan(corr_mat1).all(axis=0))[0]\n col_ids2 = np.where(~np.isnan(corr_mat2).all(axis=0))[0]\n\n if len(col_ids1) != len(col_ids2) or len(col_ids1) <= 1:\n return None\n if (col_ids1 != col_ids2).any():\n return None\n\n mean1 = np.array(\n [self._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids1])\n std1 = np.array(\n [self._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids1])\n\n mean2 = np.array(\n [other._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids2])\n std2 = np.array(\n [other._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids2])\n return self._merge_correlation_helper(corr_mat1, mean1, std1, n1,\n corr_mat2, mean2, std2, n2)",
"def correlation(im1, im2):\n #return np.sum( (im1-np.mean(im1)) * (im2-np.mean(im2)) / ( np.std(im1) * np.std(im2) ) ) / np.prod(np.shape(im1))\n return np.sum((im1) * (im2)) / np.sqrt(np.sum((im1)**2) * np.sum((im2)**2))",
"def lagcorr(x1, x2):\n x1 = x1 - x1.mean()\n x2 = x2 - x2.mean()\n corr1 = sp.signal.correlate(x1, x2)\n corr1 = (corr1 / (x1.std() * x2.std())) / x1.size\n\n return corr1",
"def correlate_channels_multichannel(r, first, second):\n A, B = r.intensity_image[...,[first, second]].T\n\n filt = (A > 0)&(B > 0)\n if filt.sum() == 0:\n return np.nan\n\n A = A[filt]\n B = B[filt]\n corr = (A - A.mean()) * (B - B.mean()) / (A.std() * B.std())\n\n return corr.mean()",
"def coupling_coef_corrs(coupling_coefs1, coupling_coefs2, correlation='pearson'):\n n_neurons = coupling_coefs1.shape[0]\n correlations = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n ccs1 = coupling_coefs1[neuron]\n ccs2 = coupling_coefs2[neuron]\n\n if np.array_equal(ccs1, ccs2):\n correlations[neuron] = 1.\n elif np.all(ccs1 == 0) or np.all(ccs2 == 0):\n correlations[neuron] = 0\n else:\n if correlation == 'pearson':\n correlations[neuron] = np.corrcoef(ccs1, ccs2)[0, 1]\n elif correlation == 'spearman':\n correlations[neuron] = spearmanr(ccs1, ccs2).correlation\n elif correlation == 'cosine':\n correlations[neuron] = cosine_similarity(ccs1, ccs2)\n\n return correlations",
"def correlation(subj_a_data, subj_b_data):\n run_split_a_data = np.split(subj_a_data, RUN_DIVISIONS[:-1], axis=1)\n run_split_b_data = np.split(subj_b_data, RUN_DIVISIONS[:-1], axis=1)\n correlations = np.zeros(NUM_VOXELS)\n for run_a, run_b in itertools.izip(run_split_a_data, run_split_b_data):\n correlations += pearson_r(run_a, run_b)\n correlations /= NUM_RUNS\n return correlations",
"def test__same_text_correlation(self):\n \n _log.info('-'*80)\n \n # arrange \n text1 = \"love is rain as long story short\"\n text2 = text1\n\n dump_file = getInputFile(\"swiki_knowledge_output.xml\")\n parsed_file = getOutputFile(\"swiki_knowledge_output.parsed.xml\")\n #wdb_file = getOutputFile(\"swiki_knowledge_output.wdb\")\n\n articles = ['Rain', 'Love', 'Tree'] \n \n # act\n wn.make_dump(dump_file, articles, compress=False)\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n #self.addCleanup(os.remove, self.tmp_dump_file)\n \n comparer = SemanticComparer(db_wrapper)\n correlation = comparer.compare(text1, text2)\n _log.info(test_utils.get_texts_correlation_message(text1, text2, correlation))\n self.assertAlmostEqual(correlation, 1.0, msg=\"for same text correlation should be 1\")",
"def cal_cc_score(Att_1, Att_2):\n eps = 2.2204e-16 #regularization value\n Map_1 = Att_1 - np.mean(Att_1)\n if np.max(Map_1) > 0:\n Map_1 = Map_1 / np.std(Map_1)\n Map_2 = Att_2 - np.mean(Att_2)\n if np.max(Map_2) > 0:\n Map_2 = Map_2 / np.std(Map_2)\n if np.sum(Map_1)==0:\n Map_1+=eps\n if np.sum(Map_2)==0:\n Map_2+=eps\n\n score = np.corrcoef(Map_1.reshape(-1), Map_2.reshape(-1))[0][1]\n if np.isnan(score):\n score=0\n\n return score",
"def _detect_correlation(self):\n correlations = []\n shifted_correlations = []\n self.time_series_a.normalize()\n self.time_series_b.normalize()\n a, b = self.time_series_a.align(self.time_series_b)\n a_values, b_values = a.values, b.values\n a_avg, b_avg = a.average(), b.average()\n a_stdev, b_stdev = a.stdev(), b.stdev()\n n = len(a)\n denom = a_stdev * b_stdev * n\n # Find the maximal shift steps according to the maximal shift seconds.\n allowed_shift_step = self._find_allowed_shift(a.timestamps)\n if allowed_shift_step:\n shift_upper_bound = allowed_shift_step\n shift_lower_bound = -allowed_shift_step\n else:\n shift_upper_bound = 1\n shift_lower_bound = 0\n for delay in range(shift_lower_bound, shift_upper_bound):\n delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]\n if delay < 0:\n delay_in_seconds = -delay_in_seconds\n s = 0\n for i in range(n):\n j = i + delay\n if j < 0 or j >= n:\n continue\n else:\n s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))\n r = s / denom if denom != 0 else s\n correlations.append([delay_in_seconds, r])\n # Take shift into account to create a \"shifted correlation coefficient\".\n if self.max_shift_milliseconds:\n shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))\n else:\n shifted_correlations.append(r)\n max_correlation = list(max(correlations, key=lambda k: k[1]))\n max_shifted_correlation = max(shifted_correlations)\n max_correlation.append(max_shifted_correlation)\n self.correlation_result = CorrelationResult(*max_correlation)",
"def correlate_rdms(rdm1, rdm2):\n\n # Extract off-diagonal elements of each RDM\n ioffdiag = np.triu_indices(rdm1.shape[0], k=1) # indices of off-diagonal elements\n rdm1_offdiag = rdm1[ioffdiag]\n rdm2_offdiag = rdm2[ioffdiag]\n\n corr_coef = np.corrcoef(rdm1_offdiag, rdm2_offdiag)[0,1]\n\n return corr_coef",
"def plot_correlations(p1, p2):\n param1 = p1.GetY()\n param2 = p2.GetY()\n err1 = p1.GetEY() \n err2 = p2.GetEY()\n\n #corr_graph = ROOT.TGraphErrors(p1.GetN(), param1, param2, np.zeros(p1.GetN()), np.zeros(p2.GetN()) )\n corr_graph = ROOT.TGraphErrors(p1.GetN(), param1, param2, err1, err2)\n corr_graph.SetTitle(\"Correlations of TELLIE PIN readings and recorded nHits\")\n tits_1 = p1.GetYaxis().GetTitle()\n tits_2 = p2.GetYaxis().GetTitle()\n corr_graph.GetXaxis().SetTitle(tits_1)\n corr_graph.GetYaxis().SetTitle(tits_2)\n corr_graph.GetXaxis().SetTitleOffset(1.2)\n corr_graph.GetYaxis().SetTitleOffset(1.2)\n corr_graph.SetMarkerStyle(33)\n return corr_graph",
"def cloud_motion_fft(convolver,fft1,fft2,ratio=0.7): \n####use this routine if convolver and fft objects are ready \n ny,nx=fft2[-2]\n try:\n corr=mncc.mncc_fft(convolver, fft1, fft2, ratio_thresh=ratio) \n# plt.figure(); plt.imshow(corr) \n max_idx=np.nanargmax(corr)\n vy,vx=max_idx//len(corr)-ny+1,max_idx%len(corr)-nx+1 \n return vy,vx,corr.ravel()[max_idx] \n except:\n return None, None, None",
"def find_correspondences(pts1, pts2, desc1, desc2, match_score_type='ratio'):\n N = pts1.shape[0]\n X = np.sum(desc1**2, axis=1, keepdims=True)\n Y = np.sum(desc2**2, axis=1, keepdims=True).T\n XY = np.dot(desc1,desc2.T)\n L = X + Y - 2*XY\n\n D = (np.maximum(L, 0))\n scores = np.min(D, axis = 1)\n indices = np.argmin(D,axis = 1)\n corr = []\n for j,index in enumerate(indices):\n corr.append(np.hstack([pts1[j],pts2[index]]))\n if match_score_type=='ratio': \n p = np.sort(D, axis = 1)\n scores = p[:,0]/p[:,1]\n return np.array(corr), indices, scores",
"def compare_emails(first, second):\n match = 0\n ignored = ['Subject', 'From', 'X-Authentication-Warning', 'Received']\n # Compare subject\n if first.subject == second.subject:\n match += SUBJECT_PRIORITY\n elif not_empty(first.subject, second.subject):\n match += compare_dicts(compute_word_frequencies_from_text(first.subject),\n compute_word_frequencies_from_text(second.subject)) * SUBJECT_PRIORITY / 2\n # they are not equal, only some words occurrences\n\n # Compare from\n if first.From == second.From:\n match += FROM_PRIORITY\n\n # compare X authentication warning\n if first.x_authentication_warning == second.x_authentication_warning:\n match += WARNING_PRIORITY\n\n # compare receive history chain\n length = max(len(first.received), len(second.received))\n receive_match = set(first.received).intersection(second.received)\n if length > 0:\n match += (len(receive_match) / length) * RECEIVED_PRIORITY\n\n MatchedHeaders = 0\n # compare secondary headers\n for header in first.AllHeaders:\n if header[0] not in ignored:\n if header in second.AllHeaders:\n MatchedHeaders += 1\n\n match += SECONDARY_PRIORITY * MatchedHeaders / max(len(first.AllHeaders), len(second.AllHeaders))\n # compare payloads\n match += PAYLOAD_PRIORITY * compare_payloads(first.payloads, second.payloads)\n return match",
"def correlate_targets(self,target1,target2):\n target_list = [target1,target2]\n for t in target_list:\n if t not in self.targets:\n print('Target {0:s} no in the target list'.format(t))\n return\n if self.size!=255:\n cube1 = fits.getdata(os.path.join(self.pathOut_targets[target1],'{0:s}_{1:d}x{1:d}_{2:s}_O.fits'.format(target1,self.size,self.channel)))\n cube2 = fits.getdata(os.path.join(self.pathOut_targets[target2],'{0:s}_{1:d}x{1:d}_{2:s}_O.fits'.format(target2,self.size,self.channel)))\n else: # we are in full frame mode here\n cube1 = fits.getdata(os.path.join(self.pathOut_targets[target1],'{0:s}_1024x1024_rebinned_255x255_{1:s}_O.fits'.format(target1,self.channel)))\n cube2 = fits.getdata(os.path.join(self.pathOut_targets[target2],'{0:s}_1024x1024_rebinned_255x255_{1:s}_O.fits'.format(target2,self.channel))) \n nframes1 = cube1.shape[0]\n nframes2 = cube2.shape[0]\n correlation_matrix = np.ndarray((nframes1,nframes2))\n for iframe1 in range(nframes1):\n for iframe2 in range(nframes2):\n# img1 = cube1[iframe1,:,:]*self.mask_correlation\n# img2 = cube2[iframe2,:,:]*self.mask_correlation\n img1 = cube1[iframe1,:,:][self.mask_correlation]\n img1 -= np.mean(img1)\n img2 = cube2[iframe2,:,:][self.mask_correlation]\n img2 -= np.mean(img2)\n corr_coeff = np.nansum(img1*img2) / ( np.sqrt(np.nansum(img1**2)) * np.sqrt(np.nansum(img2**2)) )\n correlation_matrix[iframe1,iframe2] = corr_coeff\n return correlation_matrix",
"def corr_coef(signal1, signal2):\r\n coef = np.corrcoef(signal1.timeSignal, signal2.timeSignal)\r\n return coef[0, 1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
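
The `average_similarity` row above reduces two batches of encoded headlines to a single number by averaging the full correlation matrix that `np.corrcoef` builds from both batches stacked row-wise, so self- and within-source correlations are included in the average. A minimal, self-contained usage sketch, with random vectors standing in for real encoder output (the encoder itself is not part of the row):

import numpy as np

def average_similarity(messages1, messages2):
    # Identical inputs short-circuit to a perfect score of 1.
    if np.array_equal(messages2, messages1):
        return 1
    # np.corrcoef stacks both batches row-wise, so the resulting matrix also
    # contains self- and within-batch correlations; the row's function simply
    # averages every entry of that matrix.
    corr = np.corrcoef(messages1, messages2)
    return np.average(corr)

# Hypothetical stand-ins for two batches of encoded headlines (4 headlines
# each, 512-dimensional embeddings); a real pipeline would substitute the
# output of a sentence encoder for these random vectors.
headlines_a = np.random.rand(4, 512)
headlines_b = np.random.rand(4, 512)
print(average_similarity(headlines_a, headlines_b))
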
Represents messages as vectors, which are used to calculate similarity. | def find_similarity(message1, message2):
total = 0
for i in range(len(message1)):
max = 0
for j in range(len(message2)):
message1_encoded = embed([message1[i]])
message2_encoded = embed([message2[j]])
sim = average_similarity(message1_encoded, message2_encoded)
if sim > max:
max = sim
total += max
return total/len(message1) | [
"def get_similarity_vector(self, title, content, seperator, weightpara, language='Chinese'):\r\n print('Title is:', title, '\\n')\r\n if language == 'Chinese':\r\n clean_content = content.replace(' ', '').replace('\\r\\n', '')\r\n else:\r\n clean_content = content.replace('\\r\\n', '')\r\n # clean_content=re.findall(r'([\\u4e00-\\u9fff]+|[A-Z]+|[0-9]+|[a-z])',content)\r\n print('Content is :', clean_content, '\\n')\r\n # print ('Seperators are:', seperator,'\\n')\r\n splited_content = list(filter(None, re.split(seperator, clean_content)))\r\n all_sentences = [title, content] + splited_content\r\n # print (all_sentences)\r\n # for a, b in enumerate(all_sentences):\r\n # print (a, ' : ', len(b), ' ', b + '\\n')\r\n\r\n print('There are ', str(len(all_sentences) - 2), 'sentences been splited and need embeddings.')\r\n embedding = self.get_embedding(all_sentences, language, weightpara)\r\n emb_t = embedding[0]\r\n emb_c = embedding[1]\r\n scores = []\r\n for i, b in enumerate(all_sentences[2:]):\r\n # print ('sentences',i, ':', b)\r\n emb_i = embedding[i + 2]\r\n\r\n scores.append(0.5 * (pearsonr(emb_t, emb_i)[0] + pearsonr(emb_c, emb_i)[0]))\r\n # print ('scores for all the sentences are:', scores)\r\n return scores, all_sentences",
"def semantic_vector(self,words, joint_words, info_content_norm):\n\t sent_set = set(words)\n\t semvec = np.zeros(len(joint_words))\n\t i = 0\n\t for joint_word in joint_words:\n\t if joint_word in sent_set:\n\t # if word in union exists in the sentence, s(i) = 1 (unnormalized)\n\t semvec[i] = 1.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)\n\t else:\n\t # find the most similar word in the joint set and set the sim value\n\t sim_word, max_sim = self.most_similar_word(joint_word, sent_set)\n\t semvec[i] = self.PHI if max_sim > self.PHI else 0.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)\n\t i = i + 1\n\t return semvec",
"def semantic_vector(words, joint_words, info_content_norm):\n sent_set = set(words)\n semvec = np.zeros(len(joint_words))\n i = 0\n for joint_word in joint_words:\n if joint_word in sent_set:\n # if word in union exists in the sentence, s(i) = 1 (unnormalized)\n semvec[i] = 1.0\n if info_content_norm:\n semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)\n else:\n # find the most similar word in the joint set and set the sim value\n sim_word, max_sim = most_similar_word(joint_word, sent_set)\n semvec[i] = PHI if max_sim > PHI else 0.0\n if info_content_norm:\n semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)\n i = i + 1\n return semvec",
"def message(self, x_j, similarity): # default is source to target\n return similarity * x_j",
"def _to_vec(self, sentence):\n\t\t# regex for non-punctuation\n\t\tnot_punc = re.compile('.*[A-Za-z0-9].*')\n\n\t\t# preprocess a given token\n\t\tdef preprocess(t):\n\t\t\tt = t.lower().strip(\"';.:()\").strip('\"')\n\t\t\tt = 'not' if t == \"n't\" else t\n\t\t\treturn t\n\t\t\n\t\ttokens = map(preprocess, filter(lambda t: not_punc.match(t), nltk.word_tokenize(sentence)))\n\t\ttokens = reduce(lambda a,b: a + b, [[]] + map(lambda t: re.split(r'[-]', t), tokens))\n\t\ttokens = filter(lambda t: t in self.vec, tokens)\n\n\t\t# if no parseable tokens, return a vector of a's \n\t\tif tokens == []:\n\t\t\treturn np.zeros(300) + self.a\n\t\telse:\n\t\t\tv_t = np.array(map(lambda (i,t): self.vec[t], enumerate(tokens)))\n\t\t\tv_t = v_t * (1.0 / np.linalg.norm(v_t, axis=0))\n\t\t\tembed1_t = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens)))\n\t\t\tembed1_t = np.mean(embed1_t, axis=0) \n\n\t\t\tif (len(v_t) >=16):\n\t\t\t\tif (len(v_t) % 2 ==0):\n\t\t\t\t\tembed2_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:len(tokens)/2])))\n\t\t\t\t\tembed2_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[len(tokens)/2:-1])))\n\t\t\t\telse: \n\t\t\t\t\tembed2_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:(len(tokens)+1)/2])))\n\t\t\t\t\tembed2_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)+1)/2:-1])))\n\n\t\t\t\tif (len(v_t) % 4 ==0):\n\t\t\t\t\tembed4_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:len(tokens)/4])))\n\t\t\t\t\tembed4_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[len(tokens)/4:len(tokens)/2])))\n\t\t\t\t\tembed4_t_3 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[len(tokens)/2: 3*len(tokens)/4])))\n\t\t\t\t\tembed4_t_4 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[ 3*len(tokens)/4:-1])))\n\t\t\t\telif (len(v_t) % 4 ==1) : \n\t\t\t\t\tembed4_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:(len(tokens)-1)/4])))\n\t\t\t\t\tembed4_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)-1)/4:(len(tokens)-1)/2])))\n\t\t\t\t\tembed4_t_3 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)-1)/2:3*(len(tokens)-1)/4])))\n\t\t\t\t\tembed4_t_4 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[3*(len(tokens)-1)/4:-1])))\n\t\t\t\telif (len(v_t) % 4 ==2) : \n\t\t\t\t\tembed4_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:(len(tokens)+2)/4])))\n\t\t\t\t\tembed4_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)+2)/4:(len(tokens)+2)/2])))\n\t\t\t\t\tembed4_t_3 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)+2)/2: 3*(len(tokens)+2)/4])))\n\t\t\t\t\tembed4_t_4 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[3*(len(tokens)+2)/4:-1])))\n\t\t\t\telse:\n\t\t\t\t\tembed4_t_1 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[1:(len(tokens)+1)/4])))\n\t\t\t\t\tembed4_t_2 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)+1)/4:(len(tokens)+1)/2])))\n\t\t\t\t\tembed4_t_3 = np.array(map(lambda (i,t): self.weight(t) * v_t[i,:], enumerate(tokens[(len(tokens)+1)/2:3*(len(tokens)+1)/4])))\n\t\t\t\t\tembed4_t_4 = np.array(map(lambda (i,t): 
self.weight(t) * v_t[i,:], enumerate(tokens[3*(len(tokens)+1)/4:-1])))\n\n\t\t\t\tembed2_t_1 = np.mean(embed2_t_1, axis=0)\n\t\t\t\tembed2_t_2 = np.mean(embed2_t_2, axis=0)\n\t\t\t\tembed2_t = np.maximum(embed2_t_1, embed2_t_2) #Max Pooling between the two halves\n\n\t\t\t\tembed4_t_1 = np.mean(embed4_t_1, axis=0)\n\t\t\t\tembed4_t_2 = np.mean(embed4_t_2, axis=0)\n\t\t\t\tembed4_t_3 = np.mean(embed4_t_3, axis=0)\n\t\t\t\tembed4_t_4 = np.mean(embed4_t_4, axis=0)\n\n\t\t\t\tembed4_t = np.maximum(embed4_t_1, embed4_t_2) #Max Pooling between the two halves\n\t\t\t\tembed4_t = np.maximum(embed4_t, embed4_t_3)\n\t\t\t\tembed4_t = np.maximum(embed4_t, embed4_t_4)\n\n\t\t\t\tprint np.shape(embed1_t)\n\t\t\t\tprint np.shape(embed2_t)\n\t\t\t\tprint np.shape(embed4_t)\n\n\t\t\t\treturn np.concatenate((embed1_t, embed2_t), axis=0)\n\t\t\telse:\n\t\t\t\treturn np.concatenate((embed1_t, embed1_t), axis=0)",
"def wordSimilarityRatio(sent_1,sent_2):",
"def semantic_similarity(sentence_1, sentence_2, info_content_norm):\n words_1 = nltk.word_tokenize(sentence_1)\n words_2 = nltk.word_tokenize(sentence_2)\n joint_words = set(words_1).union(set(words_2))\n vec_1 = semantic_vector(words_1, joint_words, info_content_norm)\n vec_2 = semantic_vector(words_2, joint_words, info_content_norm)\n return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))",
"def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))",
"def CalculateVectorsSimilarity(vectorfromcam, vectorfromdb):\r\n print('CalculateVectorsSimilarity')\r\n print('vectorfromdb:', type(vectorfromdb), vectorfromdb)\r\n print('vectorfromcam:', type(vectorfromcam), vectorfromcam)\r\n known_face_encodings = vectorfromdb[1:-1] # vectorfromdb come as str with \"[]\" here we remove those signs\r\n face_encoding_to_check = vectorfromcam\r\n bla = face_recognition.compare_faces(known_face_encodings, face_encoding_to_check, 0.6)\r\n print('after compare_faces')\r\n print(bla)",
"def sentence_vector(tokens, n_dim, n_dim_glove, gloves, w2v_func):\n\n vec = np.zeros(n_dim).reshape((1, n_dim)) # create vector to store w2vec output\n vec_glove = np.zeros(n_dim).reshape((1, n_dim_glove)) # create vector to store glove output\n count = 0\n\n for token in tokens:\n try:\n vec += w2v_func[token].reshape((1, n_dim)) # from word2vec model\n vec_glove += gloves[str(token)] # from glove model\n count += 1\n except KeyError:\n continue # if word is not present in either word2vec or in glove\n if count != 0:\n vec /= count\n vec_glove /= count\n vecs = np.concatenate((vec, vec_glove), axis=1) # create a vector combining both models\n return vecs",
"def get_matches(self, vec):\n sims = []\n for word, word_vec in self.word_vecs.items():\n sim = np.dot(vec, np.array(word_vec))\n sims.append((word, sim))\n return sorted(sims, key=lambda x: x[1])",
"def vector_similarity(self, vector, items):\n vector = self.normalize(vector)\n items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])\n return vector.dot(items_vec.T)",
"def _to_vec(self, sentence):\n # regex for non-punctuation\n not_punc = re.compile('.*[A-Za-z0-9].*')\n\n # preprocess a given token\n# def preprocess(t):\n# t = t.lower().strip(\"';.:()\").strip('\"')\n# t = 'not' if t == \"n't\" else t\n# return re.split(r'[-]', t)\n #sentence = sentence.lower()\n tokens = self.tokenizer.tokenize(sentence)\n input = self.tokenizer(sentence, padding=True, truncation=True, return_tensors='pt')\n with torch.no_grad():\n scores, hiddens = self.model(input['input_ids'].to(device), output_hidden_states=True)\n v_t = hiddens[0].squeeze().cpu().numpy()\n# for token in word_tokenize(sentence):\n# if not_punc.match(token):\n# tokens = tokens + preprocess(token)\n\n #tokens = list(filter(lambda t: t in self.vec, tokens))\n \n # if no parseable tokens, return a vector of a's \n# if tokens == []:\n# return np.zeros(300) + self.a\n# else:\n #v_t = np.array([ self.vec[t] for t in tokens ])\n v_t = v_t * (1.0 / np.linalg.norm(v_t, axis=0))\n v_t = np.array([ self.weight(t) * v_t[i,:] for i,t in enumerate(tokens) ])\n return np.mean(v_t, axis=0)",
"def sentences2vec(self, sentences, unseen=None):\r\n keys = self.keys\r\n # print(sentences)\r\n if unseen:\r\n unseen_vec = self.model.wv.word_vec(unseen)\r\n\r\n # if unseen:\r\n # vec.append([self.model.wv.word_vec(y) if y in set(sentences) & keys\r\n # else unseen_vec for y in sentences])\r\n # else:\r\n # vec.append([self.model.wv.word_vec(y) for y in sentences\r\n # if y in set(sentences) & keys])\r\n vec = np.array([0 for _ in range(300)])\r\n for y in sentences:\r\n if len(vec) == 0:\r\n vec = np.array(self.model.wv.word_vec(y))\r\n elif y in self.keys:\r\n vec = vec + np.array(self.model.wv.word_vec(y))\r\n # print(len(vec))\r\n return vec",
"def transform_text(messages, word_dictionary): \n # *** START CODE HERE ***\n I, J = len(messages), len(word_dictionary)\n transformed = np.zeros((I, J))\n for i, message in enumerate(messages):\n for word in get_words(message):\n if word in word_dictionary:\n transformed[i, word_dictionary[word]] += 1\n return transformed\n # *** END CODE HERE ***",
"def sentence_to_vec(comments,frequencies,embedding_size = 100, a =1e-3,use_frequencies = True):\n sentence_list = prep_text_for_stv(comments)\n if not use_frequencies:\n frequencies.return_only_ones()\n sentence_set = []\n for sentence in sentence_list:\n vs = np.zeros(embedding_size) # add all word2vec values into one vector for the sentence\n sentence_length = sentence.len()\n for word in sentence.word_list:\n a_value = a / (a + frequencies.get_word_frequency(word.text)) # smooth inverse frequency, SIF\n vs = np.add(vs, np.multiply(a_value, word.vector)) # vs += sif * word_vector\n\n vs = np.divide(vs, sentence_length) # weighted average\n sentence_set.append(vs) # add to our existing re-calculated set of sentences\n\n # calculate PCA of this sentence set\n pca = PCA(n_components=embedding_size)\n pca.fit(np.array(sentence_set))\n u = pca.components_[0] # the PCA vector\n u = np.multiply(u, np.transpose(u)) # u x uT\n\n # pad the vector? (occurs if we have less sentences than embeddings_size)\n if len(u) < embedding_size:\n for i in range(embedding_size - len(u)):\n u = np.append(u, 0) # add needed extension for multiplication below\n\n # resulting sentence vectors, vs = vs -u x uT x vs\n sentence_vecs = []\n for vs in sentence_set:\n sub = np.multiply(u,vs)\n sentence_vecs.append(np.subtract(vs, sub))\n\n return sentence_vecs",
"def get_song_word2vec_similarity(embedding_model, original, generated):\n original_without_eol = list(filter(lambda a: a != 'eol', original))\n generated_without_eol = list(filter(lambda a: a != 'eol', generated[\"generated_song\"]))\n inspected_len = min(len(original_without_eol), len(generated_without_eol))\n original_vectors = [embedding_model[word] for word in original_without_eol[:inspected_len]]\n generated_vectors = [embedding_model[word] for word in generated_without_eol[:inspected_len]]\n return np.nanmean([cosine(original_vectors[i], generated_vectors[i]) for i in range(inspected_len)])",
"def _embeddings_to_scores(self, query_vectors: torch.Tensor, passage_vectors: torch.Tensor):\n sim_func = self.get_similarity_function()\n scores = sim_func(query_vectors, passage_vectors)\n if len(query_vectors.size()) > 1:\n q_num = query_vectors.size(0)\n scores = scores.view(q_num, -1)\n softmax_scores = nn.functional.log_softmax(scores, dim=1)\n return softmax_scores",
"def similarity(self, i1, i2):\n try:\n if i1 in self.items:\n i1 = [i1]\n except TypeError:\n pass\n try:\n if i2 in self.items:\n i2 = [i2]\n except TypeError:\n pass\n i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])\n i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])\n return i1_vec.dot(i2_vec.T)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
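
The `find_similarity` row above scores two lists of headlines by taking, for each headline in the first list, the best `average_similarity` against any headline in the second list, and averaging those best scores. `embed` is not defined in the row; it is assumed to be a sentence-encoder callable, so a toy hashing encoder stands in for it in this sketch:

import numpy as np

def embed(sentences):
    # Toy stand-in for the sentence encoder the row assumes: hash each token
    # into a fixed-size bag-of-words vector. Any real encoder returning a
    # (n_sentences, dim) array would slot in here instead.
    vecs = np.zeros((len(sentences), 64))
    for i, sentence in enumerate(sentences):
        for token in sentence.lower().split():
            vecs[i, hash(token) % 64] += 1.0
    return vecs

def average_similarity(messages1, messages2):
    if np.array_equal(messages2, messages1):
        return 1
    return np.average(np.corrcoef(messages1, messages2))

def find_similarity(message1, message2):
    # For every headline in message1, keep its best match in message2,
    # then average those best-match scores (mirroring the row above).
    total = 0
    for headline in message1:
        best = 0
        for other in message2:
            sim = average_similarity(embed([headline]), embed([other]))
            if sim > best:
                best = sim
        total += best
    return total / len(message1)

source_a = ["markets rally on rate cut", "storm hits the coast"]
source_b = ["central bank cuts rates and markets rally", "coastal storm causes damage"]
print(find_similarity(source_a, source_b))
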
An iterator that will in turn yield all drawable curves in the form of (kind, name, ds, style) tuples (where kind is one of 'algorithm', 'oracle', 'unifpf', 'strategy'). | def _pds_plot_iterator(pds, dim, funcId):
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1 | [
"def iter_svgs(self):\n for name in self.parent.layers:\n yield name, self.parent.layers[name]\n for elem in self.parent.elements:\n if isinstance(elem, SVG):\n yield None, elem",
"def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]",
"def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)",
"def style_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.style_lines():\n yield line\n if isinstance(self.parent.style, str):\n yield self.parent.style\n else:\n for cls in self.parent.style:\n yield \"%s {\" % str(cls)\n for key, value in self.parent.style[cls].items():\n yield \" %s: %s;\" % (key, value)\n yield \"}\"",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def _get_drawables(self):\n for drawable in sorted(self._drawables, key=lambda drawable:\n -self._drawables[drawable]):\n if drawable.is_live():\n yield drawable",
"def lines(self):\n for pair in pairs(self.points):\n yield Line(pair, shape=self)",
"def __iter__(self):\n for format_pair in self._fmt_registry:\n yield format_pair",
"def curves(self) -> list:\n # assert self.loaded, \"DTA file not loaded. Run GamryParser.load()\"\n return self._curves",
"def get_figures(self):\n pass",
"def WeightedLayerIterator(self):\n scale = 0\n while scale <= len(self['scales']) + 1:\n if scale == 0:\n yield self['first_layer']\n elif scale == (len(self['scales']) + 1):\n yield self['final_layer']\n else:\n block = 0\n while block < len(self['scales'][scale - 1]['blocks']):\n layer = 0\n while layer < len(self['scales'][scale - 1]['blocks'][block]['conv_layers']):\n yield self['scales'][scale - 1]['blocks'][block]['conv_layers'][layer]\n layer += 1\n if 'projection' in self['scales'][scale - 1]['blocks'][block].keys():\n yield self['scales'][scale - 1]['blocks'][block]['projection']\n block += 1\n scale += 1",
"def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s",
"def parse_and_construct_graphic_layer(ds):\r\n graphic_layers = list()\r\n for item in ds.SegmentSequence:\r\n layer = {\r\n \"GraphicLayer\": str(item.SegmentDescription).upper(),\r\n \"GraphicLayerOrder\": item.SegmentNumber,\r\n \"GraphicLayerRecommendedDisplayCIELabValue\": [49512, 38656, 52736]\r\n }\r\n graphic_layers.append(layer)\r\n return graphic_layers",
"def curves(self):\n return self._curves",
"def iterdescriptors(self):",
"def convert_shape_generator_to_curve(gen: CrossSection) -> Curve:\n x, y = gen.get_points()\n # [1:-1] without first and last point / not needed in swmm\n height = np.array(x[1:-1]) / gen.height\n area = np.array(y[1:-1]) / gen.height * 2\n return Curve(Name=gen.out_filename, Type=Curve.TYPES.SHAPE, points=[[float(h), float(a)] for h, a in zip(height, area)])",
"def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature",
"def graphs(self):\n\n return iter(self._graphs.values())",
"def getListCurve(self):\n check, content = getDatasetInfo(self.token, self.datasetInfo['idDataset'])\n if check:\n list = content['curves']\n listObj = []\n for i in list:\n listObj.append(Curve(self.token, i))\n return listObj\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
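
The `_pds_plot_iterator` row above lets plotting code treat algorithms, the oracle, the eUNIF reference and strategies uniformly: the caller just loops over (kind, name, ds, style) tuples and draws one curve per tuple. A self-contained consumption sketch with toy stand-ins for the `pds` data sets and `_style_*` helpers (the real objects live in the surrounding benchmarking module and are not reproduced here):

import numpy as np
import matplotlib
matplotlib.use('Agg')  # keep the sketch headless
import matplotlib.pyplot as plt

class ToyDataSet:
    # Minimal stand-in for the `ds` objects the iterator yields; the real
    # data sets in the benchmarking module carry much more structure.
    def __init__(self, budgets, values):
        self.budgets = np.asarray(budgets)
        self.values = np.asarray(values)

def toy_plot_iterator():
    # Mimics the (kind, name, ds, style) tuples of the row above.
    yield ('algorithm', 'CMA-ES',
           ToyDataSet([1, 10, 100, 1000], [3.0, 1.0, 0.3, 0.05]),
           dict(color='C0', ls='-'))
    yield ('oracle', 'oracle',
           ToyDataSet([1, 10, 100, 1000], [2.0, 0.5, 0.1, 0.01]),
           dict(color='k', ls='--'))

fig, ax = plt.subplots()
for kind, name, ds, style in toy_plot_iterator():
    ax.loglog(ds.budgets, ds.values, label='%s (%s)' % (name, kind), **style)
ax.legend()
fig.savefig('toy_curves.png')
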
Show a legend. obj can be an Axes or Figure (in that case, also pass handles and labels arguments). | def legend(obj, ncol=3, **kwargs):
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small') | [
"def legend(**options):\n underride(options, loc=\"best\", frameon=False)\n\n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels, **options)",
"def legend (self, **kwargs):\n axes = self.twin_axes or self.axes\n self.mpl_legend = axes.legend (self.mpl_lines, self.labels, **kwargs)",
"def _plot_legend(fig, ax):\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1), framealpha=0)",
"def open_legend(self, legend, props):\r\n pass",
"def legend_extras(\n self, handles=None, labels=None, *, loc=None,\n frame=None, frameon=None, ncol=None, ncols=None,\n center=None, order='C', label=None, title=None,\n fontsize=None, fontweight=None, fontcolor=None,\n **kwargs\n):\n # Parse input args\n # TODO: Legend entries for colormap or scatterplot objects! Idea is we\n # pass a scatter plot or contourf or whatever, and legend is generated by\n # drawing patch rectangles or markers using data values and their\n # corresponding cmap colors! For scatterplots just test get_facecolor()\n # to see if it contains more than one color.\n # TODO: It is *also* often desirable to label a colormap object with\n # one data value. Maybe add a legend option for the *number of samples*\n # or the *sample points* when drawing legends for colormap objects.\n # Look into \"legend handlers\", might just want to add own handlers by\n # passing handler_map to legend() and get_legend_handles_labels().\n if order not in ('F', 'C'):\n raise ValueError(\n f'Invalid order {order!r}. Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n ncol = _not_none(ncols=ncols, ncol=ncol)\n title = _not_none(label=label, title=title)\n frameon = _not_none(frame=frame, frameon=frameon, default=rc['legend.frameon'])\n if handles is not None and not np.iterable(handles): # e.g. a mappable object\n handles = [handles]\n if labels is not None and (not np.iterable(labels) or isinstance(labels, str)):\n labels = [labels]\n if title is not None:\n kwargs['title'] = title\n if frameon is not None:\n kwargs['frameon'] = frameon\n fontsize = kwargs.get('fontsize', None) or rc['legend.fontsize']\n if fontsize is None:\n pass\n elif fontsize in mfonts.font_scalings:\n kwargs['fontsize'] = rc._scale_font(fontsize)\n else:\n kwargs['fontsize'] = units(fontsize, 'pt')\n\n # Handle and text properties that are applied after-the-fact\n # NOTE: Set solid_capstyle to 'butt' so line does not extend past error bounds\n # shading in legend entry. This change is not noticable in other situations.\n kw_text = {}\n for key, value in (('color', fontcolor), ('weight', fontweight)):\n if value is not None:\n kw_text[key] = value\n kw_handle = _pop_props(kwargs, 'lines')\n kw_handle['solid_capstyle'] = 'butt'\n\n # Get axes for legend handle detection\n # TODO: Update this when no longer use \"filled panels\" for outer legends\n axs = [self]\n if self._panel_hidden:\n if self._panel_parent: # axes panel\n axs = list(self._panel_parent._iter_axes(hidden=False, children=True))\n else:\n axs = list(self.figure._iter_axes(hidden=False, children=True))\n\n # Handle list of lists (centered row legends)\n # NOTE: Avoid very common plot() error where users draw individual lines\n # with plot() and add singleton tuples to a list of handles. If matplotlib\n # gets a list like this but gets no 'labels' argument, it raises error.\n list_of_lists = False\n if handles is not None:\n handles = [h[0] if isinstance(h, tuple) and len(h) == 1 else h for h in handles]\n list_of_lists = any(isinstance(h, (list, np.ndarray)) for h in handles)\n if list_of_lists:\n if any(not np.iterable(_) for _ in handles):\n raise ValueError(f'Invalid handles={handles!r}.')\n if not labels:\n labels = [None] * len(handles)\n elif not all(np.iterable(_) and not isinstance(_, str) for _ in labels):\n # e.g. 
handles=[obj1, [obj2, obj3]] requires labels=[lab1, [lab2, lab3]]\n raise ValueError(f'Invalid labels={labels!r} for handles={handles!r}.')\n\n # Parse handles and legends with native matplotlib parser\n if not list_of_lists:\n if isinstance(handles, np.ndarray):\n handles = handles.tolist()\n if isinstance(labels, np.ndarray):\n labels = labels.tolist()\n handles, labels, *_ = mlegend._parse_legend_args(\n axs, handles=handles, labels=labels,\n )\n pairs = list(zip(handles, labels))\n else:\n pairs = []\n for ihandles, ilabels in zip(handles, labels):\n if isinstance(ihandles, np.ndarray):\n ihandles = ihandles.tolist()\n if isinstance(ilabels, np.ndarray):\n ilabels = ilabels.tolist()\n ihandles, ilabels, *_ = mlegend._parse_legend_args(\n axs, handles=ihandles, labels=ilabels,\n )\n pairs.append(list(zip(ihandles, ilabels)))\n\n # Manage pairs in context of 'center' option\n center = _not_none(center, list_of_lists)\n if not center and list_of_lists: # standardize format based on input\n list_of_lists = False # no longer is list of lists\n pairs = [pair for ipairs in pairs for pair in ipairs]\n elif center and not list_of_lists:\n list_of_lists = True\n ncol = _not_none(ncol, 3)\n pairs = [pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs))]\n ncol = None\n if list_of_lists: # remove empty lists, pops up in some examples\n pairs = [ipairs for ipairs in pairs if ipairs]\n\n # Bail if no pairs\n if not pairs:\n return mlegend.Legend(self, [], [], loc=loc, ncol=ncol, **kwargs)\n # Multiple-legend pseudo-legend\n elif center:\n objs = _multiple_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)\n # Individual legend\n else:\n objs = [_single_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)]\n\n # Add legends manually so matplotlib does not remove old ones\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n continue\n if hasattr(self, 'legend_') and self.legend_ is None:\n self.legend_ = obj # set *first* legend accessible with get_legend()\n else:\n self.add_artist(obj)\n\n # Apply legend box properties\n outline = rc.fill({\n 'linewidth': 'axes.linewidth',\n 'edgecolor': 'axes.edgecolor',\n 'facecolor': 'axes.facecolor',\n 'alpha': 'legend.framealpha',\n })\n for key in (*outline,):\n if key != 'linewidth':\n if kwargs.get(key, None):\n outline.pop(key, None)\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n obj.update(outline) # the multiple-legend bounding box\n else:\n obj.legendPatch.update(outline) # no-op if frame is off\n\n # Apply *overrides* to legend elements\n # WARNING: legendHandles only contains the *first* artist per legend because\n # HandlerBase.legend_artist() called in Legend._init_legend_box() only\n # returns the first artist. Instead we try to iterate through offset boxes.\n # TODO: Remove this feature? Idea was this lets users create *categorical*\n # legends in clunky way, e.g. entries denoting *colors* and entries denoting\n # *markers*. But would be better to add capacity for categorical labels in a\n # *single* legend like seaborn rather than multiple legends.\n for obj in objs:\n try:\n children = obj._legend_handle_box._children\n except AttributeError: # older versions maybe?\n children = []\n for obj in _iter_legend_children(children):\n # Account for mixed legends, e.g. 
line on top of error bounds shading\n if isinstance(obj, mtext.Text):\n obj.update(kw_text)\n else:\n for key, value in kw_handle.items():\n getattr(obj, 'set_' + key, lambda value: None)(value)\n\n # Append attributes and return, and set clip property!!! This is critical\n # for tight bounding box calcs!\n for obj in objs:\n obj.set_clip_on(False)\n if isinstance(objs[0], mpatches.FancyBboxPatch):\n objs = objs[1:]\n return objs[0] if len(objs) == 1 else tuple(objs)",
"def plot2d(self, obj, options=\"\", label=None, labelfmt=None, **kwargs):\n self._pad.cd()\n self._pad.Update() # Updating the pad prevents spontaneous seg faults...\n\n # Apply formatting (if any) before calling `Draw()`\n root_helpers.set_graphics_attributes(obj, **kwargs)\n\n # Draw the object, depending on its type\n if isinstance(obj, root.TH2):\n if isinstance(self._frame, root.TH1F):\n if not self._is_empty:\n warnings.warn(\"plot2d: overwriting non-empty axes\")\n\n self._frame.Delete()\n self._frame = obj\n\n elif \"SAME\" not in options.upper():\n self._frame = obj\n\n obj.Draw(options)\n\n else:\n try:\n warnings.warn(\n \"plot2d: attempting to plot an object that is not a TH2.\\n\"\n \"This may result in unexpected behaviour.\"\n )\n obj.Draw(options)\n\n except AttributeError:\n raise TypeError(\"Attempting to plot an object with no Draw() method\")\n\n # Add object to list of legend entries if label was provided\n if label is not None:\n self._legend_entries.append((obj, label, labelfmt))\n\n self._is_empty = False # Record that the axes are no longer empty",
"def open_legend(self, legend, props):\n pass",
"def legend(*args, **kwargs):\n if kwargs.has_key('loc'):\n loc = kwargs['loc']\n if (loc == 'outer'):\n global new\n kwargs.pop('loc')\n leg = plt.legend(loc=(0,0), *args, **kwargs)\n frame = leg.get_frame()\n currentAxes = plt.gca()\n barray = currentAxes.get_position().get_points()\n currentAxesPos = [barray[0][0], barray[0][1], barray[1][0], barray[1][1]]\n currentAxes.set_position([currentAxesPos[0]-0.02, currentAxesPos[1], currentAxesPos[2] - 0.2, currentAxesPos[3]-currentAxesPos[1]])\n version = mpl.__version__.split(\".\")\n #if map(int, version) < [0, 98]:\n # leg._loc = (1 + leg.axespad, 0.0)\n #else:\n leg._loc = (1.03, -0.05) # + leg.borderaxespad, 0.0)\n plt.draw_if_interactive()\n return leg\n return plt.legend(*args, **kwargs)",
"def _show_legend(ax):\n leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,\n borderpad=0.15)\n ltext = leg.get_texts()\n llines = leg.get_lines()\n\n from matplotlib.artist import setp\n setp(ltext, fontsize='small')\n setp(llines, linewidth=1)",
"def decorate(**options):\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()",
"def _patch_legend(obj, draw_options, legend_type):\n legend = \"\"\n if _is_in_legend(obj):\n # Unfortunately, patch legend entries need \\addlegendimage in Pgfplots.\n do = \", \".join([legend_type] + draw_options) if draw_options else \"\"\n legend += \"\\\\addlegendimage{{{}}}\\n\\\\addlegendentry{{{}}}\\n\\n\".format(\n do, obj.get_label()\n )\n\n return legend",
"def custom_legend(colors: Union[list, str], labels: Union[list, str], do_show=True) -> Union[list, None]:\n _handles = []\n\n for _color, _label in zip(assert_list(colors), assert_list(labels)):\n _handles.append(patches.Patch(color=_color, label=_label))\n\n if do_show:\n plt.legend(handles=_handles)\n else:\n return _handles",
"def addLegend(plot,**kwargs):\n plot.legend = LegendItem(**kwargs)\n # Attempted hack to allow adding legends to ViewBox...\n # if hasattr(plot,'vb'):\n # vb=plot.vb\n # else:\n # vb=plot\n # doesn't work.\n plot.legend.setParentItem(plot.vb)\n return plot.legend",
"def legend(colors, labels, **kwds):\n proxies = [pylab.Rectangle((0, 0), 1, 1, fc=color) for color in colors]\n nl = min(len(proxies), len(labels))\n pylab.legend(proxies[:nl], labels[:nl], **kwds)",
"def add_legend(\n self,\n labels=None,\n bcolor=(0.5, 0.5, 0.5),\n border=False,\n size=(0.2, 0.2),\n name=None,\n loc='upper right',\n face='triangle',\n ):\n if self.legend is not None:\n self.remove_legend()\n self._legend = _vtk.vtkLegendBoxActor()\n\n if labels is None:\n # use existing labels\n if not self._labels:\n raise ValueError(\n 'No labels input.\\n\\n'\n 'Add labels to individual items when adding them to'\n 'the plotting object with the \"label=\" parameter. '\n 'or enter them as the \"labels\" parameter.'\n )\n\n self._legend.SetNumberOfEntries(len(self._labels))\n for i, (vtk_object, text, color) in enumerate(self._labels.values()):\n if face is None:\n # dummy vtk object\n vtk_object = pyvista.PolyData([0.0, 0.0, 0.0])\n\n self._legend.SetEntry(i, vtk_object, text, color.float_rgb)\n\n else:\n self._legend.SetNumberOfEntries(len(labels))\n\n legend_face = make_legend_face(face)\n for i, (text, color) in enumerate(labels):\n self._legend.SetEntry(i, legend_face, text, Color(color).float_rgb)\n\n if loc is not None:\n if loc not in ACTOR_LOC_MAP:\n allowed = '\\n'.join([f'\\t * \"{item}\"' for item in ACTOR_LOC_MAP])\n raise ValueError(f'Invalid loc \"{loc}\". Expected one of the following:\\n{allowed}')\n x, y, size = map_loc_to_pos(loc, size, border=0.05)\n self._legend.SetPosition(x, y)\n self._legend.SetPosition2(size[0], size[1])\n\n if bcolor is None:\n self._legend.SetUseBackground(False)\n else:\n self._legend.SetUseBackground(True)\n self._legend.SetBackgroundColor(Color(bcolor).float_rgb)\n\n self._legend.SetBorder(border)\n\n self.add_actor(self._legend, reset_camera=False, name=name, pickable=False)\n return self._legend",
"def show_legend(self, show_legend):\n\n self.container['show_legend'] = show_legend",
"def drawLegend(self, ax, xbound, ybound):\n\n spacing = 3\n \n # determine bounding box\n nlines = len(self.colors) + int(self.title is not None) + 1\n height = spacing*nlines \n width = 15\n if self.title is not None and len(self.title) > width:\n width = len(self.title)\n \n xloc = xbound-width\n\n if ybound < 0:\n yloc = ybound+height+2\n # ax.add_patch(patches.Rectangle( (xloc-2, yloc-height+4), width, height, fc='white', lw=1.0))\n else:\n yloc = ybound-height+2\n # ax.add_patch(patches.Rectangle( (xloc-2, yloc+4), width, height, fc='white', lw=1.0))\n \n\n # write the title\n if self.title is not None:\n ax.text(xloc, yloc, self.title, horizontalalignment='left',\n size=\"10\",weight=\"medium\", color=\"black\")\n yloc -= spacing\n\n\n for i, c in enumerate(self.colors):\n # self.colors is a list of dict patch specifications, so c is a dict\n ax.add_patch(patches.Rectangle( (xloc, yloc), 1.5, 1.5, **c ) )\n \n # now add label\n ax.text(xloc+2.5, yloc, self.labels[i], horizontalalignment='left',\n size=\"8\",weight=\"normal\", color=\"black\")\n yloc -= spacing",
"def legend(self, legend):\n\n self.container['legend'] = legend",
"def add_legend(self):\n self.ax.legend(loc=\"upper left\", bbox_to_anchor=(1,1.1))\n # TODO: maybe pass other args through to ax.legend()\n # TODO: overall vs subplot legends?"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
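Each row above and below follows the triplet layout declared in its metadata block: a natural-language query, one positive document, and a list of negative code snippets. As a minimal sketch of consuming such records for contrastive training, assuming the rows are exported as JSON lines with exactly these field names (the file name below is hypothetical and not part of this dump):

import json

def iter_triplets(path):
    # Yield (query, positive, negatives) tuples from a JSON-lines export.
    # Assumes each line is a JSON object with 'query', 'document' and
    # 'negatives' fields, matching the columns shown in this dump.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            row = json.loads(line)
            yield row["query"], row["document"], row.get("negatives", [])

rows = list(iter_triplets("code_search_triplets.jsonl"))  # hypothetical file name
if rows:
    avg_neg = sum(len(negs) for _, _, negs in rows) / len(rows)
    print(f"{len(rows)} records, {avg_neg:.1f} negatives per query on average")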
Plot each algorithm/method's rank evolving as the budget increases. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. Note that funcId may be an array of id numbers; in that case, an average rank over the listed functions is taken. | def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid() | [
"def ranking(self, dimfun, groupby, ftarget=10**-8):\n nameds = list(itertools.chain(self.algds_dimfunc(dimfun), self.stratds_dimfunc(dimfun)))\n count = len(nameds)\n\n # Produce \"fv\" items, one per dataset, containing single function value\n # for each budget\n fvset = []\n for (name, ds) in nameds:\n budgets = ds.funvals[:,0]\n f1vals = np.maximum(groupby(ds.funvals[:, 1:], axis=1), ftarget)\n fv = np.transpose(np.vstack([budgets, f1vals]))\n fvset.append(fv)\n\n # Align the \"fv\" items by budget and merge them\n fva = ra.alignArrayData(ra.VArrayMultiReader(fvset))\n budgets = fva[:,0]\n\n # Assign function values and rank them\n # However, we want to resolve eventual ties by ranking first\n # converging function first. So we do a trick and rewrite ftarget\n # values in increasing convergence sort order.\n values = fva[:,1:].copy()\n firstconv = np.ones(count) * (np.size(budgets)+1) # runlength+1 is default\n for i in range(count): # XXX: drop the loop\n try:\n firstconv[i] = np.nonzero(values[:,i] == ftarget)[0][0]\n except IndexError:\n continue # no rewriting needed\n firstconvranks = ss.mstats.rankdata(firstconv)\n for i in range(count):\n r = firstconvranks[i]\n values[firstconv[i]:, i] = ftarget - (1-r/count)*ftarget\n\n ranks = ss.mstats.rankdata(values, axis=1)\n\n return np.transpose(np.vstack([budgets, ranks.T]))",
"def plot(dsList, valuesOfInterest=values_of_interest, styles=styles):\n valuesOfInterest = pproc.TargetValues.cast(valuesOfInterest)\n styles = list(reversed(styles[:len(valuesOfInterest)]))\n dsList = pproc.DataSetList(dsList)\n dictFunc = dsList.dictByFunc()\n res = []\n\n for func in dictFunc:\n dictFunc[func] = dictFunc[func].dictByDim()\n dimensions = sorted(dictFunc[func])\n\n # legend = []\n line = []\n mediandata = {}\n displaynumber = {}\n for i_target in range(len(valuesOfInterest)):\n succ = []\n unsucc = []\n # data = []\n maxevals = np.ones(len(dimensions))\n maxevals_succ = np.ones(len(dimensions)) \n # Collect data that have the same function and different dimension.\n for idim, dim in enumerate(dimensions):\n assert len(dictFunc[func][dim]) == 1\n # (ert, success rate, number of success, total number of\n # function evaluations, median of successful runs)\n tmp = generateData(dictFunc[func][dim][0], valuesOfInterest((func, dim))[i_target])\n maxevals[idim] = max(dictFunc[func][dim][0].maxevals)\n # data.append(np.append(dim, tmp))\n if tmp[2] > 0: # Number of success is larger than 0\n succ.append(np.append(dim, tmp))\n if tmp[2] < dictFunc[func][dim][0].nbRuns():\n displaynumber[dim] = ((dim, tmp[0], tmp[2]))\n mediandata[dim] = (i_target, tmp[-1])\n unsucc.append(np.append(dim, np.nan))\n else:\n unsucc.append(np.append(dim, tmp[-2])) # total number of fevals\n\n if len(succ) > 0:\n tmp = np.vstack(succ)\n # ERT\n if genericsettings.scaling_figures_with_boxes:\n for dim in dimensions: \n # to find finite simulated runlengths we need to have at least one successful run\n if dictFunc[func][dim][0].detSuccesses([valuesOfInterest((func, dim))[i_target]])[0]:\n # make a box-plot\n y = toolsstats.drawSP_from_dataset(\n dictFunc[func][dim][0],\n valuesOfInterest((func, dim))[i_target],\n [25, 50, 75], \n genericsettings.simulated_runlength_bootstrap_sample_size)[0]\n rec_width = 1.1 # box (\"rectangle\") width\n rec_taille_fac = 0.3 # notch width parameter\n r = rec_width ** ((1. + i_target / 3.) 
/ 4) # more difficult targets get a wider box\n styles2 = {}\n for s in styles[i_target]:\n styles2[s] = styles[i_target][s]\n styles2['linewidth'] = 1\n styles2['markeredgecolor'] = styles2['color'] \n x = [dim / r, r * dim]\n xm = [dim / (r**rec_taille_fac), dim * (r**rec_taille_fac)]\n y = np.array(y) / dim\n plt.plot([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],\n [y[0], y[1], y[2], y[2], y[1], y[0], y[0]],\n markersize=0, **styles2)\n styles2['linewidth'] = 0\n plt.plot([x[0], x[1], x[1], x[0], x[0]],\n [y[0], y[0], y[2], y[2], y[0]],\n **styles2)\n styles2['linewidth'] = 2 # median\n plt.plot([x[0], x[1]], [y[1], y[1]],\n markersize=0, **styles2)\n # plot lines, we have to be smart to connect only adjacent dimensions\n for i, n in enumerate(tmp[:, 0]):\n j = list(dimensions).index(n)\n if i == len(tmp[:, 0]) - 1 or j == len(dimensions) - 1: \n break\n if dimensions[j+1] == tmp[i+1, 0]:\n res.extend(plt.plot(tmp[i:i+2, 0], tmp[i:i+2, 1] / tmp[i:i+2, 0]**ynormalize_by_dimension,\n markersize=0, clip_on=True, **styles[i_target]))\n # plot only marker\n lw = styles[i_target].get('linewidth', None) \n styles[i_target]['linewidth'] = 0\n res.extend(plt.plot(tmp[:, 0], tmp[:, 1] / tmp[:, 0]**ynormalize_by_dimension,\n markersize=20, clip_on=True, **styles[i_target]))\n # restore linewidth\n if lw:\n styles[i_target]['linewidth'] = lw\n else:\n del styles[i_target]['linewidth']\n\n # To have the legend displayed whatever happens with the data.\n for i in reversed(range(len(valuesOfInterest))):\n res.extend(plt.plot([], [], markersize=10,\n label=valuesOfInterest.label(i) if isinstance(valuesOfInterest, pproc.RunlengthBasedTargetValues) else valuesOfInterest.loglabel(i),\n **styles[i]))\n # Only for the last target function value\n if unsucc: # obsolete\n tmp = np.vstack(unsucc) # tmp[:, 0] needs to be sorted!\n # res.extend(plt.plot(tmp[:, 0], tmp[:, 1]/tmp[:, 0],\n # color=styles[len(valuesOfInterest)-1]['color'],\n # marker='x', markersize=20))\n if 1 < 3: # maxevals\n ylim = plt.ylim()\n res.extend(plt.plot(tmp[:, 0], maxevals / tmp[:, 0]**ynormalize_by_dimension,\n color=styles[len(valuesOfInterest) - 1]['color'],\n ls='', marker='x', markersize=20))\n plt.ylim(ylim)\n # median\n if mediandata:\n # for i, tm in mediandata.iteritems():\n for i in displaynumber: # display median where success prob is smaller than one\n tm = mediandata[i]\n plt.plot((i,), (tm[1] / i**ynormalize_by_dimension,), \n color=styles[tm[0]]['color'],\n linestyle='', marker='+', markersize=30,\n markeredgewidth=5, zorder= -1)\n\n a = plt.gca()\n # the displaynumber is emptied for each new target precision\n # therefore the displaynumber displayed below correspond to the\n # last target (must be the hardest)\n if displaynumber: # displayed only for the smallest valuesOfInterest\n for _k, j in displaynumber.iteritems():\n # the 1.5 factor is a shift up for the digits \n plt.text(j[0], 1.5 * j[1] / j[0]**ynormalize_by_dimension, \n \"%.0f\" % j[2], axes=a,\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n fontsize=plt.rcParams['font.size'] * 0.85)\n # if later the ylim[0] becomes >> 1, this might be a problem\n return res",
"def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))",
"def EstimateSurvivalByDecade(groups, **options):\n thinkplot.PrePlot(len(groups))\n for name, group in groups:\n _, sf = EstimateSurvival(group)\n thinkplot.Plot(sf, **options)",
"def _pds_plot_iterator(pds, dim, funcId):\n i = 0\n for (algname, ds) in pds.algds_dimfunc((dim, funcId)):\n yield ('algorithm', algname, ds, _style_algorithm(algname, i))\n i += 1\n yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())\n yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())\n i = 0\n for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):\n yield ('strategy', stratname, ds, _style_strategy(stratname, i))\n i += 1",
"def group_plot(self, i):\n # extract group level parameters\n logk = self.posterior_samples[\"group_logk\"][:, i]\n logs = self.posterior_samples[\"group_logs\"][:, i]\n # plotting\n fig, ax = plt.subplots(1, 2, figsize=(12, 6))\n self.plot_parameter_space(ax[0], (logk, logs))\n self.plot_discount_functions(ax[1], (logk, logs))\n return (fig, ax)",
"def plot_groupby(x, attr='ebeam_photon_energy', groupby='step'):\n dag = x[attr].dropna(dim='time').groupby(groupby)\n dag.mean().to_pandas().plot(yerr=dag.std())\n print \"dag = x['{:}'].dropna(dim='time').groupby('{:}')\".format(attr, groupby)\n print \"dag.mean().to_pandas().plot(yerr=dag.std())\"",
"def plot_eval(df_input, tuned_params, eval_seeds, num_folds=10, eval_method=\"roc\", fontsize=24,\n individual_alpha=0.0, stat_func=np.median):\n\n # initialize the stratified folds using the given list of seeds\n stratified_folds = [StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=eval_seed)\n for eval_seed in eval_seeds]\n # intialize lists for colors\n color_lst = ['#1f77b4', '#ff7f0e', '#2ca02c']\n\n # construct the plot\n fig, ax = plt.subplots(3, 1, sharex=\"all\", sharey=\"all\",\n figsize=(10, 10))\n\n # iterate over the 3 binary classifiers\n for index, (choice, title) in enumerate(zip([\"Metal\", \"Insulator\", \"MIT\"],\n [\"M\", \"I\", \"T\"])):\n # initialize the eval_metric list\n aucs = []\n mean_x = np.linspace(0, 1, 100)\n # load in the data for the specified binary classifier\n X_choice, y_choice = load_data(df_input, choice)\n # initialize the tuned binary classifier\n tuned_xgb_model = xgb.XGBClassifier(**tuned_params[choice])\n # setup naive precision if eval_method == \"pr\"\n if eval_method == \"pr\":\n naive_precision = np.sum(y_choice == 1) / len(y_choice)\n # iterate over the stratified folds created using the random seeds in eval_seeds\n for stratified_fold in stratified_folds:\n aucs_seed = []\n y = []\n for train_indices, test_indices in stratified_fold.split(X_choice, y_choice):\n # fit the model on k-1 training folds\n tuned_xgb_model.fit(X_choice.iloc[train_indices], y_choice[train_indices])\n # plot the pr curve on the 1 test fold\n if eval_method == \"roc\":\n plot_func = plot_roc_curve\n elif eval_method == \"pr\":\n plot_func = plot_precision_recall_curve\n else:\n raise Exception('Invalid eval_method. Please choose from \"roc\" or \"pr\"')\n # create the visulization object\n viz = plot_func(tuned_xgb_model,\n X_choice.iloc[test_indices], y_choice[test_indices],\n ax=ax[index], alpha=individual_alpha)\n if eval_method == \"roc\":\n # get the AUC score for that test fold\n aucs_seed.append(viz.roc_auc)\n # linear interpolation for true positive rate\n interp_y = np.interp(mean_x, viz.fpr, viz.tpr)\n # set the first interpolated true positive rate to be 0\n interp_y[0] = 0.0\n # get the linearly interpolated true positive rate\n y.append(interp_y)\n # get the average/median roc curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 1\n elif eval_method == \"pr\":\n # get the AUC score for that test fold\n aucs_seed.append(auc(viz.recall, viz.precision))\n # linear interpolation for precision\n interp_y = np.interp(mean_x, viz.recall[::-1], viz.precision[::-1])\n # set the first interpolated precision to be 1\n interp_y[0] = 1.0\n # get the linearly interpolated precision\n y.append(interp_y)\n # get the average/median pr curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 0\n\n ax[index].plot(mean_x, mean_y, alpha=0.6, lw=3, color=color_lst[index])\n aucs.append(stat_func(aucs_seed))\n\n if eval_method == \"roc\":\n ax[index].set(xlim=(-0.01, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(title, fontsize=fontsize)\n ax[index].plot([0, 1], [0, 1], linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.98, 0.05, \"{} AUC: {:.2f}\".format(stat_func.__name__.capitalize(),\n stat_func(aucs)), fontsize=fontsize,\n horizontalalignment=\"right\", verticalalignment=\"bottom\")\n elif eval_method == \"pr\":\n ax[index].set(xlim=(0, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(\"{} {} AUC: {:.2f}\".format(title, stat_func.__name__.capitalize(),\n stat_func(aucs)), 
fontsize=fontsize)\n ax[index].plot([0, 1], [naive_precision, naive_precision],\n linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.02, naive_precision - 0.03, \"naive precision:{:.2f}\".format(naive_precision),\n fontsize=fontsize - 4, horizontalalignment=\"left\", verticalalignment=\"top\")\n\n ax[index].get_legend().remove()\n ax[index].tick_params(axis='both', which='major', labelsize=fontsize)\n\n if eval_method == \"roc\":\n x_axis_label = \"False positive rate\"\n y_axis_label = \"True positive rate\"\n elif eval_method == \"pr\":\n x_axis_label = \"Recall\"\n y_axis_label = \"Precision\"\n ax[2].set_xlabel(x_axis_label, fontsize=fontsize)\n ax[1].set_ylabel(y_axis_label, fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n return fig",
"def group_apply_edges(self, group_by, func, edges=ALL, inplace=True):\n super(BaseGraphStore, self).group_apply_edges(group_by, func, edges, inplace=True)",
"def plot_scoring(\n graphs: list,\n ref_partitions: object,\n graph_names: list,\n methods: list,\n scoring: Callable[\n [object, object], object\n ] = cdlib.evaluation.adjusted_mutual_information,\n nbRuns: int = 5,\n) -> object:\n forDF = []\n for i, g in enumerate(graphs):\n for m in methods:\n for r in range(nbRuns):\n partition = m(g)\n\n score = scoring(partition, ref_partitions[i]).score\n forDF.append([graph_names[i], score, partition.get_description()])\n df = pd.DataFrame(columns=[\"graph\", \"score\", \"method\"], data=forDF)\n ax = sns.lineplot(x=\"graph\", y=\"score\", hue=\"method\", data=df, legend=\"brief\")\n ax.legend(loc=\"best\")\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n plt.tight_layout()\n\n return ax",
"def run(self):\n #num = self.libsort.num_func()\n self.visualize(self.sortfun())",
"def parallel_group(\n G, group_by, ax=None, y_offset=-0.3, rotation=45, ha=\"right\", va=\"top\"\n):\n if ax is None:\n ax = plt.gca()\n nt = utils.node_table(G)\n # groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()\n groups = sorted(nt[group_by].unique())\n\n for i, label in enumerate(groups):\n x = i * 4\n y = y_offset\n ax.annotate(label, xy=(x, y), ha=ha, va=va, rotation=rotation)\n ax.relim()",
"def scatter(adata, groupby, groupid, x,y, n=100, special_markers=None,\n coloring='scores', size=12, annotate=True):\n groups = 'all'\n\n groups_order, groups_masks = utils.select_groups(\n adata, groups, groupby)\n\n\n imask=groupid\n mask=groups_masks[imask]\n\n score_list = list()\n name_list = list()\n special_markers_indices = list()\n # Note: No duplicates in each group\n for j, k in enumerate(adata.uns['rank_genes_groups_gene_scores']):\n # Make sure only first n datapoints are used\n if j >= n:\n break\n score_list.append(k[imask])\n name_list.append(adata.uns['rank_genes_groups_gene_names'][j][imask])\n # Inefficient and not generalizable but works: Check added index if in list of specially_marked_genes\n # TODO: Speed up if becomes a time issue\n if special_markers is None:\n pass\n elif adata.uns['rank_genes_groups_gene_names'][j][imask] in special_markers[imask]:\n special_markers_indices.append(len(name_list) - 1)\n else:\n pass\n\n ### Get all the key figures\n # make things faster by calculating only what is required for plot\n mask_rest = ~mask\n\n # Get rate of expression\n rate_group = _zero_inflation_estimate(adata[:, name_list], mask)\n rate_rest = _zero_inflation_estimate(adata[:, name_list], mask_rest)\n if (x in {'full_mean_group', 'tail_mean_group', 'full_mean_difference',\n 'tail_mean_difference'} or y in {'full_mean_group', 'tail_mean_group', 'full_mean_difference',\n 'tail_mean_difference'}):\n means_group = _tail_mean_estimate(adata[:, name_list], mask)\n if (x in {'full_mean_rest', 'tail_mean_rest', 'full_mean_difference',\n 'tail_mean_difference'} or y in {'full_mean_rest', 'tail_mean_rest', 'full_mean_difference',\n 'tail_mean_difference'}):\n means_rest = _tail_mean_estimate(adata[:, name_list], mask_rest)\n\n if (x == 'tail_var_group' or y == 'tail_var_group'):\n # Get tail variance of expression\n var_group = _tail_var_estimate(adata[:, name_list], mask)\n if (x == 'tail_var_rest' or y == 'tail_var_rest'):\n var_rest = _tail_var_estimate(adata[:, name_list], mask_rest)\n if (x == 'CDR' or y == 'CDR'):\n # Get CDR: Need to give full adata object, since we need to count everything\n CDR = _Avg_CDR(adata, mask, name_list, model='rough', n_genes=None)\n if (x == 'full_var_group' or y == 'full_var_group'):\n # Slice first appropriately:\n adata_relevant = adata[:, name_list]\n exp, full_var_group = simple._get_mean_var(adata_relevant.X[mask])\n if (x == 'full_var_rest' or y == 'full_var_rest'):\n # Slice first appropriately:\n adata_relevant = adata[:, name_list]\n exp_rest, full_var_rest = simple._get_mean_var(adata_relevant.X[mask_rest])\n\n ### Prepare for coloring\n # get colored scatterplot\n # For coloring, get max score value, normalize (0,1)\n # Depending on whether normalization should be scale-invariant or only rank-invariant, do the following\n if coloring == 'scores':\n score_list = score_list / max(score_list)\n colors = cm.jet(score_list)\n elif coloring == 'absolute':\n color_list = rankdata(score_list)\n max_values = max(color_list)\n colors = cm.jet(color_list / max_values)\n # Identify true markers distinctly by using different size.\n else:\n logg.error('coloring should be either <socres> or <absolute>')\n s = 20 * np.ones(len(score_list))\n # This works for numpy access (not for normal lists though)\n s[special_markers_indices] = 100\n # In future, build method to mark top genes specially\n\n ### Actually do the plotting: Looping is inefficient and lengthy, but clear style\n # Potential values for x, y: 'mean' ('full' or 'tail'), 
'tail_variance', 'inflation', 'CDR',\n # tail_variance_rest, Score (Just the ranking as given by test-statistic), 'full_var', 'full_var_rest'\n\n if x == 'expression_rate_difference':\n x_plot = rate_group - rate_rest\n elif x == 'expression_rate_group':\n x_plot = rate_group\n elif x == 'expression_rate_rest':\n x_plot = rate_rest\n elif x == 'Score':\n x_plot = score_list\n elif x == 'full_mean_difference':\n x_plot = means_group * rate_group - means_rest * rate_rest\n elif x == 'full_mean_group':\n x_plot = means_group * rate_group\n elif x == 'full_mean_rest':\n x_plot = means_rest * rate_rest\n elif x == 'tail_mean_difference':\n x_plot = means_group - means_rest\n elif x == 'tail_mean_group':\n x_plot = means_group\n elif x == 'tail_mean_rest':\n x_plot = means_rest\n elif x == 'tail_var_group':\n x_plot = var_group\n elif x == 'tail_var_rest':\n x_plot = var_rest\n elif x == 'full_var_group':\n x_plot = full_var_group\n elif x == 'full_var_rest':\n x_plot = full_var_rest\n elif x == 'CDR':\n x_plot = CDR\n else:\n logg.error('No accepted input. Check function documentation to get an overview over all inputs')\n\n if y == 'expression_rate_difference':\n y_plot = rate_group - rate_rest\n elif y == 'expression_rate_group':\n y_plot = rate_group\n elif y == 'expression_rate_rest':\n y_plot = rate_rest\n elif y == 'Score':\n y_plot = score_list\n elif y == 'full_mean_difference':\n y_plot = means_group * rate_group - means_rest * rate_rest\n elif y == 'full_mean_group':\n y_plot = means_group * rate_group\n elif y == 'full_mean_rest':\n y_plot = means_rest * rate_rest\n elif y == 'tail_mean_difference':\n y_plot = means_group - means_rest\n elif y == 'tail_mean_group':\n y_plot = means_group\n elif y == 'tail_mean_rest':\n y_plot = means_rest\n elif y == 'tail_var_group':\n y_plot = var_group\n elif y == 'tail_var_rest':\n y_plot = var_rest\n elif y == 'full_var_group':\n y_plot = full_var_group\n elif y == 'full_var_rest':\n y_plot = full_var_rest\n elif y == 'CDR':\n y_plot = CDR\n else:\n logg.error('No accepted input. Check function documentation to get an overview over all inputs')\n\n # To make different scalings easier to compare, we set fixed limits for the case that x,y are e\n # expression rates\n if (x in {'expression_rate_difference', 'expression_rate_group', 'expression_rate_rest'} and\n y in {'expression_rate_difference', 'expression_rate_group', 'expression_rate_rest'}):\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n fig, ax= plt.subplots(figsize=(size,size))\n ax.scatter(x_plot, y_plot, color=colors, s=s)\n plt.xlabel(x)\n plt.ylabel(y)\n if annotate is True:\n for i, txt in enumerate(name_list):\n plt.annotate(txt, (x_plot[i], y_plot[i]))\n plt.show()",
"def plot(self, ax, *args, **kwargs):\n for group in self.groups:\n group.plot(ax, *args, **kwargs)",
"def plot_running_averages(fn, n):\n plt.plot(list(running_averages(fn() for _ in range(n))))",
"def complexity_graph(\n fn, ns, reps=20, number=1000, shuffle=True, setup=\"pass\", extra_globals={}\n):\n ts = time_complexity(fn, ns, reps, number, shuffle, setup, extra_globals)\n score_dict = fit_curves(ns, ts)\n print()\n print(f\"Scores for {fn.__name__}\")\n score_report(score_dict)\n plot_complexity(ns, ts, reference_curves=True)\n return score_dict",
"def makeaplot(events,\n sensitivities,\n hrf_estimates,\n roi_pair,\n fn=True):\n import matplotlib.pyplot as plt\n\n # take the mean and transpose the sensitivities\n sensitivities_stacked = mv.vstack(sensitivities)\n\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # some parameters\n # get the conditions\n block_design = sorted(np.unique(events['trial_type']))\n reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]\n block_design = [block_design[i] for i in reorder]\n # end indices to chunk timeseries into runs\n run_startidx = np.array([0, 157, 313, 469])\n run_endidx = np.array([156, 312, 468, 624])\n\n runs = np.unique(mean_sens_transposed.sa.chunks)\n\n for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):\n comparison = hrf_estimates.fa.bilat_ROIs[j][0]\n if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):\n roi_pair_idx = j\n roi_betas_ds = hrf_estimates[:, roi_pair_idx]\n roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]\n\n for run in runs:\n fig, ax = plt.subplots(1, 1, figsize=[18, 10])\n colors = ['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',\n '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']\n plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],\n roi_pair[1],\n run + 1),\n fontsize='large')\n plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])\n plt.ylim([-5, 7])\n plt.xlabel('Time in sec')\n plt.legend(loc=1)\n plt.grid(True)\n # for each stimulus, plot a color band on top of the plot\n for stimulus in block_design:\n onsets = events[events['trial_type'] == stimulus]['onset'].values\n durations = events[events['trial_type'] == stimulus]['duration'].values\n stimulation_end = np.sum([onsets, durations], axis=0)\n r_height = 1\n color = colors[0]\n y = 6\n\n # get the beta corresponding to the stimulus to later use in label\n beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(\" \", \"\"), 0]\n\n for i in range(len(onsets)):\n r_width = durations[i]\n x = stimulation_end[i]\n rectangle = plt.Rectangle((x, y),\n r_width,\n r_height,\n fc=color,\n alpha=0.5,\n label='_'*i + stimulus.replace(\" \", \"\") + '(' + str('%.2f' % beta) + ')')\n plt.gca().add_patch(rectangle)\n plt.legend(loc=1)\n del colors[0]\n\n times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]\n\n ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)\n glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]\n ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)\n model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]\n plt.title('R squared: %.2f' % model_fit)\n if fn:\n plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))",
"def series_measure(function,group_filters,**options):\n\tresults=pd.Series()\n\tfor group_key, group_filter in group_filters.items():\n\t\tjoined_options={**options,**group_filter}\n\t\tif not callable(function):\n\t\t\tif \"func\" in joined_options.keys():\n\t\t\t\tfunc=joined_options.pop('func')\n\t\t\t\tresults[group_key]=func(**joined_options)\n\t\t\telse:\n\t\t\t\traise TypeError('function passed is not callable and no functions\\\n\t\t\t\t referenced in filters!')\n\t\telse:\n\t\t\tresults[group_key]=function(**joined_options)\n\treturn results"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
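For orientation, a minimal usage sketch of the rank_by_budget document in the record above. The pds object, the loader that builds it, and the dimension/function ids are assumptions taken from the snippet and its query, not a documented API:

import matplotlib.pyplot as plt

# Hypothetical loader standing in for whatever builds the `pds` object
# (per-algorithm datasets exposing a .ranking() method) consumed by
# rank_by_budget(); it is not defined anywhere in this dump.
pds = load_plot_datasets()

fig, ax = plt.subplots(figsize=(6, 4))
# Average ranks over several function ids in one dimension, as the query describes;
# the dimension and function ids here are illustrative values only.
rank_by_budget(ax, pds, dim=10, funcId=[1, 2, 3])
ax.legend(loc='best')
fig.savefig('rank_by_budget_d10.png')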
Plot a rotated convergence plot. It is essentially like fval_by_budget(), but rotated by 90 degrees, showing how big a budget is required to reach each target. While this is a little less intuitive at first, it allows a better judgement of the performance impact of each strategy. With fval_by_budget(), a performance change shows up as a phase shift of the curve, while in evals_by_target(), it simply translates the position on the y axis. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. By default, the absolute evaluation count is shown, but values relative to some baseline dataset can be shown instead. | def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor') | [
"def kde_target_plot(df, feature):\n\n # Need to reset index for loc to workBU\n df = df.reset_index()\n plt.figure(figsize=(10, 6))\n plt.style.use('fivethirtyeight')\n\n # plot repaid loans\n sns.kdeplot(df.loc[df['TARGET'] == 0, feature], label='target == 0')\n # plot loans that were not repaid\n sns.kdeplot(df.loc[df['TARGET'] == 1, feature], label='target == 1')\n\n # Label the plots\n plt.title('Distribution of Feature by Target Value')\n plt.xlabel('%s' % feature);\n plt.ylabel('Density');\n plt.show()",
"def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))",
"def ecdf(data, group_by=None, targets=None, ax=None, **kwargs):\n text_color = plt.rcParams.get('ytick.color')\n linewidth = 2\n # Handle keyword arguments\n for k, v in kwargs.items():\n if k not in ['linewidth']:\n raise TypeError('ecdf got an unexpeted keyword argument: {}'.format(k))\n else:\n if k == 'linewidth':\n linewidth = v\n # Deal with input data\n if group_by is not None:\n if type(data) == pd.core.frame.DataFrame:\n print(\"Grouping DataFrame by {}\".format(group_by))\n print(\"Target Features:\", targets)\n if type(targets) == str:\n targets = [targets]\n else:\n try:\n it = iter(targets)\n except:\n targets = [targets]\n cols = targets + [group_by]\n data = data[cols]\n variables = data.columns[:-1]\n data = data.groupby(group_by)\n else:\n return(\"Error: only DataFrame input works with group_by functionality\")\n else: \n if type(data) == pd.core.series.Series:\n variables = [data.name]\n elif type(data) == pd.core.frame.DataFrame:\n if targets is None:\n variables = list(data.columns)\n else:\n if type(targets) == str:\n targets = [targets]\n else: \n try:\n it = iter(targets)\n except:\n targets = [targets]\n print(\"Target Features:\", targets)\n variables = targets\n elif type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n variables = list(data.obj.columns)\n else:\n data = pd.Series(data, name='data')\n variables = [data.name]\n \n \n if type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n for variable in variables:\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for name, group in data:\n x = np.sort(group[variable])\n n = len(group)\n y = np.arange(1, n+1) / n\n ax.plot(x, y, marker='.', label=name, alpha=0.7, linewidth=linewidth)\n if max(x) > max_x:\n max_x = max(x)\n #max_x = 0\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n ax.legend()\n plt.title(\"ECDF for feature: {}\".format(variable), color=text_color)\n plt.show()\n \n else:\n n = len(data)\n y = np.arange(1, n+1) / n\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for variable in variables:\n if type(data) == pd.core.series.Series:\n x = np.sort(data)\n string = variable\n else:\n x = np.sort(data[variable])\n string = 'Data'\n ax.plot(x, y, marker='.', label=variable)\n if max(x) > max_x:\n max_x = max(x)\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n plt.title(\"ECDF for {}\".format(string), color=text_color)\n plt.legend()\n plt.show()",
"def plotGoalFunctionEverySet(use_all): #DONE\r\n\r\n # Get the metrics, all or only the ones used in the goal function\r\n if use_all:\r\n calibration_metrices = all_optional_metrics\r\n metric_units = [ 'RMSE/std' for i in \r\n ['cilp','cohes','contag','ed','fdi','lpi','pd','wfdi'] ] + locationalMetric\r\n else: \r\n calibration_metrices = [ m.upper() for m in metricNames ] + locationalMetric\r\n metric_units = [ 'RMSE/std' for i in metricNames ] + locationalMetric\r\n \r\n # First, get all results\r\n results = getAverageResultsArrayEverySet('calibration', True)\r\n # Loop the data for all metrics to get minimum and maximum goal function values\r\n limits = {}\r\n # Get metric stats\r\n for i,m in enumerate(calibration_metrices):\r\n limits[m] = {\r\n 'min': np.amin(results[i]),\r\n 'max': np.amax(results[i]),\r\n 'mean': np.mean(results[i]),\r\n 'median':np.median(results[i]),\r\n 'sd':np.std(results[i])\r\n }\r\n \r\n # Now, plot the data \r\n ## 1. Get the parameter sets\r\n parameterSets = calibrate.getParameterConfigurations()\r\n parameters=np.arange(0,len(parameterSets),1)\r\n n = len(calibration_metrices)# number of subplots\r\n \r\n ## 2. Prepare the plot \r\n fig, axs = plt.subplots(n, 2, figsize=(6,8), sharex = 'col')\r\n xticks = np.arange(0, parameters[-1]+10, 15.0)\r\n xticks=xticks.tolist()+[parameters[-1]]\r\n fig.align_ylabels()\r\n plt.subplots_adjust(hspace=0.4)\r\n #gf = ['$o_1$','$o_2$','$o_3$']\r\n gf = ['o('+c.upper()+')' for c in calibration_metrices]\r\n \r\n ## 3. Loop metrics. Each metric = new subplot\r\n for i,m in enumerate(calibration_metrices):\r\n j=0\r\n axs[i][1].set_ylabel(metric_units[i])\r\n axs[i][1].set_xticks(xticks)\r\n axs[i][1].set_xlim(0,max(parameters)+1)\r\n \r\n # Loop all the countries. Each suplot has data for all case studies:\r\n for country in case_studies:\r\n # Loop calibration scenarios:\r\n for scenario in scenarios:\r\n # set the min and max y axis values:\r\n axs[i][1].set_title(gf[i], pad=3)\r\n\r\n # standardize vaules by divinding by std\r\n stand_results = results[i,j]/np.std(results[i,j])\r\n \r\n amin = limits[m]['min']\r\n amax = limits[m]['max']\r\n axs[i][1].ticklabel_format(style='sci', axis='y', scilimits=(-2,2))\r\n #axs[i].set_ylim([amin*0.9,amax*1.1])\r\n #axs[i].set_yticks([amin,amax])\r\n # Create the labels only for one metric\r\n if i>0:\r\n myLabel = {1:None,2:None}\r\n else:\r\n myLabel = {1:cities[country],2:cities[country]}\r\n fmt = {1:'-',2:'--'}\r\n # plot\r\n axs[i][1].plot(\r\n parameters,\r\n stand_results,\r\n fmt[scenario],\r\n linewidth = 0.5,\r\n label = myLabel[scenario],\r\n c = countryColors[country])\r\n plt.setp(axs[i][1].get_xticklabels(), rotation=90)\r\n j+=1\r\n axs[i][1].set_xlabel('parameter set')\r\n\r\n ## 4. Loop metrics again, now plot over time. Each metric = new subplot\r\n calibration_metrices.pop(-1)\r\n for i,m in enumerate(calibration_metrices):\r\n j=0\r\n print(m)\r\n axs[i][0].set_ylabel(m.upper())\r\n axs[i][0].set_xlim(min(observedYears)-2,max(observedYears)+2)\r\n \r\n # Loop all the countries. 
Each suplot has data for all case studies:\r\n for country in case_studies:\r\n # Loop calibration scenarios:\r\n for scenario in scenarios:\r\n\r\n # get results\r\n observed = calibrate.getObservedArray(m, case=country)\r\n metric_values = []\r\n for year in range(len(obsTimeSteps)):\r\n metric_values.append(observed[year][0][1])\r\n \r\n metric_values = np.array(metric_values)[:, :, 0].transpose()\r\n metric_values = metric_values[~np.isnan(metric_values).any(axis=1)]\r\n \r\n ##print(metric_values.shape)\r\n #axs[i].set_ylim([amin*0.9,amax*1.1])\r\n #axs[i].set_yticks([amin,amax])\r\n # plot\r\n c = countryColors[country]\r\n axs[i][0].boxplot(\r\n metric_values,\r\n positions = np.array(observedYears)+j-1,\r\n whis = [0,100],\r\n widths = 1,\r\n patch_artist=True,\r\n boxprops=dict(facecolor='w', color=c),\r\n capprops=dict(color=c),\r\n whiskerprops=dict(color=c),\r\n flierprops=dict(color=c, markeredgecolor=c),\r\n medianprops=dict(color=c))\r\n j+=1\r\n axs[i][0].set_xticks(observedYears)\r\n\r\n axs[i+1][0].set_visible(False) \r\n axs[i][0].set_xticklabels([str(x) for x in observedYears])\r\n axs[i][0].tick_params(labelbottom=True)\r\n axs[i][0].set_xlabel('time')\r\n\r\n \r\n # Create the legend\r\n leg = fig.legend(\r\n bbox_to_anchor=(0.28, 0.12),\r\n loc='lower center',\r\n ncol=len(case_studies),\r\n borderaxespad=0.,\r\n frameon = False,\r\n fontsize=8)\r\n \r\n # Set the name and clear the directory if needed\r\n setNameClearSave('Figure3_metrics', scenario=None)",
"def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes",
"def plot_calibration_curve(est, X, y, fig_index):\n\n # Split data into a development and evaluation set\n X_dev,X_eval, y_dev,y_eval = train_test_split(X, y,\n test_size=0.33, random_state=42)\n # Split development set into a train and test set\n X_train, X_test, y_train, y_test = train_test_split(X_dev, y_dev, test_size=0.33,\n random_state=seed)\n \n name = est.steps[1][0]\n \n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # We take the no calibration as baseline\n fig = plt.figure(fig_index, figsize=(6, 6))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"--\", label=\"Perfectly calibrated\")\n \n for clf, name in [(est, name),\n (isotonic, name + ' + Isotonic'),\n (sigmoid, name + ' + Sigmoid')]: # Also called Platt Scaling\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"\\n\\x1b[1;31mclassifier %s:\\x1b[0m\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"o-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n \n # Customize the major grid\n ax1.grid(which='major', linestyle='-', linewidth='0.2', color='gray')\n ax1.set_axis_bgcolor('white')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"best\", ncol=1)\n \n # Customize the major grid\n ax2.grid(which='major', linestyle='-', linewidth='0.2', color='gray')\n ax2.set_axis_bgcolor('white')\n \n plt.tight_layout()\n plt.show()",
"def EstimateSurvivalByDecade(groups, **options):\n thinkplot.PrePlot(len(groups))\n for name, group in groups:\n _, sf = EstimateSurvival(group)\n thinkplot.Plot(sf, **options)",
"def gamma_tradeoff(self):\n\n k = 5\n reshaped_train_x = self.train_x.reshape((self.num_train_points, -1))\n\n scores = [\n np.sum(self.inf, axis=1),\n np.sum(self.rif, axis=1),\n np.sum(self.gc, axis=1),\n np.sum(self.rp, axis=1),\n self.ds\n ]\n\n labels = [\n \"IF\",\n \"RIF\",\n \"GC\",\n \"RP\",\n \"DS\"\n ]\n\n gammas = np.append([0], np.logspace(-3, 9, num=50))\n\n fig, ax = plt.subplots(figsize=(12, 8))\n\n for i in range(5):\n idxs = [select_top_diverse_k(scores[i], k, reshaped_train_x, gamma=g) for g in gammas]\n\n xs = [np.sum(pairwise_distances(reshaped_train_x[j])) / (k*(k-1)) for j in idxs]\n ys = [np.sum(scores[i][j]) for j in idxs]\n ys = ys / ys[0] # Normalise\n\n ax.plot(xs, ys, label=labels[i], zorder=1)\n # ax.scatter(xs, ys, s=16, marker=\"x\")\n\n # Maximum metric.\n ax.scatter(xs[0], ys[0], s=64, marker=\"D\", color=hls[0], edgecolors=\"black\", zorder=2)\n\n # Maximum product.\n prod = np.argmax(xs * ys)\n ax.scatter(xs[prod], ys[prod], s=64, marker=\"D\", color=hls[3], edgecolors=\"black\", zorder=2)\n print(\"Optimal Gamma for {} = {:3e}\".format(labels[i], gammas[prod]))\n\n # Maximum APD.\n max_apd = np.argmax(xs)\n ax.scatter(xs[max_apd], ys[max_apd], s=64, marker=\"D\", color=hls[6], edgecolors=\"black\", zorder=2)\n\n ax.set_ylabel(\"Normalised Metric\", fontdict={\"fontsize\": 14})\n ax.set_xlabel(\"Average Pairwise Distance\", fontdict={\"fontsize\": 14})\n ax.legend(title=\"Method\", loc=\"lower left\")",
"def plot_eval(df_input, tuned_params, eval_seeds, num_folds=10, eval_method=\"roc\", fontsize=24,\n individual_alpha=0.0, stat_func=np.median):\n\n # initialize the stratified folds using the given list of seeds\n stratified_folds = [StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=eval_seed)\n for eval_seed in eval_seeds]\n # intialize lists for colors\n color_lst = ['#1f77b4', '#ff7f0e', '#2ca02c']\n\n # construct the plot\n fig, ax = plt.subplots(3, 1, sharex=\"all\", sharey=\"all\",\n figsize=(10, 10))\n\n # iterate over the 3 binary classifiers\n for index, (choice, title) in enumerate(zip([\"Metal\", \"Insulator\", \"MIT\"],\n [\"M\", \"I\", \"T\"])):\n # initialize the eval_metric list\n aucs = []\n mean_x = np.linspace(0, 1, 100)\n # load in the data for the specified binary classifier\n X_choice, y_choice = load_data(df_input, choice)\n # initialize the tuned binary classifier\n tuned_xgb_model = xgb.XGBClassifier(**tuned_params[choice])\n # setup naive precision if eval_method == \"pr\"\n if eval_method == \"pr\":\n naive_precision = np.sum(y_choice == 1) / len(y_choice)\n # iterate over the stratified folds created using the random seeds in eval_seeds\n for stratified_fold in stratified_folds:\n aucs_seed = []\n y = []\n for train_indices, test_indices in stratified_fold.split(X_choice, y_choice):\n # fit the model on k-1 training folds\n tuned_xgb_model.fit(X_choice.iloc[train_indices], y_choice[train_indices])\n # plot the pr curve on the 1 test fold\n if eval_method == \"roc\":\n plot_func = plot_roc_curve\n elif eval_method == \"pr\":\n plot_func = plot_precision_recall_curve\n else:\n raise Exception('Invalid eval_method. Please choose from \"roc\" or \"pr\"')\n # create the visulization object\n viz = plot_func(tuned_xgb_model,\n X_choice.iloc[test_indices], y_choice[test_indices],\n ax=ax[index], alpha=individual_alpha)\n if eval_method == \"roc\":\n # get the AUC score for that test fold\n aucs_seed.append(viz.roc_auc)\n # linear interpolation for true positive rate\n interp_y = np.interp(mean_x, viz.fpr, viz.tpr)\n # set the first interpolated true positive rate to be 0\n interp_y[0] = 0.0\n # get the linearly interpolated true positive rate\n y.append(interp_y)\n # get the average/median roc curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 1\n elif eval_method == \"pr\":\n # get the AUC score for that test fold\n aucs_seed.append(auc(viz.recall, viz.precision))\n # linear interpolation for precision\n interp_y = np.interp(mean_x, viz.recall[::-1], viz.precision[::-1])\n # set the first interpolated precision to be 1\n interp_y[0] = 1.0\n # get the linearly interpolated precision\n y.append(interp_y)\n # get the average/median pr curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 0\n\n ax[index].plot(mean_x, mean_y, alpha=0.6, lw=3, color=color_lst[index])\n aucs.append(stat_func(aucs_seed))\n\n if eval_method == \"roc\":\n ax[index].set(xlim=(-0.01, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(title, fontsize=fontsize)\n ax[index].plot([0, 1], [0, 1], linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.98, 0.05, \"{} AUC: {:.2f}\".format(stat_func.__name__.capitalize(),\n stat_func(aucs)), fontsize=fontsize,\n horizontalalignment=\"right\", verticalalignment=\"bottom\")\n elif eval_method == \"pr\":\n ax[index].set(xlim=(0, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(\"{} {} AUC: {:.2f}\".format(title, stat_func.__name__.capitalize(),\n stat_func(aucs)), 
fontsize=fontsize)\n ax[index].plot([0, 1], [naive_precision, naive_precision],\n linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.02, naive_precision - 0.03, \"naive precision:{:.2f}\".format(naive_precision),\n fontsize=fontsize - 4, horizontalalignment=\"left\", verticalalignment=\"top\")\n\n ax[index].get_legend().remove()\n ax[index].tick_params(axis='both', which='major', labelsize=fontsize)\n\n if eval_method == \"roc\":\n x_axis_label = \"False positive rate\"\n y_axis_label = \"True positive rate\"\n elif eval_method == \"pr\":\n x_axis_label = \"Recall\"\n y_axis_label = \"Precision\"\n ax[2].set_xlabel(x_axis_label, fontsize=fontsize)\n ax[1].set_ylabel(y_axis_label, fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n return fig",
"def plot_cross_validation_metric(\n df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)\n):\n if ax is None:\n fig = plt.figure(facecolor='w', figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n # Get the metric at the level of individual predictions, and with the rolling window.\n df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)\n df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)\n\n # Some work because matplotlib does not handle timedelta\n # Target ~10 ticks.\n tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.\n # Find the largest time resolution that has <1 unit per bin.\n dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']\n dt_names = [\n 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',\n 'nanoseconds'\n ]\n dt_conversions = [\n 24 * 60 * 60 * 10 ** 9,\n 60 * 60 * 10 ** 9,\n 60 * 10 ** 9,\n 10 ** 9,\n 10 ** 6,\n 10 ** 3,\n 1.,\n ]\n for i, dt in enumerate(dts):\n if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):\n break\n\n x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n\n ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')\n ax.plot(x_plt_h, df_h[metric], '-', c='b')\n ax.grid(True)\n\n ax.set_xlabel('Horizon ({})'.format(dt_names[i]))\n ax.set_ylabel(metric)\n return fig",
"def tune_plots(self):\r\n tune_df = pd.DataFrame()\r\n # Loop through results to create data frame\r\n # Grab the best point for each curve on iteration\r\n for val, curves in self.curves.items():\r\n # Find max and save hyperparam value\r\n max_dict = curves.iloc[curves[\"Fitness\"].idxmax()].to_dict()\r\n if isinstance(val, collections.Sequence):\r\n res = ''.join(filter(lambda i: i.isdigit(), val))\r\n max_dict[self.hyperparameter] = res\r\n else:\r\n max_dict[self.hyperparameter] = val\r\n\r\n # Create or append to dataframe\r\n if not tune_df.empty:\r\n tune_df = tune_df.append(max_dict, ignore_index=True)\r\n else:\r\n tune_df = pd.DataFrame(max_dict, index=[0])\r\n\r\n # Now that you have the data, make dem plots\r\n # Yeah its repeat code, cant be bothered to merge them\r\n fit_fig, fit_ax = plt.subplots()\r\n fit_ax.set_title(f\"{self.title}: Fitness vs {self.hyperparameter}\")\r\n fit_ax.set_xlabel(self.hyperparameter)\r\n fit_ax.set_ylabel('Fitness')\r\n\r\n fev_fig, fev_ax = plt.subplots()\r\n fev_ax.set_title(f\"{self.title}: Fitness Evals vs {self.hyperparameter}\")\r\n fev_ax.set_xlabel(self.hyperparameter)\r\n fev_ax.set_ylabel('Function Evaluations')\r\n\r\n time_fig, time_ax = plt.subplots()\r\n time_ax.set_title(f\"{self.title}: Time vs {self.hyperparameter}\")\r\n time_ax.set_xlabel(self.hyperparameter)\r\n time_ax.set_ylabel('Time To Converge')\r\n\r\n # Plot fitness over hyper-parameter\r\n x = tune_df[self.hyperparameter]\r\n y = tune_df[\"Fitness\"]\r\n fit_ax.plot(x, y, alpha=0.8)\r\n\r\n # Plot feval over hyper-parameter\r\n x = tune_df[self.hyperparameter]\r\n y = tune_df[\"FEvals\"]\r\n fev_ax.plot(x, y, alpha=0.8)\r\n\r\n # Plot time over hyper-parameter\r\n x = tune_df[self.hyperparameter]\r\n y = tune_df[\"Time\"]\r\n time_ax.plot(x, y, alpha=0.8)\r\n\r\n # Format and output all graphs\r\n fit_ax.grid(True)\r\n fit_fig.savefig(os.path.join(self.out_dir, f\"{self.hyperparameter}_fitness_curve.png\"))\r\n\r\n fev_ax.grid(True)\r\n fev_fig.savefig(os.path.join(self.out_dir, f\"{self.hyperparameter}_feval_curve.png\"))\r\n\r\n time_ax.grid(True)\r\n time_fig.savefig(os.path.join(self.out_dir, f\"{self.hyperparameter}_time_curve.png\"))",
"def plot(self):\n cla = list(self.y1.unique())\n tree = self.trees\n x1 = list(self.x1)\n x2 = list(self.x2)\n y1 = list(self.y1)\n fig,axes = plt.subplots(1,self.n_estimators)\n for ii in range(self.n_estimators):\n a = []\n b = []\n c = []\n d = []\n w1 = []\n w2 = []\n for i in range(len(x1)):\n if y1[i]==cla[0]:\n a.append(x1[i])\n b.append(x2[i])\n w1.append(self.weight[ii][i])\n else:\n c.append(x1[i])\n d.append(x2[i])\n w2.append(self.weight[ii][i])\n axes[ii].scatter(np.array(a), np.array(b), s=w1, c='#0001fb', edgecolors='k')\n axes[ii].scatter(np.array(c), np.array(d), s=w2, c='#eb170a', edgecolors='k')\n axes[ii].set_xlim(min(x1)-0.25,max(x1)+0.25)\n axes[ii].set_ylim(min(x2)-0.25,max(x2)+0.25)\n axes[ii].set_xlabel(\"feature: \"+str(self.df[0]))\n axes[ii].set_ylabel(\"feature: \"+str(self.df[1]))\n axes[ii].set_title('alpha = '+str(self.alph[ii]))\n for i in tree[2*ii+1]:\n if i == self.df[0]:\n axes[ii].axvspan(min(x1)-1,tree[2*ii+1][i][0],facecolor='#93b7d7',alpha=0.5)\n axes[ii].axvspan(tree[2*ii+1][i][0],max(x1)+1,facecolor='#db9397',alpha=0.5)\n else:\n axes[ii].axhspan(min(x2)-1,tree[2*ii+1][i][0],facecolor='#93b7d7',alpha=0.5)\n axes[ii].axhspan(tree[2*ii+1][i][0],max(x2)+1,facecolor='#db9397',alpha=0.5) \n \n plot_colors = 'rb'\n plot_step = 0.02\n fig2, ax2 = plt.subplots(1,1)\n X = self.X.copy()\n cols=X.columns\n y = self.y.copy()\n cols = list(X.columns)\n x_min, x_max = X.iloc[:, 0].min() - 0.25, X.iloc[:, 0].max() + 0.25\n y_min, y_max = X.iloc[:, 1].min() - 0.25, X.iloc[:, 1].max() + 0.25\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n np.arange(y_min, y_max, plot_step))\n # plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n # clf = classifiers[i]\n X_ = np.c_[xx.ravel(), yy.ravel()]\n Z = self.predict(pd.DataFrame({cols[i]: pd.Series(X_[:,i]) for i in range(len(X_[0]))}))\n Z = np.array(Z).reshape(xx.shape)\n try:\n cs = ax2.contourf(xx, yy, Z, cmap=plt.cm.PuOr)\n except:\n for i in range(len(Z)):\n Z[i] = [int(j=='Iris-virginica') for j in Z[i]]\n cs = ax2.contourf(xx, yy, Z, cmap=plt.cm.PuOr)\n\n ax2.set_xlabel((\"feature: \"+str(self.df[0])))\n ax2.set_ylabel((\"feature: \"+str(self.df[1])))\n\n ax2.set_title(\"decision surface by combining the individual estimators\")\n\n # Plot the training points\n for cls, color in zip(np.unique(y), plot_colors):\n # print(color)\n # break\n idx = np.where(y == cls)[0]\n ax2.scatter(X.iloc[idx, 0], X.iloc[idx, 1], c=color, cmap=plt.cm.PuOr, edgecolor='black', s=50)\n\n\n return fig,fig2",
"def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)",
"def plot_control_contributions(self, estimated_panel, max_num_steps=4):\n\n num_steps = estimated_panel.shape[0]\n if num_steps > max_num_steps:\n num_steps = max_num_steps\n\n column_names = estimated_panel.iloc[0].columns\n\n for control in self.controls:\n fig, axes = plt.subplots(int(round(num_steps / 2.0)), 2,\n sharex=True, sharey=True)\n fig.suptitle('Contributions to the {} control'.format(control))\n contribs = [name for name in column_names if '-' in name and\n name.startswith(control)]\n contribs += [control + '0']\n\n for ax, (step_num, cycle) in zip(axes.flatten()[:num_steps],\n estimated_panel.iteritems()):\n # here we want to plot each component of this:\n # m0 + k11 * se1 + k12 se2\n cycle[contribs].plot(kind='bar', stacked=True, ax=ax,\n title='Step {}'.format(step_num),\n colormap='jet')\n # TODO: Figure out why the xtick formatting doesn't work\n # this formating method seems to make the whole plot blank\n #formatter = FuncFormatter(lambda l, p: '{1.2f}'.format(l))\n #ax.xaxis.set_major_formatter(formatter)\n # this formatter doesn't seem to work with this plot as it\n # operates on the xtick values instead of the already\n # overidden labels\n #ax.xaxis.set_major_formatter(_percent_formatter)\n # This doesn't seem to actually overwrite the labels:\n #for label in ax.get_xticklabels():\n #current = label.get_text()\n #label.set_text('{:1.0%}'.format(float(current)))\n\n for t in ax.get_legend().get_texts():\n t.set_fontsize(6)\n # only show the contribution in the legend\n try:\n t.set_text(t.get_text().split('-')[1])\n except IndexError:\n t.set_text(t.get_text().split('.')[1])\n\n for axis in axes[-1]:\n axis.set_xlabel('Time [s]')\n\n # snatch the colors from the last axes\n contrib_colors = [patch.get_facecolor() for patch in\n ax.get_legend().get_patches()]\n\n mean = estimated_panel.mean(axis='items')\n std = estimated_panel.std(axis='items')\n\n for control in self.controls:\n fig, ax = plt.subplots()\n fig.suptitle('Contributions to the {} control'.format(control))\n contribs = [control + '0']\n contribs += [name for name in column_names if '-' in name and\n name.startswith(control)]\n for col, color in zip(contribs, contrib_colors):\n ax.errorbar(mean.index.values, mean[col].values,\n yerr=std[col].values, color=color)\n\n labels = []\n for contrib in contribs:\n try:\n labels.append(contrib.split('-')[1])\n except IndexError:\n labels.append(contrib.split('.')[1])\n ax.legend(labels, fontsize=10)\n ax.set_xlabel('Time [s]')\n ax.xaxis.set_major_formatter(_percent_formatter)",
"def target_plot(df, feature, feature_name, target, num_grid_points=10, grid_type='percentile',\n percentile_range=None, grid_range=None, cust_grid_points=None, show_percentile=False,\n show_outliers=False, endpoint=True, figsize=None, ncols=2, plot_params=None):\n\n # check inputs\n _ = _check_target(target=target, df=df)\n feature_type, show_outliers = _check_info_plot_params(\n df=df, feature=feature, grid_type=grid_type, percentile_range=percentile_range, grid_range=grid_range,\n cust_grid_points=cust_grid_points, show_outliers=show_outliers)\n\n # create feature grids and bar counts\n target = _make_list(target)\n useful_features = _make_list(feature) + target\n\n # map feature values to grid point buckets (x)\n data = df[useful_features]\n data_x, bar_data, summary_df, info_cols, display_columns, percentile_columns = _prepare_info_plot_data(\n feature=feature, feature_type=feature_type, data=data, num_grid_points=num_grid_points,\n grid_type=grid_type, percentile_range=percentile_range, grid_range=grid_range,\n cust_grid_points=cust_grid_points, show_percentile=show_percentile, show_outliers=show_outliers,\n endpoint=endpoint)\n\n # prepare data for target lines\n # each target line contains 'x' and mean target value\n target_lines = []\n for target_idx in range(len(target)):\n target_line = data_x.groupby('x', as_index=False).agg(\n {target[target_idx]: 'mean'}).sort_values('x', ascending=True)\n target_lines.append(target_line)\n summary_df = summary_df.merge(target_line, on='x', how='outer')\n summary_df = summary_df[info_cols + ['count'] + target]\n\n # inner call target plot\n fig, axes = _target_plot(\n feature_name=feature_name, display_columns=display_columns, percentile_columns=percentile_columns,\n target=target, bar_data=bar_data, target_lines=target_lines, figsize=figsize, ncols=ncols,\n plot_params=plot_params)\n\n return fig, axes, summary_df",
"def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n try: # funcId is array?\n # _pds_plot_iterator[] uses funcId only for things we don't care for\n fakeFuncId = funcId[0]\n\n manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])\n rankcount = np.shape(manyranking[0])[1] - 1\n amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))\n budget = amanyranking[:,0]\n rankings = np.hsplit(amanyranking[:,1:], len(funcId))\n avgranking = np.average(rankings, axis=0)\n ranking = np.vstack([budget, avgranking.T]).T\n\n except TypeError: # funcId is scalar\n fakeFuncId = funcId\n ranking = pds.ranking((dim, funcId), groupby)\n\n i = 0\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):\n if kind != 'algorithm' and kind != 'strategy':\n continue\n #print name, ds\n budgets = ranking[:,0]\n ranks = ranking[:,1+i]\n\n style['markevery'] = 64\n ax.plot(budgets, ranks, label=name, **style)\n i += 1\n\n ax.set_xlabel('Budget')\n ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')\n ax.set_xscale('log', basex=pfsize)\n ax.grid()",
"def bias_plot():\r\n\r\n biasstuff=io.loadmat(data_dir+'/cv_compare_tot_norm.mat',mat_dtype=True)\r\n bias_data=biasstuff['rsn_tot'][0]\r\n #cvtypes=['k2','k5','k10','loo','external']\r\n #cvtypes_plots=['Split Half', '5 Fold', '10 Fold', 'LOO', 'External']\r\n\r\n cvtypes=['k2','k5','k10','loo']\r\n cvtypes_plots=['Split-half', '5-fold', '10-fold', 'LOO']\r\n\r\n\r\n Rpos=np.concatenate([bias_data[0][cvt][:,0] for cvt in cvtypes])\r\n Rmse=np.concatenate([bias_data[0][cvt][:,4] for cvt in cvtypes])\r\n labels=np.concatenate([np.repeat(cv,200) for cv in cvtypes_plots])\r\n\r\n dfarr=np.concatenate([np.vstack(labels).T,np.vstack(Rpos).T,np.vstack(Rmse).T]).T\r\n\r\n #n1=\"R Squared (Pearsons)\"\r\n #n2=\"R Squared (MSE)\"\r\n n1=\"MSE(observed,yhat)\"\r\n n2=\"MSE(observed,pred)\"\r\n\r\n ipdata=pd.DataFrame(dfarr,columns=['Labels',n1,n2])\r\n\r\n ipdata[n1]=ipdata[n1].astype('float')\r\n ipdata[n2]=ipdata[n2].astype('float')\r\n\r\n ipdata[n1]=ipdata[n1]**2\r\n ipdata[n2]=ipdata[n2]\r\n\r\n ipdata['X']=np.concatenate([np.linspace(0,0.3,200) for n in range(0,len(cvtypes_plots))])\r\n ipdata['Y']=np.concatenate([np.linspace(0,0.3,200) for n in range(0,len(cvtypes_plots))])\r\n\r\n\r\n g=sns.FacetGrid(ipdata,col=\"Labels\",col_wrap=2,hue=\"Labels\",palette='vlag',despine=True)\r\n axes = g.axes.flatten()\r\n axes[0].set_title(r\"$Split-half$\")\r\n axes[1].set_title(r\"$5-fold$\")\r\n axes[2].set_title(r\"$10-fold$\")\r\n axes[3].set_title(r\"$LOO$\")\r\n g=g.map(plt.plot,\"X\",\"Y\")\r\n #g=g.map(plt.scatter,n1,n2).set_axis_labels(r'$MSE(observed,\\hat y)$', r'$MSE(observed,pred)$')\r\n g=g.map(plt.scatter,n1,n2).set_axis_labels(r'$R^2(explanatory)$', r'$R^2(prediction)$')\r\n\r\n\r\n axes[0].set_title(\"Split-half\")\r\n axes[1].set_title(\"5-fold\")\r\n axes[2].set_title(\"10-fold\")\r\n axes[3].set_title(\"LOO\")\r\n\r\n #plt.show()\r\n\r\n print(fig_dir+'/biasfig.tiff')\r\n plt.savefig(fig_dir+'/biasfig.tiff', dpi=300, facecolor='w', edgecolor='w',\r\n orientation='portrait', papertype=None, format=None,\r\n transparent=False, bbox_inches=None, pad_inches=0.1,\r\n frameon=None)",
"def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()",
"def plot_graph(values, strategy):\n plt.plot(values[0],values[1])\n plt.scatter(values[0],values[1],color='red')\n plt.xlabel('Number of Clusters k ')\n plt.ylabel('Objective Function Value')\n plt.title('K-Means')\n plt.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the evolution of relative evaluations for a target based on increasing absolute evaluations. In other words, for each absolute number of evaluations, determine the target reached and show how much faster the baseline reached it. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. It is not clear whether this will eventually be useful at all, but it offers another perspective that might aid some analysis. | def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
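    # budget-based targets: log-spaced runlengths from 1 up to the largest budget recorded for this (dim, funcId)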
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
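    # aggregate each baseline's per-target evaluation counts across instances (median by default)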
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
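        # truncate both series once an infinite entry appears, i.e. once a target was never reached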
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby))) | [
"def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def plot_groupby(x, attr='ebeam_photon_energy', groupby='step'):\n dag = x[attr].dropna(dim='time').groupby(groupby)\n dag.mean().to_pandas().plot(yerr=dag.std())\n print \"dag = x['{:}'].dropna(dim='time').groupby('{:}')\".format(attr, groupby)\n print \"dag.mean().to_pandas().plot(yerr=dag.std())\"",
"def plot_target_avg(self):\r\n target = np.mean(self.epoch_target, axis=2)\r\n notarget = np.mean(self.epoch_notarget, axis=2)\r\n t = np.array(range(target.shape[0])) / self.Params.Fs + self.Params.epoch[0]\r\n\r\n fig, axes = plt.subplots(1, 2)\r\n axes[0].plot(t, target)\r\n axes[0].set_title('target')\r\n axes[1].plot(t, notarget)\r\n axes[1].set_title('no target')\r\n [ax.legend(self.Params.selected_channels_names) for ax in axes]\r\n [ax.set_xlabel('time after flashing (s)') for ax in axes]",
"def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()",
"def target_plot(df, feature, feature_name, target, num_grid_points=10, grid_type='percentile',\n percentile_range=None, grid_range=None, cust_grid_points=None, show_percentile=False,\n show_outliers=False, endpoint=True, figsize=None, ncols=2, plot_params=None):\n\n # check inputs\n _ = _check_target(target=target, df=df)\n feature_type, show_outliers = _check_info_plot_params(\n df=df, feature=feature, grid_type=grid_type, percentile_range=percentile_range, grid_range=grid_range,\n cust_grid_points=cust_grid_points, show_outliers=show_outliers)\n\n # create feature grids and bar counts\n target = _make_list(target)\n useful_features = _make_list(feature) + target\n\n # map feature values to grid point buckets (x)\n data = df[useful_features]\n data_x, bar_data, summary_df, info_cols, display_columns, percentile_columns = _prepare_info_plot_data(\n feature=feature, feature_type=feature_type, data=data, num_grid_points=num_grid_points,\n grid_type=grid_type, percentile_range=percentile_range, grid_range=grid_range,\n cust_grid_points=cust_grid_points, show_percentile=show_percentile, show_outliers=show_outliers,\n endpoint=endpoint)\n\n # prepare data for target lines\n # each target line contains 'x' and mean target value\n target_lines = []\n for target_idx in range(len(target)):\n target_line = data_x.groupby('x', as_index=False).agg(\n {target[target_idx]: 'mean'}).sort_values('x', ascending=True)\n target_lines.append(target_line)\n summary_df = summary_df.merge(target_line, on='x', how='outer')\n summary_df = summary_df[info_cols + ['count'] + target]\n\n # inner call target plot\n fig, axes = _target_plot(\n feature_name=feature_name, display_columns=display_columns, percentile_columns=percentile_columns,\n target=target, bar_data=bar_data, target_lines=target_lines, figsize=figsize, ncols=ncols,\n plot_params=plot_params)\n\n return fig, axes, summary_df",
"def _vis_multiple_run_performance_metrics_ave_std(x_vals, metrics, metric_labels, per_cluster_projection_entropies,\n extractor_names, colors, markers):\n n_estimators = metrics[0].shape[0]\n n_metrics = len(metrics)\n\n width = 1.0 / n_estimators - 0.05\n\n ave_metrics = []\n std_metrics = []\n\n for i in range(n_metrics):\n ave_metrics.append(metrics[i].mean(axis=1))\n std_metrics.append(metrics[i].std(axis=1))\n\n ave_per_cluster_projection_entropies = per_cluster_projection_entropies.mean(axis=1)\n std_per_cluster_projection_entropies = per_cluster_projection_entropies.std(axis=1)\n\n cluster_proj_entroy_ylim = [0, np.max(\n ave_per_cluster_projection_entropies + std_per_cluster_projection_entropies + 0.1)]\n\n x_val_clusters = np.arange(ave_per_cluster_projection_entropies.shape[1]) - width * n_estimators / 2.0\n\n fig1, _ = plt.subplots(1, n_metrics, figsize=(7, 5))\n fig2, _ = plt.subplots(1, 1, figsize=(20, 5))\n\n for i_metric in range(n_metrics):\n fig1.axes[i_metric].plot(x_vals, ave_metrics[i_metric], color=[0.77, 0.77, 0.82], linewidth=4, zorder=-1)\n\n x_tick_labels = []\n\n for i_estimator in range(n_estimators):\n # Visualize each performance metric for current estimator with average+-std, in each axis\n for i_metric in range(n_metrics):\n if i_metric == n_metrics - 1:\n x_tick_labels.append(extractor_names[i_estimator])\n _vis_performance_metrics(x_vals[i_estimator], ave_metrics[i_metric][i_estimator], fig1.axes[i_metric],\n 'Estimator',\n metric_labels[i_metric], extractor_names[i_estimator],\n colors[i_estimator % len(colors)], markers[i_estimator],\n std_val=std_metrics[i_metric][i_estimator],\n show_legends=False, ylim=[0, 1.05])\n if i_estimator == n_estimators - 1:\n fig1.axes[i_metric].xaxis.set_ticks(x_vals)\n fig1.axes[i_metric].set_xticklabels(x_tick_labels)\n fig1.axes[i_metric].set_xlim([x_vals.min() - 0.5, x_vals.max() + 0.5])\n\n if not (np.any(np.isnan(ave_per_cluster_projection_entropies[i_estimator, :]))):\n _vis_per_cluster_projection_entropy(x_val_clusters + width * i_estimator,\n ave_per_cluster_projection_entropies[i_estimator, :], width,\n fig2.axes[0],\n colors[i_estimator % len(colors)],\n extractor_names[i_estimator],\n std_val=std_per_cluster_projection_entropies[i_estimator, :],\n xlabel='Cluster', ylabel='Projection entropy',\n ylim=cluster_proj_entroy_ylim)\n\n return",
"def make_plot_for_proportion_within_target(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num,\n target,\n runtime=1440,\n max_threshold=None,\n):\n ambulance_proportions = []\n other_proportions = []\n all_proportions = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(max_threshold + 1):\n mean_ambulance, mean_other, mean_combined = get_mean_waits_of_current_threshold(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n num_of_trials,\n runtime,\n target,\n )\n ambulance_proportions.append(mean_ambulance)\n other_proportions.append(mean_other)\n all_proportions.append(mean_combined)\n\n plt.figure(figsize=(23, 10))\n proportion_plot = plt.plot(\n ambulance_proportions, \":\", other_proportions, \":\", all_proportions, \"-\"\n )\n plt.title(\n \"Proportion of individuals within target for different capacity thresholds\"\n )\n plt.xlabel(\"Capacity Threshold\")\n plt.ylabel(\"Proportion of Individuals within target\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patient\", \"All Patients\"], fontsize=\"x-large\"\n )\n\n return proportion_plot",
"def median_absolute_error(self):\n print('Median absolute error regression loss: ' + str(median_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))",
"def median_absolute_error(y_true, y_pred, *, multioutput=..., sample_weight=...):\n ...",
"def plot_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n df_i = df_i.sort_values(\"timestamp_end\")\n x, y = df_i.timestamp_end.to_numpy(), df_i.objective.cummin().to_numpy()\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n # plt.fill_between(T,\n # y_mean-1.96*y_std,\n # y_mean+1.96*y_std,\n # facecolor=exp_config[\"data\"][exp_name][\"color\"],\n # alpha=0.3)\n else:\n exp_df = exp_df.sort_values(\"timestamp_end\")\n x, y = exp_df.timestamp_end.to_numpy(), exp_df.objective.cummax().to_numpy()\n if \"hartmann6D\" in exp_name:\n y = y + 3.32237 # hartmann6D\n\n plt.plot(\n x,\n y,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n if MODE == \"min\":\n plt.legend(loc=\"upper right\")\n else:\n plt.legend(loc=\"lower right\")\n\n plt.ylabel(exp_config.get(\"ylabel\", \"Objective\"))\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()",
"def plot_posterior(ax, algo, num_plotted_trials, X_eval, n, mean, var):\n plot_truth(ax, n, mean, var, X_eval)\n\n for i in range(num_plotted_trials):\n linewidth = 1\n opacity = 0.3\n if i == num_plotted_trials - 1:\n opacity = 1\n linewidth = 8\n ax.set_title(algo[\"title\"])\n ax.plot(\n X_eval.flatten().ravel(),\n algo[\"predicted_posterior\"][i, :].ravel(),\n label=algo[\"label\"],\n linewidth=linewidth,\n color=algo[\"color\"],\n alpha=opacity,\n )",
"def plot_eval(df_input, tuned_params, eval_seeds, num_folds=10, eval_method=\"roc\", fontsize=24,\n individual_alpha=0.0, stat_func=np.median):\n\n # initialize the stratified folds using the given list of seeds\n stratified_folds = [StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=eval_seed)\n for eval_seed in eval_seeds]\n # intialize lists for colors\n color_lst = ['#1f77b4', '#ff7f0e', '#2ca02c']\n\n # construct the plot\n fig, ax = plt.subplots(3, 1, sharex=\"all\", sharey=\"all\",\n figsize=(10, 10))\n\n # iterate over the 3 binary classifiers\n for index, (choice, title) in enumerate(zip([\"Metal\", \"Insulator\", \"MIT\"],\n [\"M\", \"I\", \"T\"])):\n # initialize the eval_metric list\n aucs = []\n mean_x = np.linspace(0, 1, 100)\n # load in the data for the specified binary classifier\n X_choice, y_choice = load_data(df_input, choice)\n # initialize the tuned binary classifier\n tuned_xgb_model = xgb.XGBClassifier(**tuned_params[choice])\n # setup naive precision if eval_method == \"pr\"\n if eval_method == \"pr\":\n naive_precision = np.sum(y_choice == 1) / len(y_choice)\n # iterate over the stratified folds created using the random seeds in eval_seeds\n for stratified_fold in stratified_folds:\n aucs_seed = []\n y = []\n for train_indices, test_indices in stratified_fold.split(X_choice, y_choice):\n # fit the model on k-1 training folds\n tuned_xgb_model.fit(X_choice.iloc[train_indices], y_choice[train_indices])\n # plot the pr curve on the 1 test fold\n if eval_method == \"roc\":\n plot_func = plot_roc_curve\n elif eval_method == \"pr\":\n plot_func = plot_precision_recall_curve\n else:\n raise Exception('Invalid eval_method. Please choose from \"roc\" or \"pr\"')\n # create the visulization object\n viz = plot_func(tuned_xgb_model,\n X_choice.iloc[test_indices], y_choice[test_indices],\n ax=ax[index], alpha=individual_alpha)\n if eval_method == \"roc\":\n # get the AUC score for that test fold\n aucs_seed.append(viz.roc_auc)\n # linear interpolation for true positive rate\n interp_y = np.interp(mean_x, viz.fpr, viz.tpr)\n # set the first interpolated true positive rate to be 0\n interp_y[0] = 0.0\n # get the linearly interpolated true positive rate\n y.append(interp_y)\n # get the average/median roc curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 1\n elif eval_method == \"pr\":\n # get the AUC score for that test fold\n aucs_seed.append(auc(viz.recall, viz.precision))\n # linear interpolation for precision\n interp_y = np.interp(mean_x, viz.recall[::-1], viz.precision[::-1])\n # set the first interpolated precision to be 1\n interp_y[0] = 1.0\n # get the linearly interpolated precision\n y.append(interp_y)\n # get the average/median pr curve of a given seed\n mean_y = stat_func(y, axis=0)\n mean_y[-1] = 0\n\n ax[index].plot(mean_x, mean_y, alpha=0.6, lw=3, color=color_lst[index])\n aucs.append(stat_func(aucs_seed))\n\n if eval_method == \"roc\":\n ax[index].set(xlim=(-0.01, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(title, fontsize=fontsize)\n ax[index].plot([0, 1], [0, 1], linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.98, 0.05, \"{} AUC: {:.2f}\".format(stat_func.__name__.capitalize(),\n stat_func(aucs)), fontsize=fontsize,\n horizontalalignment=\"right\", verticalalignment=\"bottom\")\n elif eval_method == \"pr\":\n ax[index].set(xlim=(0, 1), ylim=(0, 1.05), xlabel=None, ylabel=None)\n ax[index].set_title(\"{} {} AUC: {:.2f}\".format(title, stat_func.__name__.capitalize(),\n stat_func(aucs)), 
fontsize=fontsize)\n ax[index].plot([0, 1], [naive_precision, naive_precision],\n linestyle=\"--\", lw=3, color=\"k\")\n ax[index].text(0.02, naive_precision - 0.03, \"naive precision:{:.2f}\".format(naive_precision),\n fontsize=fontsize - 4, horizontalalignment=\"left\", verticalalignment=\"top\")\n\n ax[index].get_legend().remove()\n ax[index].tick_params(axis='both', which='major', labelsize=fontsize)\n\n if eval_method == \"roc\":\n x_axis_label = \"False positive rate\"\n y_axis_label = \"True positive rate\"\n elif eval_method == \"pr\":\n x_axis_label = \"Recall\"\n y_axis_label = \"Precision\"\n ax[2].set_xlabel(x_axis_label, fontsize=fontsize)\n ax[1].set_ylabel(y_axis_label, fontsize=fontsize)\n plt.tight_layout()\n plt.show()\n\n return fig",
"def visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();",
"def plot_inference(inference_values, z_hist, ax, state=1, map_estimate=False):\n n_samples = len(inference_values)\n xspan = np.arange(1, n_samples + 1)\n spans = find_dishonest_intervals(z_hist)\n if map_estimate:\n ax.step(xspan, inference_values, where=\"post\")\n else:\n ax.plot(xspan, inference_values[:, state])\n\n for span in spans:\n ax.axvspan(*span, alpha=0.5, facecolor=\"tab:gray\", edgecolor=\"none\")\n ax.set_xlim(1, n_samples)\n # ax.set_ylim(0, 1)\n ax.set_ylim(-0.1, 1.1)\n ax.set_xlabel(\"Observation number\")",
"def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, 
axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig",
"def plot_visualization(plot_name, experiment_results, metric_results, true_params, config):\n X_test = []\n for dgp_name in config['dgps'].keys(): # just one right now\n for method_name in config['methods'].keys():\n X_test = experiment_results[dgp_name][method_name][0][0][0]\n pred = np.array([experiment_results[dgp_name][method_name][i][0][1] for i in range(len(experiment_results[dgp_name][method_name]))])\n mean = np.mean(pred, axis=0)\n plt.plot(X_test, mean, label=method_name)\n plt.xlabel(\"X_test\")\n plt.ylabel(\"Treatment Effect\")\n lb = np.array([experiment_results[dgp_name][method_name][i][1][0] for i in range(len(experiment_results[dgp_name][method_name]))])\n ub = np.array([experiment_results[dgp_name][method_name][i][1][1] for i in range(len(experiment_results[dgp_name][method_name]))])\n lb_ = np.min(lb, axis=0)\n ub_ = np.max(ub, axis=0)\n plt.fill_between(X_test.reshape(100,), lb_, ub_, alpha=0.25)\n\n true = true_params[dgp_name][0]\n plt.plot(X_test, true, label='true effect')\n plt.legend()\n plt.savefig(plot_name)\n plt.show()",
"def plot_kalman_evaluation(self, use_groundtruth = True, grouped=True):\r\n error_pos, error_att = self.get_kalman_error(use_groundtruth = True)\r\n \r\n plt.figure()\r\n plt.xlabel(\"Time (s)\")\r\n plt.ylabel(\"Error (m)\")\r\n plt.title(\"Error in position of the Kalman filter in first image frame\")\r\n if grouped:\r\n plt.plot(self.get_timestamps(0, np.inf), np.linalg.norm(error_pos[:,0:2], axis=1))\r\n print(\"Average position error (m): \" + str(np.round(np.mean(np.linalg.norm(stat_filter(error_pos[:,0:2], 0.9), axis=1), axis=0), 5)) + \" (\" +str(np.round(np.std(np.linalg.norm(error_pos[:,0:2], axis=1), axis=0), 5))+ \")\")\r\n else:\r\n plt.plot(self.get_timestamps(0, np.inf), error_pos)\r\n plt.legend([\"Right\", \"Backward\", \"Down\"])\r\n print(\"Average position error (m): \" + str(np.round(np.mean(stat_filter(error_pos, 0.9), axis=0), 5)) + \" (\" +str(np.round(np.std(stat_filter(error_pos, 0.9), axis=0), 5))+ \")\")\r\n \r\n plt.figure()\r\n plt.xlabel(\"Time (s)\")\r\n plt.ylabel(\"Error (deg)\")\r\n plt.title(\"Error in attitude of the Kalman filter in first image frame\")\r\n if grouped:\r\n plt.plot(self.get_timestamps(0, np.inf), abs(np.rad2deg(error_att)))\r\n print(\"Average rotation error (deg): \" + str(np.round(np.rad2deg(np.mean(np.abs(stat_filter(error_att, 0.9)))), 5)) + \" (\" +str(np.round(np.rad2deg(np.std(np.abs(error_att))), 5))+ \")\")\r\n else:\r\n plt.plot(self.get_timestamps(0, np.inf), np.rad2deg(error_att))\r\n print(\"Average rotation error (deg): \" + str(np.round(np.rad2deg(np.mean(stat_filter(error_att, 0.9))), 5)) + \" (\" +str(np.round(np.rad2deg(np.std(error_att)), 5))+ \")\")",
"def plot_stats( groupedData, fig_prefix, cutoff='baseline', **kwargs):\n\n fig = plt.figure(figsize=(4,3))\n ax = fig.add_subplot(111)\n \n for GROUP, data in groupedData:\n colour = colourlist[groupedData.groups.keys().index(GROUP)]\n pvalue_results = {}\n if cutoff == 'baseline':\n ax.set_title('Kruskal Wallis: '+parameter+' vs baseline:', fontsize=12)\n baseline = data[data.synced_time <=0][parameter].values \n for time, _data in data[(data.synced_time > 0) & (data.synced_time <= 360)].groupby('synced_time'):\n pvalue_results[time*args.binsize] = st.kruskal(baseline, _data[parameter])[1]\n elif cutoff == 'zero':\n ax.set_title('Kruskal Wallis: '+parameter+' vs zero:', fontsize=12)\n for time, _data in data[(data.synced_time > 0) & (data.synced_time <= 360)].groupby('synced_time'):\n pvalue_results[time*args.binsize] = st.ttest_1samp(_data[parameter], 0)[1]\n\n pvalue_results = {k: pvalue_results[k] for k in pvalue_results if not isnan(pvalue_results[k])} \n\n ax.scatter(pvalue_results.keys(), -np.log10(pvalue_results.values()), label=GROUP, color=colour, linewidth=0)\n \n if len(pvalue_results)>=1:\n n_comparisons = len(pvalue_results)\n ax.axhline( -np.log10(0.05/n_comparisons), color='k', lw=0.5, linestyle='--' )\n\n ax.set_xlim(0,360)\n ax.set_ylim(0,8)#1.1*max(-np.log10(pvalue_results.values())))\n ax.set_xlabel('Time (s)', fontsize=12)\n ax.set_ylabel('-Log10(P)', fontsize=12)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n l = plt.legend()\n l.set_zorder(1000)\n plt.tick_params(axis='both', which='major', labelsize=12)\n plt.tight_layout()\n for ext in ['.png','.svg']:\n fig_fname = fig_prefix + '_'+parameter + ext\n fig.savefig(fig_fname, bbox='tight')\n print 'saved',fig_fname\n\n return pvalue_results",
"def visualization(self, x, y, preds):\n plt.figure()\n plt.plot(x, y, \".\", color = \"blue\")\n plt.plot(x, preds, \"-\", color = \"red\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pinForward is the forward pin, so we change its duty cycle according to the requested speed. | def forward(self, speed):
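        # make sure the reverse channel is idle before applying the forward duty cycle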
self.pwm_backward.ChangeDutyCycle(0)
self.pwm_forward.ChangeDutyCycle(speed) | [
"def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)",
"def go_forward() -> None:\n set_motors_speed(FULL_SPEED, FULL_SPEED)",
"def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)",
"def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)",
"def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)",
"def convert_pin_values(self, motor_speed):\n # 停止\n in1 = 0\n in2 = 0\n pwm = max(min(abs(float(motor_speed)), 1.0), 0.0)\n # 正転\n if motor_speed > 0:\n in1 = 1\n # 逆転\n elif motor_speed < 0:\n in2 = 1\n return pwm, in1, in2",
"def drive_forwards(time, power, setup = False):\n\n Motor1A = 16\n Motor1B = 18\n Motor1E = 22\n\n Motor2A = 19\n Motor2B = 21\n Motor2E = 23\n\n if setup is True:\n \n GPIO.setmode(GPIO.BOARD)\n\n GPIO.setup(Motor1A,GPIO.OUT)\n GPIO.setup(Motor1B,GPIO.OUT)\n GPIO.setup(Motor1E,GPIO.OUT)\n\n GPIO.setup(Motor2A,GPIO.OUT)\n GPIO.setup(Motor2B,GPIO.OUT)\n GPIO.setup(Motor2E,GPIO.OUT)\n \n pwm1 = GPIO.PWM(Motor1E, 100)\n pwm2 = GPIO.PWM(Motor2E, 100)\n\n print(\"Going forwards at {}%...\".format(power))\n GPIO.output(Motor1A,GPIO.HIGH)\n GPIO.output(Motor1B,GPIO.LOW)\n GPIO.output(Motor1E,GPIO.HIGH)\n\n GPIO.output(Motor2A,GPIO.HIGH)\n GPIO.output(Motor2B,GPIO.LOW)\n GPIO.output(Motor2E,GPIO.HIGH)\n \n pwm1.start(power)\n pwm2.start(power)\n\n sleep(time)\n \n print(\"Stopping forwards drive\")\n GPIO.output(Motor1E,GPIO.LOW)\n GPIO.output(Motor2E,GPIO.LOW)\n pwm1.stop()\n pwm2.stop()\n\n if setup is True:\n GPIO.cleanup()",
"def setFanSpeed(PWM_duty_cycle):\n fan.start(PWM_signal*PWM_FREQ) # Sends the frequency\n return()",
"def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True",
"def forward_one_zero():\n knob = nuke.thisKnob()\n knob.setAnimated()\n knob.setValueAt(1, previous_current_next_frame()[1])\n knob.setValueAt(0, previous_current_next_frame()[2])",
"def move_forward(self):\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)",
"def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)",
"def forward(self, speed, seconds=None):\n # Set motor speed and move both forward.\n self._drive_speed(speed)\n self._drive.run(Adafruit_MotorHAT.FORWARD)\n # If an amount of time is specified, move for that time and then stop.\n if seconds is not None:\n time.sleep(seconds)\n self.stop()",
"def update_output(self,io):\n io.outpin.motor_speed_out = self.motorspeed * self.direction\n io.outpin.mspeed_limit = self.limited\n #io.outpin.unlimited_motor = self.unlimited_speed",
"def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)",
"def forward_zero_one():\n knob = nuke.thisKnob()\n knob.setAnimated()\n knob.setValueAt(0, previous_current_next_frame()[1])\n knob.setValueAt(1, previous_current_next_frame()[2])",
"def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)",
"def speed_change_done(self):\n io.outpin.at_speed = True",
"def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the duty cycle of all control pins to zero to stop the motor. | def stop(self):
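        # zero the duty cycle on every drive and steering channel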
self.pwm_forward.ChangeDutyCycle(0)
self.pwm_backward.ChangeDutyCycle(0)
self.pwm_left.ChangeDutyCycle(0)
self.pwm_right.ChangeDutyCycle(0) | [
"def stop(self):\n\n GPIO.output(self.pin1, False)\n GPIO.output(self.pin2, False)\n self.pwm_control.ChangeDutyCycle(0)",
"def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)",
"def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)",
"def stop(self):\n\t\tself.pwm.set_pwm(self.channel, 0, 0x1000)",
"def stop_motor(self):\n\n GPIO.setmode(GPIO.BOARD)\n # this is the pin where the relay board that controls the motor is attached to\n pinList = self.pin\n\n # in case there are multiple relay boards attached to the Pi\n for i in pinList:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.HIGH)\n\n try:\n GPIO.cleanup()\n\n\n # End program cleanly with keyboard\n except KeyboardInterrupt:\n print(\"Quit\")\n\n # Reset GPIO settings\n GPIO.cleanup()",
"def set_do0_off(self):\n self.device.eDigitalOut(channel=0,writeD=1,state=0)\n print('set_do0_off')",
"def down(self):\n\n if self._pwm == None:\n GPIO.output(self.pin_number,False)\n else:\n err = \"cannot set to 'down': pulse width modulation running on pin.\\n\"\n raise ValueError(err)",
"def stop(self):\n self.rollerMotor.set(0)",
"def power_off(self):\n self.pd_sck_pin.value(0)\n self.pd_sck_pin.value(1)\n sleep_us(self.SLEEP_DELAY_USEC)",
"def stop(self):\n self.left_motor.stop()\n self.right_motor.stop()",
"def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)",
"def stop_motors():\n leftMotor.stop()\n rightMotor.stop()\n sleep(0.1)",
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def stop(self):\n self.lmotor_power = 0.0\n self.lmotor.stop()\n self.rmotor_power = 0.0\n self.rmotor.stop()",
"def set_servo_power_off(force = False):\n set_drive_power(power=False)",
"def stop(self):\n\n # Put the pin in the down state\n if self._pwm != None:\n self.stop_pwm()\n self.down() \n\n GPIO.cleanup(self.pin_number)",
"def stop() -> None:\n set_motors_speed(0, 0)",
"def emitters_off(self):\n self.wp.digitalWrite(self.LEDON_PIN, self.wp.LOW)\n self.wp.delayMicroseconds(20)",
"def set_do1_off(self):\n self.device.eDigitalOut(channel=1,writeD=1,state=0)\n print('set_do1_off')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
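For context on the two motor-control records above: forward() and stop() only work once the PWM channels they touch exist. A minimal setup sketch, assuming RPi.GPIO and illustrative wiring (the class name, pin numbers, and 100 Hz frequency are assumptions, not taken from these records), could look like this:

import RPi.GPIO as GPIO

class MotorSketch:
    """Hypothetical wrapper showing how the pwm_* attributes could be created."""

    def __init__(self, pin_forward=16, pin_backward=18, pin_left=11, pin_right=13):
        GPIO.setmode(GPIO.BOARD)
        for pin in (pin_forward, pin_backward, pin_left, pin_right):
            GPIO.setup(pin, GPIO.OUT)
        # one 100 Hz PWM channel per pin, all started at 0% duty cycle (stopped)
        self.pwm_forward = GPIO.PWM(pin_forward, 100)
        self.pwm_backward = GPIO.PWM(pin_backward, 100)
        self.pwm_left = GPIO.PWM(pin_left, 100)
        self.pwm_right = GPIO.PWM(pin_right, 100)
        for pwm in (self.pwm_forward, self.pwm_backward, self.pwm_left, self.pwm_right):
            pwm.start(0)

    def forward(self, speed):
        # speed is a duty cycle in percent (0-100); idle the reverse channel first
        self.pwm_backward.ChangeDutyCycle(0)
        self.pwm_forward.ChangeDutyCycle(speed)

    def stop(self):
        # zero every channel so both drive and steering stop
        for pwm in (self.pwm_forward, self.pwm_backward, self.pwm_left, self.pwm_right):
            pwm.ChangeDutyCycle(0)

GPIO.cleanup() should still be called when the program exits so the pins are released.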
Interpolate the points and radii within sections that have too few points. | def interpPoints(self, interpRad=False):
# print(np.shape(long_distances))
long_sections, long_distances, meddist = self.findLongSections()
print('Long inter-point distances found: %i' % len(long_sections))
count = 0
for sec in long_sections:
print('Supposed long section %i has %i nodes' \
% (sec, len(self.sections[sec])))
# set first and last points for interpolation
pt0, pt1 = self.sections[sec][0], self.sections[sec][-1]
# find number of points
numpts = int(long_distances[long_sections.index(sec)]/meddist)
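            # resample so consecutive points sit roughly one median spacing apart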
Xs = np.linspace(pt0[0], pt1[0], numpts)
Ys = np.linspace(pt0[1], pt1[1], numpts)
Zs = np.linspace(pt0[2], pt1[2], numpts)
newpts = np.dstack((Xs, Ys, Zs))
            newpts = [newpts[0][i] for i in range(len(newpts[0]))]
self.sections[sec] = newpts
count = count + 1
rad0, rad1 = self.secRads[sec][0], self.secRads[sec][-1]
# print(rad0, rad1)
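            # give the resampled points linearly interpolated radii between the end-point radii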
rads = np.linspace(rad0, rad1, numpts)
# print(rads)
self.secRads[sec] = rads
long_sections, long_distances, meddist = self.findLongSections()
print('Long sections still remaining: %i' % len(long_sections))
if len(long_sections) > 0:
print(long_distances, meddist)
return self | [
"def interpolate_old(self):\n\n # FIXME (Ole): Maybe this function\n # should move to the C-interface?\n # However, it isn't called by validate_all.py, so it\n # may not be that important to optimise it?\n\n# N = self.vertex_values.shape[0]\n# for i in range(N):\n# v0 = self.vertex_values[i, 0]\n# v1 = self.vertex_values[i, 1]\n# v2 = self.vertex_values[i, 2]\n#\n# self.centroid_values[i] = (v0 + v1 + v2)/3\n\n v0 = self.vertex_values[:, 0]\n v1 = self.vertex_values[:, 1]\n v2 = self.vertex_values[:, 2]\n\n self.centroid_values[:] = (v0 + v1 + v2) / 3\n\n self.interpolate_from_vertices_to_edges()",
"def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint",
"def _interpolateContour( self ):\n logger.debug( f\"_interpolateContour()\" )\n\n if not self._electrodePositions:\n self._contourMapper = vtkPolyDataMapper()\n self._contourMapper.SetInputConnection( self._smoothedContour.GetOutputPort() )\n return\n\n self._points, self._values = vtkPoints(), vtkFloatArray()\n\n for point, value in zip( self._electrodePositions, self._electrodeValues ):\n self._points.InsertNextPoint( point )\n self._values.InsertNextValue( value )\n\n self._electrodes = vtkPolyData()\n self._electrodes.SetPoints( self._points )\n self._electrodes.GetPointData().SetScalars( self._values )\n\n # Add one to the bounds to include points right on the bounds.\n bounds = list( self._contour.GetOutput().GetBounds() )\n bounds = [ x + y for x, y in zip( bounds, [-1, 1, -1, 1, -1, 1] ) ]\n\n self._shepard = vtkShepardMethod()\n self._shepard.SetInputData( self._electrodes )\n self._shepard.SetModelBounds( bounds )\n self._shepard.SetSampleDimensions( 50, 50, 25 ) # The size of the grid to sample onto.\n self._shepard.SetMaximumDistance( 1.0 ) # Include ALL electrodes to calculate the value of a sample.\n self._shepard.SetNullValue( 0.5 ) # When max. distance is not 1 and a sample is out of range of any electrode, give it this value.\n self._shepard.SetPowerParameter( 2 ) # Higher power factors will decay slower.\n\n self._resample = vtkResampleWithDataSet()\n self._resample.SetInputConnection( self._smoothedContour.GetOutputPort() )\n self._resample.SetSourceConnection( self._shepard.GetOutputPort() )\n\n self._contourMapper.SetInputConnection( self._resample.GetOutputPort() )",
"def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...",
"def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan",
"def interpolate_trajectory(trajectory):\r\n \r\n nans = np.isnan(trajectory[:,0])\r\n not_nans = ~nans\r\n \r\n nans_idx = np.where(nans)[0]\r\n valid_idx = np.where(not_nans)[0]\r\n if len(valid_idx) < 2:\r\n return np.zeros(shape=(trajectory.shape[0]), dtype=np.float32)\r\n \r\n # Interpolate gaps.\r\n for i in nans_idx:\r\n # Find closest two points to use for interpolation.\r\n begin_t = np.searchsorted(valid_idx, i) - 1\r\n if begin_t == len(valid_idx) - 1:\r\n begin_t -= 1 # extrapolate right\r\n elif begin_t == -1:\r\n begin_t = 0 # extrapolate left\r\n \r\n begin_t_idx = valid_idx[begin_t]\r\n end_t_idx = valid_idx[begin_t + 1]\r\n \r\n last_t = trajectory[begin_t_idx]\r\n next_t = trajectory[end_t_idx]\r\n dx = (end_t_idx - begin_t_idx) / 3.0\r\n m = [(next_t[0] - last_t[0]) / dx,\r\n (next_t[1] - last_t[1]) / dx,\r\n short_angle_dist(last_t[2], next_t[2]) / dx]\r\n\r\n dt = (i - begin_t_idx) / 3.0\r\n e = [m[i] * dt + last_t[i] for i in range(3)]\r\n trajectory[i] = e\r\n \r\n return not_nans.astype(np.float32)",
"def _interpolate(self, kps1: List[List[kp]], kps2: List[List[kp]]) -> np.ndarray:\n interpolated_kps = []\n for i in range(len(kps1)):\n # If one of the two points is empty -> Not interpolate\n if len(kps1[i]) != 0 and len(kps2[i]) != 0:\n interpolated_coords = np.linspace(np.array(kps1[i]), np.array(kps2[i]), num=3).tolist()\n interpolated_kps.append(interpolated_coords[1])\n else:\n interpolated_kps.append([None, None, None])\n return np.array(interpolated_kps)",
"def interpolate(self):\n return NonbondedInterpolated(\n self.get_exclusion_idxs(),\n self.get_scale_factors(),\n self.get_lambda_plane_idxs(),\n self.get_lambda_offset_idxs(),\n self.get_beta(),\n self.get_cutoff()\n )",
"def test_interpolate(self):\r\n f = lambda t,y,yd: y**0.25-yd\r\n \r\n prob = Implicit_Problem(f,[1.0],[1.0])\r\n\r\n sim = IDA(prob)\r\n sim.simulate(10., 100)\r\n y100 = sim.y_sol\r\n t100 = sim.t_sol\r\n sim.reset()\r\n sim.simulate(10.)\r\n nose.tools.assert_almost_equal(y100[-2], sim.interpolate(9.9,0),5)",
"def interpolate(target_x, index, interpolation_values):\n \n # the x2/y2 coordinate, y2 is a list because y is one whole datapoint with\n # x and y values. The y values will be interpolated, the x values do not change,\n # they are just kept for creating a valid datapoint again\n x2 = interpolation_values[index][0]\n y2_values = interpolation_values[index][1]\n \n # the x1/y1 coordinate\n i = index - 1\n while i >= 0 and i < len(interpolation_values) and interpolation_values[i][0] == x2:\n i -= 1\n x1 = interpolation_values[i][0]\n y1_values = interpolation_values[i][1]\n \n \n # the max count\n count = max(len(y1_values[0]), len(y1_values[1]), len(y2_values[0]), len(y2_values[1]))\n # the result\n result = []\n \n # go through each of the ~200 raw position/raw voltage points\n for i in range(0, count):\n if i < len(y1_values[1]) and i < len(y2_values[1]):\n # the slope and the y intercept\n m = (y2_values[1][i] - y1_values[1][i]) / (x2 - x1)\n t = y1_values[1][i] - m * x1\n \n # the resulting y value for the target_y\n y = m * target_x + t\n elif i < len(y1_values):\n y = y1_values\n elif i < len(y2_values):\n y = y2_values\n else:\n continue\n \n # get the x value, normally this should be the y2 value because this is \n # interpolating with the slope of the target point and the point before\n # but if it is not possible use the point before. There are ~200 points\n # on a distance of ~4cm, it does not matter if there is a shift of one\n # point or not\n if i < len(y2_values[0]):\n x = y2_values[0][i]\n elif i < len(y1_values[1]):\n x = y2_values[1][i]\n else:\n x = None\n \n # add the result\n if x != None and y != None:\n result.append((x, y))\n \n # the result\n return result",
"def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity",
"def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def interpolate(timepoint_defined, signal, interp_type, TR):\n\n timepoint_defined = np.array(timepoint_defined)\n\n true_inds = np.where(timepoint_defined == True)[0]\n false_inds = np.where(timepoint_defined == False)[0]\n\n\n signal_copy = np.array(signal)\n\n if interp_type == 'linear':\n\n #Still need to handle beginning/end cases\n\n for temp_timepoint in false_inds:\n\n\n #past_timepoint = true_inds[np.sort(np.where(true_inds < temp_timepoint)[0])[-1]]\n #future_timepoint = true_inds[np.sort(np.where(true_inds > temp_timepoint)[0])[0]]\n\n\n #Be sure there is at least one future timepoint and one past timepoint.\n #If there isn't, then grab either two past or two future timepoints and use those\n #for interpolation. If there aren't even two total past + future timepoints, then\n #just set the output to 0. Could also set the output to be unadjusted, but this\n #is a way to make the issue more obvious.\n temp_past_timepoint = np.sort(np.where(true_inds < temp_timepoint)[0])\n temp_future_timepoint = np.sort(np.where(true_inds > temp_timepoint)[0])\n\n #If we don't have enough data to interpolate/extrapolate\n if len(temp_past_timepoint) + len(temp_future_timepoint) < 2:\n\n signal_copy[temp_timepoint] = 0\n\n #If we do have enough data to interpolate/extrapolate\n else:\n\n if len(temp_past_timepoint) == 0:\n past_timepoint = true_inds[temp_future_timepoint[1]]\n else:\n past_timepoint = true_inds[temp_past_timepoint[-1]]\n\n if len(temp_future_timepoint) == 0:\n future_timepoint = true_inds[temp_past_timepoint[-2]]\n else:\n future_timepoint = true_inds[temp_future_timepoint[0]]\n\n #Find the appopriate past/future values\n past_value = signal_copy[int(past_timepoint)]\n future_value = signal_copy[int(future_timepoint)]\n\n #Use the interp1d function for interpolation\n interp_object = interp.interp1d([past_timepoint, future_timepoint], [past_value, future_value], bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint] = interp_object(temp_timepoint).item(0)\n\n return signal_copy\n\n\n #For cubic spline interpolation, instead of taking the past/future timepoint\n #we will just take the closest 5 timepoints. If there aren't 5 timepoints, we will\n #set the output to 0\n if interp_type == 'cubic_spline':\n\n sorted_good = np.sort(signal_copy[true_inds])\n min_bound = sorted_good[0]\n max_bound = sorted_good[-1]\n\n #Continue if there are at least 5 good inds\n true_inds_needed = 5\n if len(true_inds) >= true_inds_needed:\n\n for temp_timepoint in false_inds:\n\n closest_inds = true_inds[np.argsort(np.absolute(true_inds - temp_timepoint))]\n closest_vals = signal_copy[closest_inds.astype(int)]\n interp_object = interp.interp1d(closest_inds, closest_vals, kind = 'cubic', bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint.astype(int)] = interp_object(temp_timepoint).item(0)\n\n min_bound_exceded = np.where(signal_copy < min_bound)[0]\n if len(min_bound_exceded) > 0:\n\n signal_copy[min_bound_exceded] = min_bound\n\n max_bound_exceded = np.where(signal_copy > max_bound)[0]\n if len(max_bound_exceded) > 0:\n\n signal_copy[max_bound_exceded] = max_bound\n\n #If there aren't enough good timepoints, then set the bad timepoints = 0\n else:\n\n signal_copy[false_inds.astype(int)] = 0\n\n\n return signal_copy\n\n\n if interp_type == 'spectral':\n\n signal_copy = spectral_interpolation(timepoint_defined, signal_copy, TR)\n\n return signal_copy",
"def _interpolate( p1, p2, x ):\n return p1[ 1 ] + _slope( p1, p2 ) * ( x - p1[ 0 ] )",
"def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)",
"def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def demo_interpolate():\n \n g = generate_structure(\n structure = 'graphene',\n cell = 'prim',\n tiling = (4,4,1),\n )\n g11 = g.folded_structure\n\n # Make chromium atom positions (pretend like they are above the surface)\n npos = np.dot((1./3,2./3,0),g11.axes)\n npos1 = npos+3*g11.axes[0]\n npos2 = npos1+g11.axes[0]+g11.axes[1]\n\n # \"Relaxed\" structure with additional atom on one ring\n gr1 = g.copy()\n gr1.add_atoms(['Cr'],[npos1])\n\n # \"Relaxed\" structure with additional atom on neighboring ring\n gr2 = g.copy()\n gr2.add_atoms(['Cr'],[npos2])\n gr2.recenter()\n\n # Interpolate between the two structures within min. image convention\n spath = gr1.interpolate(gr2,10)\n \n # Plot the (linear) interpolation path\n plt.figure(tight_layout=True)\n for gr in spath:\n gr.plot2d_ax(0,1,'k',lw=2)\n gr.plot2d_pos(0,1,'bo')\n npos = gr.pos[-1]\n plt.plot([npos[0]],[npos[1]],'go')\n #end for\n plt.axis('equal')\n plt.xlabel('x (A)')\n plt.ylabel('y (A)')\n plt.title('demo interpolate: paths for NEB')\n\n plt.show()",
"def _interpolate_data(self, data):\n len_data = len(data)\n if len_data > self._nbr_points:\n x_interp = range(0, len_data)\n new_x_points = np.linspace(0, len_data - 1, self._nbr_points)\n new_data = np.interp(new_x_points, x_interp, data)\n return new_data\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads the data set provided in this repository and returns a list of Decks or FuzzyDecks. The deck list is sorted by archetype so the distance matrix is easier to visualize. | def load_data_set(hero_class: str, fuzzy: bool, filename: str = "data/Decks.json", debug: bool = False) \
-> Union[List[Deck], List[FuzzyDeck]]:
if debug:
print("### loading dataset...")
with open(filename) as f:
data = json.load(f)
hero_classes = list(data["series"]["metadata"].keys())
if hero_class not in hero_classes and hero_class != "ALL":
raise Exception("hero class <" + hero_class + "> not available. "
"Consider using one class out of: " + ", ".join(hero_classes))
if debug:
for cl in hero_classes:
print("" + str(len(data["series"]["data"][cl])) + " played decks for hero class " + cl)
played_decks = []
if hero_class == "ALL":
for hero_class in hero_classes:
for i, deck_data in enumerate(data["series"]["data"][hero_class]):
if fuzzy:
played_decks.append(FuzzyDeck(deck_data))
else:
played_decks.append(Deck(deck_data))
else:
for i, deck_data in enumerate(data["series"]["data"][hero_class]):
if fuzzy:
played_decks.append(FuzzyDeck(deck_data))
else:
played_decks.append(Deck(deck_data))
# sort by cluster label for easier visualization of distance matrix
played_decks = sorted(played_decks, key=lambda x: x.archetype[0])
return played_decks | [
"def test_read_deck() -> None:\n ex_full = \"\"\"TD-DFT calculation \nFreq RB3LYP 6-31G(d) \nNumber of atoms I 12\nInfo1-9 I N= 9\n 7 6 0 0 0 100\n 6 18 -502\nMultiplicity I 1\nNumber of electrons I 42\nNumber of alpha electrons I 21\nRoute C N= 7\n# Geom=AllCheck Guess=Read SCRF=Check GenChk B3LYP/6-31G(d) \nSymm=None Freq=(NoRaman)\nNuclear charges R N= 12\n 6.00000000E+00 6.00000000E+00 6.00000000E+00 6.00000000E+00 6.00000000E+00\n 6.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00\n 1.00000000E+00 1.00000000E+00\n\"\"\"\n result = fchic.deck_loads(ex_full, \"Multiplicity\")\n assert result == [1]\n result = fchic.deck_loads(ex_full, \"Nuclear charges\")\n assert len(result) == 12\n assert result[0] == 6.0",
"def load_cards() -> List[\"Card\"]:\n cards = []\n\n with open(os.path.dirname(__file__) + \"/resources/cardlist.txt\", \"r\") as card_list:\n raw_cards = card_list.readlines()\n type_mapping = {\n \"creature\": (Creature, 0),\n \"itemGreen\": (GreenItem, 1),\n \"itemRed\": (RedItem, 2),\n \"itemBlue\": (BlueItem, 3),\n }\n\n for card in raw_cards:\n (\n card_id,\n name,\n card_type,\n cost,\n attack,\n defense,\n keywords,\n player_hp,\n enemy_hp,\n card_draw,\n text,\n ) = map(str.strip, card.split(\";\"))\n\n card_class, type_id = type_mapping[card_type]\n\n cards.append(\n card_class(\n int(card_id),\n name,\n type_id,\n int(cost),\n int(attack),\n int(defense),\n keywords,\n int(player_hp),\n int(enemy_hp),\n int(card_draw),\n 0,\n text,\n )\n )\n\n assert len(cards) == 160\n\n return cards",
"def get_decks(self, include_cards=True):\n deck_previews = self.data_source.get_decks(self.user_id,\n not include_cards)\n\n return deck_previews",
"def test_read_deck_file() -> None:\n with open(\"tests/data/data.fchk\", \"r\") as f:\n result = fchic.deck_load(f, \"Cartesian Force Constants\")\n assert len(result) == 666 # number of the beast!",
"def get_deck_list(deckid):\n # Need to know if we're looking at a deckid or deckid tuple\n # TODO: Clean this up a bit (shouldn't need to support deckids or deck)\n # tuples now that I'm using Deck objects.)\n if isinstance(deckid, tuple):\n # The deckid is in deck[0]\n # Format is (deckid, deck_class)\n deckid = deckid[0]\n # http://www.hearthpwn.com/decks/listing/ + /neutral or /class\n url = 'http://www.hearthpwn.com/decks/listing/'\n css = '#cards > tbody > tr > td.col-name'\n\n cards = []\n\n # Class Cards\n pagetree = get_pagetree(url + str(deckid) + '/class')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n # Neutral Cards\n pagetree = get_pagetree(url + str(deckid) + '/neutral')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n regex = re.compile(b'^\\r\\n(.+)\\r\\n\\r\\n\\xc3\\x97 (\\d+)')\n deck = []\n for card in cards:\n match = re.search(regex, card)\n if match:\n cardname = match.group(1).decode('UTF-8')\n amount = int(match.group(2))\n deck.append(Card(cardname, amount))\n\n return deck",
"def load_deck(self):\n self.log.debug(\"Loading file with deck\")\n file = QFileDialog().getOpenFileName(self, \"Load deck\", os.path.join(os.path.dirname(__file__), os.pardir, \"decks\"), \"Text files (*.txt)\")\n if not file or file[0] == \"\":\n return\n self.log.debug(f\"Trying to load {file[0]} file\")\n self.deck = []\n with open(file[0], \"r\") as f:\n for line in f.readlines():\n try:\n number = int(line.split()[0])\n except ValueError:\n _ = QMessageBox.information(self, \"Information\", \"This deck file is corrupted.\"\n \" Please, create new deck. \",\n QMessageBox.Ok, QMessageBox.NoButton)\n self.deck = []\n self.log.warning(f\"Loading of the file {file[0]} ended in error\")\n return\n self.deck.append(number)\n self.log.info(f\"Loading of the deck {file[0]} ended successfully\")",
"def get_decks(filtering=None, sorting=None, count=None,\n patch=None, classid=None):\n decks_metainfo = get_deck_metainfo(filtering, sorting, count,\n patch, classid)\n decks = [Deck(deck[0], deck[1], get_deck_list(deck[0]))\n for deck in decks_metainfo]\n return decks",
"async def decks(self, ctx: Context, *cards):\n if cards is None or not len(cards):\n await send_cmd_help(ctx)\n return\n\n # legacy param - will remove in future updates\n snapshot_id = None\n\n # check last param, if digit, assign as snapshot id\n if cards[-1].isdigit():\n snapshot_id = int(cards[-1])\n cards = cards[:-1]\n\n if snapshot_id is None:\n snapshot_id = str(cardpop_range_max - 1)\n\n is_most_recent_snapshot = int(snapshot_id) == cardpop_range_max - 1\n\n # await self.bot.say(\"{}: {}\".format(snapshot_id, cards))\n\n card_names_are_valid = True\n for card in cards:\n if self.get_card_name(card) is None:\n await self.bot.say(\n \"**{}** is not valid card name.\".format(card))\n card_names_are_valid = False\n if not card_names_are_valid:\n return\n\n # repopulate cards with normalized data\n cards = [self.get_card_name(c) for c in cards]\n # cpids = [self.get_card_cpid(c) for c in cards]\n\n found_decks = []\n if snapshot_id in self.cardpop:\n decks = self.cardpop[snapshot_id][\"decks\"]\n for k in decks.keys():\n deck = decks[k][\"deck\"]\n if all(card in deck for card in cards):\n found_decks.append(k)\n # if all(cpid in k for cpid in cpids):\n # found_decks.append(k)\n\n await self.bot.say(\"Found {} decks with {} in Snapshot #{}{}.\".format(\n len(found_decks),\n ', '.join([self.card_to_str(card) for card in cards]),\n snapshot_id,\n ' (most recent)' if is_most_recent_snapshot else ''))\n\n if len(found_decks):\n # await self.bot.say(\n # \"Listing top {} decks:\".format(\n # min([max_deck_show, len(found_decks)])))\n\n for i, deck in enumerate(found_decks):\n # Show top 5 deck images only\n # if i < max_deck_show:\n\n results_max = 3\n\n cards = deck.split(', ')\n norm_cards = [self.get_card_from_cpid(c) for c in cards]\n\n await self.bot.say(\"**{}**: {}/100: {}\".format(\n i + 1,\n self.get_deckpop_count(deck, snapshot_id),\n self.card_to_str(deck)))\n\n FakeMember = namedtuple(\"FakeMember\", \"name\")\n m = FakeMember(name=\"Snapshot #{}\".format(snapshot_id))\n\n await self.bot.get_cog(\"Deck\").deck_get_helper(\n ctx,\n card1=norm_cards[0],\n card2=norm_cards[1],\n card3=norm_cards[2],\n card4=norm_cards[3],\n card5=norm_cards[4],\n card6=norm_cards[5],\n card7=norm_cards[6],\n card8=norm_cards[7],\n deck_name=\"Top Deck: {}\".format(i + 1),\n author=m)\n\n if (i + 1) % results_max == 0 and (i + 1) < len(found_decks):\n def pagination_check(m):\n return m.content.lower() == 'y'\n await self.bot.say(\n \"Would you like to see more results? (y/n)\")\n answer = await self.bot.wait_for_message(\n timeout=PAGINATION_TIMEOUT,\n author=ctx.message.author,\n check=pagination_check)\n if answer is None:\n await self.bot.say(\"Search results aborted.\")\n return",
"def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck",
"def fulldecklist(deckcode):\n ##First imports the deckcode as a deck\n ##Example Mage Deck we will be using (\"AAECAf0ECO0Fcem6AoivAuwHobcCxQS/CAuBsgKVA8HBApYF17YCmMQCqwTAAbC8ArsCo7YCAA==\")\n\n deck = Deck.from_deckstring(str(deckcode))\n complete_deck = []\n for card,copy in deck.cards:\n l = [card] * copy\n complete_deck.extend(l)\n return complete_deck",
"def load_data ():\n \n data_set = datasets.load_breast_cancer()\n return data_set",
"def get_cards(self):\n cards = []\n for item in self.db:\n cards.append(item[\"card\"])\n \n return cards",
"def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn cards",
"async def _games_by_decks(self, ctx, *, deck_names: str=\"\"):\n\n if not deck_names:\n await ctx.send(embed=embed.error(ctx, description=\"No deck name included\"))\n return\n deck_name_list = deck_names.split(',')\n if len(deck_name_list) > 4:\n await ctx.send(embed=embed.error(ctx, description=\"Games cannot contain more than 4 decks\"))\n return\n\n deck_names = []\n for deck_name in deck_name_list:\n deck = self.bot.db.find_deck(deck_name)\n if not deck:\n continue\n deck_names.append(deck['name'])\n\n if not deck_names:\n await ctx.send(embed=embed.error(ctx, description=\"No decks found with the given deck names\"))\n return\n matches = self.bot.db.find_matches({\"players.deck\": {\"$all\": deck_names}}, ctx.message.guild, limit=20)\n matches = list(matches)\n if not matches:\n await ctx.send(embed=embed.info(description=(\"No matches found containing \" + \", \".join(deck_names))))\n return\n title = \"Games Containing: \" + \", \".join(deck_names)\n emsgs = self._make_match_table(title, matches, winner_type=\"deck\")\n for emsg in emsgs:\n await ctx.send(embed=emsg)",
"def load_decks() -> dict:\n images = [img for img in glob.glob(\"decks/*/*.png\")]\n decks = {deck.split('/')[1]: {} for deck in glob.glob(\"decks/*\")}\n\n for image in images:\n img = Image.open(image)\n card = image.split('/')[-1].split('.')[0]\n deck = image.split('/')[1]\n decks[deck][card] = img\n\n return decks",
"def test_deck_is(self):\n app.create_deck(self.table)\n self.assertEqual(52, len(self.table.deck))\n unique_deck = self.table.deck[:]\n unique_deck = set(unique_deck)\n unique_deck = list(unique_deck)\n self.assertEqual(52, len(unique_deck))\n spades = [x for x in self.table.deck if x.suit == \"s\"]\n self.assertEqual(13, len(spades))\n unique_deck = list(set(spades))\n self.assertEqual(13, len(unique_deck))",
"def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]",
"def _create_deck(self, random):\n color_cards = product(COLORS, COLOR_CARD_TYPES)\n black_cards = product(repeat('black', 4), BLACK_CARD_TYPES)\n all_cards = chain(color_cards, black_cards)\n deck = [UnoCard(color, card_type) for color, card_type in all_cards]\n if random:\n shuffle(deck)\n return deck\n else:\n return list(reversed(deck))",
"def loadCardDB():\n with open(CARDS_JSON, 'r') as infofile:\n cards = json.load(infofile)\n with open(PILOT_TEXT_JSON, 'r') as infofile:\n pilotTexts = json.load(infofile)\n with open(UPGRADE_TEXT_JSON, 'r') as infofile:\n upgradeTexts = json.load(infofile)\n with open(MODIFICATION_TEXT_JSON, 'r') as infofile:\n modificationTexts = json.load(infofile)\n with open(TITLE_TEXT_JSON, 'r') as infofile:\n titleTexts = json.load(infofile)\n return _createCardDB(cards, pilotTexts, upgradeTexts, modificationTexts, titleTexts)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the distance matrix of a list of Deck or FuzzyDeck objects. Returns the vector-form distance vector. | def calculate_distance_matrix(played_decks: Union[List[FuzzyDeck], List[Deck]], measure: str):
deck_data = np.array(played_decks).reshape(len(played_decks), 1)
if measure == "jaccard":
dist = pdist(deck_data, lambda u, v: u[0].jaccard_distance(v[0]))
elif measure == "euclidean":
dist = pdist(deck_data, lambda u, v: u[0].euclidean_distance(v[0]))
else:
raise ValueError("Unknown distance measure {}. ".format(measure) +
"Please choose one of the following distance measures ['euclidean','jaccard']")
return dist | [
"def get_distance_matrix(df_vectorized):\n\n\n gram_matrix = np.dot(df_vectorized.values,df_vectorized.values.T)\n\n norms_matrix = np.sqrt(np.outer(np.diag(gram_matrix),np.diag(gram_matrix)))\n\n cosine_distance_matrix = gram_matrix/norms_matrix\n\n return cosine_distance_matrix",
"def compute_distance_matrix(self):\n return np.square(self.data[:, np.newaxis, :] - self.centers[['x1', 'x2']].as_matrix()).sum(axis=2)",
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)",
"def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist",
"def to_distances(self):\n # Ignore empty cell\n num_class = len(Item) - 1\n distance_vision = np.zeros(num_class)\n for cell in self.nearest_cells:\n distance_vector = (self.center.coord[0] - cell.coord[0],\n self.center.coord[1] - cell.coord[1])\n distance = np.linalg.norm(distance_vector)\n distance_vision[cell.value] = distance\n \n return distance_vision",
"def calc_distance_matrix(mols):\n\n # TODO - do we need to calculate both sides of the matrix? Tanimoto is supposed to be a symmetric distance measure,\n # but the matrix that is generated does not seem to be symmetric.\n\n mol_fm_tuples = []\n for mol in mols:\n features = sucos.getRawFeatures(mol)\n mol_fm_tuples.append((mol, features))\n\n matrix = []\n for tuple1 in mol_fm_tuples:\n tmp = []\n for tuple2 in mol_fm_tuples:\n if tuple1[0] == tuple2[0]:\n tmp.append(0.0)\n else:\n # utils.log(\"Calculating SuCOS between\", mol1, mol2)\n sucos_score, fm_score, tani_score = sucos.get_SucosScore(\n tuple1[0],\n tuple2[0],\n tani=True,\n ref_features=tuple1[1],\n query_features=tuple2[1],\n )\n tmp.append(1.0 - sucos_score)\n matrix.append(tmp)\n\n return matrix",
"def build_distance_matrix(self):\n coords = self.atomcoords\n self.distancematrix = np.zeros((len(coords), len(coords)))\n for i in range(len(coords)):\n for j in [x for x in range(len(coords)) if x > i]:\n self.distancematrix[i][j] = norm(coords[i] - coords[j])\n self.distancematrix[j][i] = self.distancematrix[i][j]",
"def DistanceMatrices(self):\r\n return self._dms",
"def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)",
"def DistanceMatrices(self):\n return self._dms",
"def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix",
"def compute_distance_matrix(self):\n clusters_list = self.clusters.keys() # Extract the list of keys of all clusters.\n # Iterate over all pairs in the clusters\n for clust_it_1 in range(len(clusters_list)):\n for clust_it_2 in range(clust_it_1 + 1, len(clusters_list)):\n # Ignore the distance to itself.\n if clust_it_1 == clust_it_2:\n continue\n else:\n # Compute the distance between all pairs\n distance = self.eucledian_distance(self.clusters[clust_it_1].center,\n self.clusters[clust_it_2].center)\n # Update the distances in distance matrix.\n self.distances[clust_it_1][clust_it_2] = distance\n self.distances[clust_it_2][clust_it_1] = distance",
"def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ",
"def get_distance_matrix(self, prev_json_list, json_list,\n dist_norm_parameters=None):\n\n xy1 = [trackerutils.get_xy(json_ele) for json_ele in prev_json_list] # [[centroid1], [centroid2],..[centroidn]]\n xy1 = self.normalize_dist(xy1, dist_norm_parameters)\n xy2 = [trackerutils.get_xy(json_ele) for json_ele in json_list] # [[centroid1], [centroid2],..[centroidn]]\n xy2 = self.normalize_dist(xy2, dist_norm_parameters)\n dist_matrix = distance_matrix(xy1, xy2) #computes euclidean dist, shape: [len(xy2), len(xy1)]\n return dist_matrix",
"def distance_matrix(x, distf = None):\n\n if distf == None:\n distf = d_euclidean\n\n vectors = x.shape[0]\n d = numpy.zeros([vectors, vectors])\n\n for i in range(vectors):\n for j in range(i, vectors):\n dd = distf(x[i, :], x[j, :])\n d[i, j] = dd\n d[j, i] = dd\n\n return d",
"def distant_calculation1(A_dis,Xi_dis):\n d_dis=[]\n for i in range(0,len(A_dis)):\n d_dis_i= math.pow( np.linalg.norm(A_dis[i] - Xi_dis) ,2)\n d_dis.append(d_dis_i)\n return d_dis",
"def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)",
"def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates v-measure, homogeneity, and completeness for each clustering algorithm stored in clustering_alg and adds them to each algorithm's dictionary. | def eval_v_measure_homogeneity_completeness(clustering_alg: List, sdist_euclidean, sdist_jaccard,
labels_true, debug: bool = False):
for i, alg_dict in enumerate(clustering_alg):
if "alg" in alg_dict:
if alg_dict["distance"] == "euclidean":
clustering = alg_dict["alg"].fit(sdist_euclidean)
elif alg_dict["distance"] == "jaccard":
clustering = alg_dict["alg"].fit(sdist_jaccard)
else:
raise ValueError("Unknown distance measure {}. ".format(alg_dict["distance"]) +
"Please choose one of the following distance measures ['euclidean','jaccard']")
labels_predicted = clustering.labels_
alg_dict["labels"] = labels_predicted
else:
labels_predicted = alg_dict["labels"]
alg_dict["homogeneity"], alg_dict["completeness"], alg_dict["v-measure"] = \
homogeneity_completeness_v_measure(labels_true, labels_predicted)
if debug:
print("Alg: " + alg_dict["name"] + "; \t v-measure = " + str(alg_dict["v-measure"])) | [
"def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }",
"def cluster_analysis(\n clusterers: list,\n hyperparameter_grids: list,\n eval_metrics_grid: list,\n eval_metrics_params: dict,\n word_embeddings: np.ndarray,\n words_vocabulary: list,\n word_to_int: dict,\n word_embeddings_normalized: np.ndarray = None,\n compute_pairwise_word_distances: bool = False,\n compute_pairwise_word_distances_normalized: bool = False,\n return_word_vectors: bool = False,\n save_result_to_disk: bool = False,\n output_dir: Optional[str] = None,\n model_name: Optional[str] = None,\n dataset_name: Optional[str] = None,\n output_filepath_suffix: Optional[str] = None,\n) -> Union[dict, tuple]:\n # Create word vectors from given words/vocabulary\n word_vectors = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings,\n )\n\n # Create normalized word vectors from given words/vocabulary if specified.\n word_vectors_normalized = None\n if word_embeddings_normalized is not None:\n word_vectors_normalized = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings_normalized,\n )\n\n if compute_pairwise_word_distances:\n word_vectors_pairwise_distances = pairwise_cosine_distances(word_vectors)\n if (\n compute_pairwise_word_distances_normalized\n and word_vectors_normalized is not None\n ):\n normalized_word_vectors_pairwise_distances = euclidean_distances(\n word_vectors_normalized\n )\n\n # Perform cluster analysis\n clusterers_result = {}\n unique_cluster_metrics = set()\n for clusterer_tuple, hyperparameter_grid, eval_metrics in zip(\n clusterers, hyperparameter_grids, eval_metrics_grid\n ):\n if len(clusterer_tuple) == 3:\n (clusterer_name, clusterer_cls, clusterer_use_normalized) = clusterer_tuple\n else:\n clusterer_use_normalized = False\n (clusterer_name, clusterer_cls) = clusterer_tuple\n print(f\"-- Clustering using {clusterer_name} --\")\n clusterers_result[clusterer_name] = {\n \"cluster_labels\": [],\n \"cluster_params\": [],\n \"cluster_metrics\": {},\n }\n\n # Do clustering for each set of hyperparameters\n param_grid = ParameterGrid(hyperparameter_grid)\n for params_idx, params in enumerate(tqdm(param_grid)):\n clusterers_result[clusterer_name][\"cluster_params\"].append(params)\n\n # Add exception for ward linkage clustering.\n if (\n clusterer_cls is AgglomerativeClustering\n and params.get(\"linkage\") == \"ward\"\n and word_vectors_normalized is not None\n ):\n params = {**params, \"affinity\": \"euclidean\"}\n clusterer_instance = clusterer_cls(**params)\n fit_predict_X = word_vectors_normalized\n else:\n clusterer_instance = clusterer_cls(**params)\n if (\n params.get(\"affinity\") == \"precomputed\"\n or params.get(\"metric\") == \"precomputed\"\n ):\n if (\n clusterer_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n fit_predict_X = normalized_word_vectors_pairwise_distances\n elif compute_pairwise_word_distances:\n fit_predict_X = word_vectors_pairwise_distances\n else:\n if clusterer_use_normalized and word_vectors_normalized is not None:\n fit_predict_X = word_vectors_normalized\n else:\n fit_predict_X = word_vectors\n\n # Use fit_predict if it is available.\n if getattr(clusterer_instance, \"fit_predict\", None) is not None:\n predicted_labels = clusterer_instance.fit_predict(fit_predict_X)\n else:\n clusterer_instance.fit(fit_predict_X)\n predicted_labels = clusterer_instance.predict(fit_predict_X)\n\n # Separate noise labels into clusters\n if clusterer_cls is HDBSCAN:\n predicted_labels = 
separate_noise_labels_into_clusters(predicted_labels)\n\n clusterers_result[clusterer_name][\"cluster_labels\"].append(predicted_labels)\n\n # Evaluate predicted cluster labels using internal evaluation metrics\n for eval_metric_tuple in eval_metrics:\n if len(eval_metric_tuple) == 3:\n (\n eval_metric_key,\n eval_metric,\n eval_metric_use_normalized,\n ) = eval_metric_tuple\n else:\n eval_metric_use_normalized = False\n (eval_metric_key, eval_metric) = eval_metric_tuple\n eval_metric_params = eval_metrics_params.get(eval_metric_key, {})\n if (\n compute_pairwise_word_distances\n and eval_metric_params.get(\"metric\") == \"precomputed\"\n ):\n if (\n eval_metric_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=normalized_word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n if (\n eval_metric_use_normalized\n and word_vectors_normalized is not None\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_normalized,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n unique_cluster_metrics.add(metric_name)\n\n # Initialize metric result\n if (\n metric_name\n not in clusterers_result[clusterer_name][\"cluster_metrics\"]\n ):\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ] = {\n \"metric_scores\": [],\n \"metric_obj_max\": metric_obj_max,\n \"best_metric_score_indices\": [],\n }\n\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"metric_scores\"\n ].append(metric_score)\n\n # Set best metric score indices\n if params_idx == len(param_grid) - 1:\n best_metric_score_indices = np.argsort(\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ][\"metric_scores\"]\n )\n if metric_obj_max:\n best_metric_score_indices = best_metric_score_indices[::-1]\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"best_metric_score_indices\"\n ] = best_metric_score_indices\n\n # Find preferred clusterers for each cluster metric (from best to worst)\n metric_preferred_clusterers = {}\n for cluster_metric_name in unique_cluster_metrics:\n metric_obj_max = None\n metric_best_scores = []\n clusterer_names = []\n for clusterer_name, clusterer_result in clusterers_result.items():\n if cluster_metric_name in clusterer_result[\"cluster_metrics\"]:\n clusterer_names.append(clusterer_name)\n metric_result = clusterer_result[\"cluster_metrics\"][cluster_metric_name]\n if metric_obj_max is None:\n metric_obj_max = metric_result[\"metric_obj_max\"]\n best_metric_score = metric_result[\"metric_scores\"][\n metric_result[\"best_metric_score_indices\"][0]\n ]\n metric_best_scores.append(best_metric_score)\n clusterer_names = np.array(clusterer_names)\n metric_best_scores = np.array(metric_best_scores)\n\n metric_best_scores_sorted_indices = np.argsort(metric_best_scores)\n if metric_obj_max:\n metric_best_scores_sorted_indices = metric_best_scores_sorted_indices[::-1]\n 
metric_preferred_clusterers[cluster_metric_name] = {\n \"clusterer_names\": clusterer_names[metric_best_scores_sorted_indices],\n \"best_metric_scores\": metric_best_scores[metric_best_scores_sorted_indices],\n }\n\n # Return result as dictionary\n cluster_analysis_result = {\n \"clusterers\": clusterers_result,\n \"metric_preferred_clusterers\": metric_preferred_clusterers,\n }\n\n if return_word_vectors:\n if compute_pairwise_word_distances:\n cluster_analysis_result = (\n cluster_analysis_result,\n word_vectors,\n word_vectors_pairwise_distances,\n )\n else:\n cluster_analysis_result = (cluster_analysis_result, word_vectors)\n\n # Save result to disk\n if save_result_to_disk:\n save_cluster_result_to_disk(\n cluster_result=cluster_analysis_result,\n output_dir=output_dir,\n model_name=model_name,\n dataset_name=dataset_name,\n output_filepath_suffix=output_filepath_suffix,\n )\n\n return cluster_analysis_result",
"def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):\n for (alg_name, alg_dict) in clustering_alg:\n if \"alg\" in alg_dict:\n clustering = alg_dict[\"alg\"].fit(sdist)\n labels_pred = clustering.labels_\n alg_dict[\"labels\"] = labels_pred\n else:\n labels_pred = alg_dict[\"labels\"]\n\n pred_label_dict, new_labels = normalize_labels(labels_pred)\n\n alg_dict[\"cm\"] = contingency_matrix(labels_true, new_labels)",
"def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info",
"def run_algorithm(algorithm, algorithm_kwargs, clustering_id):\n clustering = algorithm.perform_clustering(algorithm_kwargs)\n return (clustering_id, clustering)",
"def test_4_default():\n for clustering in CLUSTERING:\n print('#### clustering algo tested: {0}'.format(clustering))\n\n granatum = GranatumClustering(\n n_components=5,\n selected_clustering=clustering)\n\n results = granatum.fit(TEST_DATASET)\n print('number of clusters found: {0}'.format(results['n_clusters']))\n\n assert('clusters' in results)\n assert('n_clusters' in results)\n assert('n_components' in results)\n assert('embedding' in results)\n assert('clustering_algorithm' in results)",
"def compute_clustering_score():\n # TODO: Implement simple clustering\n raise NotImplementedError()",
"def cluster(source_dir, algorithm='DBSCAN', initial_eps=0.44, iterations=1, max_distance=50, mosaic=True):\n\n from sklearn.cluster import DBSCAN\n from sklearn.cluster import OPTICS\n from sklearn.cluster import AgglomerativeClustering\n\n global network_dir, face_db, cluster_dir, output_dir\n\n output_dir=os.path.join(source_dir, \"Face Network/\")\n network_dir=os.path.join(output_dir, \"Data/\")\n face_db=pd.read_hdf(network_dir+\"FaceDatabase.h5\")\n cluster_dir=os.path.join(output_dir, \"Clusters/\")\n face_dir=os.path.join(output_dir, \"Faces/\")\n\n # Create empty df to store results\n final_results=pd.DataFrame()\n \n exit=False\n\n for i in range(1,iterations+1):\n\n print('Iteration {}, Algorithm:{}, EPS: {}'.format(i,algorithm,initial_eps))\n \n encodings=list(face_db['encoding'])\n face_names=list(face_db['face_name'])\n img_names=list(face_db['img_name'])\n\n\n if algorithm=='OPTICS':\n clt = OPTICS()\n clt.fit(encodings)\n exit=True\n\n if algorithm=='DBSCAN':\n\n # Decrease EPS by 0.01 each iteration \n eps=initial_eps-(i/100)\n clt = DBSCAN(eps=eps, min_samples=3, n_jobs=-1, metric='euclidean', algorithm='kd_tree')\n clt.fit(encodings)\n\n if algorithm=='AHC':\n eps=3-.2\n clt = AgglomerativeClustering(distance_threshold=eps, compute_full_tree=True, n_clusters=None)\n\n # Conduct clustering and save results to a dataframe\n model=clt.fit(encodings)\n clt.labels_=clt.labels_#+1\n\n #plot_dendrogram(model, img_names)\n\n\n results=pd.DataFrame({'face_name':face_names, 'img_name':img_names, 'cluster':clt.labels_, 'encoding':encodings})\n\n \n def parallel_apply(chunk, df, core=False):\n if core:\n chunk['cluster_distance_core']=chunk.apply(lambda x: match(x, df, core=True), axis=1)\n else:\n chunk['cluster_distance']=chunk.apply(lambda x: match(x, df), axis=1)\n return chunk\n\n cpus=joblib.cpu_count()-1\n df_split = np.array_split(results, cpus)\n\n rows=Parallel(n_jobs=cpus)(delayed(parallel_apply)(chunk, results) for chunk in df_split)\n results=pd.concat(rows)\n\n rows=Parallel(n_jobs=cpus)(delayed(parallel_apply)(chunk, results, core=True) for chunk in df_split)\n results=pd.concat(rows)\n\n\n # Small clusters and faces with high cosine distance (bad matches) are assigned to a bin cluster with ID -2\n results['cluster']=np.where(results['cluster_distance_core']>max_distance+10,-2,results['cluster'])\n counts=results.groupby('cluster')['face_name'].count().reset_index().rename(columns={'face_name':'count'})\n results=results.merge(counts, how='left',on='cluster')\n results['cluster']=np.where(results['count']<5,-2,results['cluster'])\n results=results.drop(columns='count')\n\n # Calculate the median cosine distance and percentage of outliers for each cluster. 
\n outliers=results.groupby('cluster')[['cluster_distance_core']].agg({'cluster_distance_core':'median'}).reset_index().rename(columns={'cluster_distance_core':'cluster_distance_mean'})\n results=results.merge(outliers, how='left',on='cluster')\n\n # Assign clusters with a high average cosine distance and those in the bin clusters (-1, -2) to face_db for reanalysis\n \n # Add faces in clusters with low average cosine distance (<40) to final output\n face_db=results[(results['cluster_distance_mean']>max_distance) | (results['cluster']<0)]\n results=results[(results['cluster_distance_mean']<=max_distance) & (results['cluster']>=0)]\n\n # Count the number of images in each cluster\n counts=results.groupby('cluster')['face_name'].count().reset_index().rename(columns={'face_name':'count'})\n results=results.merge(counts, how='left',on='cluster')\n \n # Generate a cluster code; the first four numbers indicate the number of the iteration, followed by the cluster ID.\n results['cluster']=results['cluster'].apply(lambda x: int((str(i)*4 )+ str(x)))\n final_results=final_results.append(results)\n\n print(\"Matched: \", len(final_results),\"(+{})\".format(len(results)))\n print(\"Unmatched: \", len(face_db))\n\n #exit=True\n # When no new matches are found, switch to a more flexible clustering algorithm for the final pass.\n # OPTICS allows for clusters of varying densities. \n\n if i>(iterations-1)/2:\n algorithm='DBSCAN'\n\n #if (len(results) ==0 or i==iterations-1):\n # algorithm='OPTICS'\n\n if (len(results) ==0 or len(face_db)==0):\n exit=True\n\n if exit:\n break\n\n face_db['cluster']=-2\n final_results=final_results.append(face_db).sort_values(by='count',ascending=False)\n from sklearn import preprocessing\n le=preprocessing.LabelEncoder()\n le.fit(final_results['cluster'])\n final_results['cluster']=le.transform(final_results['cluster'])\n\n final_results.reset_index(inplace=False)\n final_results.to_hdf(network_dir+'FaceDatabase.h5', 'index', 'w',complevel=9) \n\n if mosaic:\n # build a mosaic of face tiles for each cluster\n overwrite(cluster_dir)\n clusters=final_results['cluster'].unique().tolist()\n clusters = [ elem for elem in clusters if elem > 0]\n cpus=joblib.cpu_count()-1\n rows=Parallel(n_jobs=cpus)(delayed(build_mosaic)(cluster,final_results,face_dir,cluster_dir) for cluster in clusters)\n\n return final_results",
"def compute_measures(net, dict):\n # N -- number of network nodes\n N = nx.number_of_nodes(net)\n dict['#nodes'] = N\n\n # L -- number of links\n L = nx.number_of_edges(net)\n dict['#edges'] = L\n\n # D -- density\n D = nx.density(net)\n dict['density'] = D\n\n # d -- diameter\n max_sub = max(nx.connected_component_subgraphs(net), key=len)\n d = nx.diameter(max_sub)\n dict['diameter'] = d\n\n # C -- average clustering coefficient\n C = nx.average_clustering(net, count_zeros=True)\n dict['avg_cc'] = C",
"def get_clustered_data(self, alg):\n\n algorithm = self.clustering_algorithms[alg]\n # print(self.datasets)\n re_X_data = {}\n for i_dataset, (dataset, algo_params) in enumerate(self.datasets):\n X = dataset\n if isinstance(X, pd.DataFrame):\n for i in range(len(X.index)):\n data_id = algorithm.labels_[i]\n if data_id not in re_X_data:\n re_X_data[data_id] = X.iloc[i].transpose()\n else:\n re_X_data[data_id] = pd.concat((re_X_data[data_id], X.iloc[i]), axis=1)\n\n for key in re_X_data:\n re_X_data[key] = re_X_data[key].transpose()\n\n elif not isinstance(X, pd.DataFrame):\n for i in range(len(X)):\n data_id = algorithm.labels_[i]\n if data_id not in re_X_data:\n re_X_data[data_id] = np.array([X[i]])\n else:\n re_X_data[data_id] = np.concatenate((re_X_data[data_id], [X[i]]), axis=0)\n\n return re_X_data",
"def get_cluster_results(method='kmeans'):\n\n cluster_dir()\n if method == 'kmeans':\n recall_df = pd.read_pickle('kmeans_average_recall_ALL.pkl').drop('recall@n', axis=1)\n results_df = pd.read_pickle('kmeans_df.pkl')\n cluster_str = 'kmeans_clusters.pkl'\n elif method == 'lda':\n recall_df = pd.read_pickle('lda_average_recall_ALL.pkl').drop('recall@n', axis=1)\n results_df = pd.read_pickle('lda_df.pkl')\n cluster_str = 'lda_topics.pkl'\n else:\n raise ValueError('Please specify \"kmeans\" or \"lda\" for method parameter')\n\n recall_df['key'] = recall_df[['min_df', 'max_df', 'n_topics/clusters']].astype(str).agg('_'.join, axis=1)\n recall_df['key'] = recall_df['key'].apply(lambda x: '{}_{}'.format(method,\n x))\n recall_df.reset_index(inplace=True)\n recall_df.rename({'index': 'k'}, axis=1, inplace=True)\n recall_df.set_index('key', inplace=True, drop=False)\n\n recall = recall_df.pivot(columns='k', values='average_recall')\n total_subs = recall_df.pivot(columns='k', values='total_subs')\n recall_subs = recall.join(total_subs,\n how='outer',\n lsuffix='_av_recall',\n rsuffix='_av_subs',\n )\n recall_df = recall_df[['min_df', 'max_df', 'tsvd_components', 'n_topics/clusters']].drop_duplicates()\n recall_df = recall_df.join(recall_subs, how='outer')\n\n cluster_data = {}\n for vocab_dict in params.vocab_params_all:\n cluster_dir()\n vocab_dir(**vocab_dict)\n if method == 'kmeans':\n os.chdir('./tsvd_500')\n tsvd_components = 500\n clusters = pd.read_pickle('all_kmeans_clusters.pkl')\n elif method == 'lda':\n os.chdir('./lda')\n clusters = pd.read_pickle('all_lda_topics.pkl')\n tsvd_components = 'n/a'\n\n clusters.columns = clusters.columns.droplevel(0)\n health_clusters = clusters.loc[clusters.index.get_level_values(1) == 1]\n min_df = vocab_dict['min_df']\n max_df = vocab_dict['max_df']\n\n for col in clusters.columns:\n key = recall_df.loc[(recall_df['min_df'] == min_df) &\n (recall_df['max_df'] == max_df) &\n (recall_df['n_topics/clusters'] == col)].index[0]\n cluster_1 = health_clusters[col].value_counts().index[0]\n cluster_2 = health_clusters[col].value_counts().index[1]\n cluster_3 = health_clusters[col].value_counts().index[2]\n subs_1 = [sub[0] for sub in list(clusters.loc[clusters[col] == cluster_1].index)]\n subs_2 = [sub[0] for sub in list(clusters.loc[clusters[col] == cluster_2].index)]\n subs_3 = [sub[0] for sub in list(clusters.loc[clusters[col] == cluster_3].index)]\n\n cluster_data[key] = {\n '1_cluster_id': cluster_1,\n '2_cluster_id': cluster_2,\n '3_cluster_id': cluster_3,\n '1_health_subs': subs_1,\n '2_health_subs': subs_2,\n '3_health_subs': subs_3\n }\n\n final_df = pd.DataFrame.from_dict(cluster_data,\n orient='index',\n )\n final_df = recall_df.join(final_df, how='outer')\n final_df.sort_values(['min_df', 'n_topics/clusters'], inplace=True)\n\n return final_df",
"def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"",
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def agglomerative_clustering(self,models):\r\n clusters = dict([(model.article.id,ClusterModel(self.inv_index,model,cluster_method=self.cluster_method,freq_class_id=model.article.classification)) for model in models.values()])\r\n best_similarity = 1\r\n while best_similarity > self.threshold:\r\n most_similar = []\r\n for cl_1,index in zip(clusters,xrange(len(clusters))):\r\n for cl_2 in clusters:\r\n if cl_1 != cl_2:\r\n sim = clusters[cl_1].similarity(self.comp,clusters[cl_2])\r\n if sim >= self.threshold:\r\n most_similar.append((cl_1,cl_2,sim))\r\n if len(most_similar) > 0:\r\n most_similar = sorted(most_similar,key=operator.itemgetter(2))\r\n most_similar.reverse()\r\n sim_pair = most_similar[0]\r\n '''\r\n print \"Merging \" + str(sim_pair[0]) + \" and \" \\\r\n + str(sim_pair[1]) \\\r\n + \" with similarity \" + str(sim_pair[2])\r\n '''\r\n clusters[sim_pair[0]].merge(clusters[sim_pair[1]])\r\n if sim_pair[2] == 1.0:\r\n clusters[sim_pair[0]].print_articles()\r\n clusters.pop(sim_pair[1])\r\n best_similarity = sim_pair[2]\r\n if self.on_change:\r\n self.on_change(clusters=clusters)\r\n else:\r\n break\r\n # yield clusters,models\r\n return clusters,models",
"def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )",
"def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = {}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()",
"def get_sklearn_algorithms(verbose=False):\n from collections import defaultdict\n import importlib\n import sklearn\n algos = defaultdict(list)\n if verbose: print(dir(sklearn))\n for nom_module in dir(sklearn):\n if verbose: print(nom_module)\n try:\n to_import = \"sklearn.%s\" % nom_module\n module = importlib.import_module(to_import)\n for nom_fonction in dir(module):\n fonction = getattr(module, nom_fonction)\n if hasattr(fonction, \"fit\"):\n if verbose: print(\" nom algorithme = \", nom_fonction)\n algos[nom_module].append(fonction)\n except Exception as e:\n if verbose: print(e)\n if verbose: print(\"=\" * 30)\n return algos",
"def eigenvector_centrality(self, iterations = 100):\r\n vectorSet = {}\r\n for key, value in self.adj_list.items():\r\n vectorSet[key] = 1 # init vectorSet of everything = 1\r\n\r\n for i in range(iterations):\r\n for sender in self.adj_list:\r\n for r in sender:\r\n vectorSet[sender] = vectorSet[sender] + 1\r\n\r\n sum = 0\r\n for key, value in vectorSet.items():\r\n sum += value\r\n \r\n for key, value in vectorSet.items():\r\n vectorSet[key] = vectorSet[key] / sum\r\n\r\n return vectorSet",
"def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates a clustering's contingency matrix for each clustering algorithm stored in the list clustering_alg and adds it to the dict. | def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):
for (alg_name, alg_dict) in clustering_alg:
if "alg" in alg_dict:
clustering = alg_dict["alg"].fit(sdist)
labels_pred = clustering.labels_
alg_dict["labels"] = labels_pred
else:
labels_pred = alg_dict["labels"]
pred_label_dict, new_labels = normalize_labels(labels_pred)
alg_dict["cm"] = contingency_matrix(labels_true, new_labels) | [
"def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info",
"def _calc_cluster_sums(self):\n\n self._cluster_sum = defaultdict(int)\n for word in self.data:\n word_sum = 0\n for sense_id in self.data[word]:\n word_sum += len(self.data[word][sense_id][\"cluster\"])\n self._cluster_sum[word] += word_sum",
"def get_clustered_data(self, alg):\n\n algorithm = self.clustering_algorithms[alg]\n # print(self.datasets)\n re_X_data = {}\n for i_dataset, (dataset, algo_params) in enumerate(self.datasets):\n X = dataset\n if isinstance(X, pd.DataFrame):\n for i in range(len(X.index)):\n data_id = algorithm.labels_[i]\n if data_id not in re_X_data:\n re_X_data[data_id] = X.iloc[i].transpose()\n else:\n re_X_data[data_id] = pd.concat((re_X_data[data_id], X.iloc[i]), axis=1)\n\n for key in re_X_data:\n re_X_data[key] = re_X_data[key].transpose()\n\n elif not isinstance(X, pd.DataFrame):\n for i in range(len(X)):\n data_id = algorithm.labels_[i]\n if data_id not in re_X_data:\n re_X_data[data_id] = np.array([X[i]])\n else:\n re_X_data[data_id] = np.concatenate((re_X_data[data_id], [X[i]]), axis=0)\n\n return re_X_data",
"def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)",
"def get_clusters(self):\n dict_clusters = dict()\n predictions = self.get_predictions(self.data_train)\n df_cluster = pd.DataFrame({'target': self.target_train, 'cluster': predictions})\n total_read = df_cluster[df_cluster[\"target\"] == 1].shape[0]\n total_read_cluster0 = df_cluster[(df_cluster[\"target\"] == 1) & (df_cluster[\"cluster\"] == 0)].shape[0]\n if total_read_cluster0 > total_read/2:\n dict_clusters[\"read\"] = 0\n dict_clusters[\"notread\"] = 1\n else:\n dict_clusters[\"notread\"] = 0\n dict_clusters[\"read\"] = 1\n\n return dict_clusters",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def run(self, decomposed_clusters, matrix):\n analysis = {}\n analysis[\"total_num_elements\"] = 0\n analysis[\"total_num_clusters\"] = 0\n\n self.analyze_clustering(decomposed_clusters, matrix, analysis)\n\n self.analyze_clusters(decomposed_clusters, matrix, analysis)\n\n return analysis",
"def sum_cluster_affinities(memberships, aff_filename):\n\t\n\tf_aff = open(aff_filename)\n\n\taff_reader = csv.reader(f_aff,delimiter='\\t')\n\n\tclusters = {}\n\tfor row in aff_reader:\n\t\t#each for in aff file is a tuple, so need to get the parts\n\t\tuser_id = row[0]\n\t\tsr_id = row[1]\n\t\taffinity = float(row[2])\n\t\ttry:\n\t\t\tc_id = int(memberships[user_id])\n\t\t\ttry:\n\t\t\t\t#this will only work if the cluster_ID,sr_id exists\n\t\t\t\tclusters[c_id][sr_id] += affinity\n\t\t\texcept KeyError:\n\t\t\t\t#either that cluster id or the sr_id didn't exist,\n\t\t\t\t#so we must initialize it\n\t\t\t\ttry:\n\t\t\t\t\tclusters[c_id][sr_id] = affinity\n\t\t\t\texcept KeyError:\n\t\t\t\t\t#must be the first time for the cluster_id\t\t\n\t\t\t\t\tclusters[c_id] = { sr_id : affinity } \n\t\texcept KeyError:\n\t\t\t#apparently not all UIDs in clabel are in the output of srrecs.r\t \n\t\t\t#print user_id, \"in affinities.clabel but not in srrecs.r output\"\n pass\n\tf_aff.close()\n\treturn clusters",
"def compute_clustering_score():\n # TODO: Implement simple clustering\n raise NotImplementedError()",
"def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }",
"def eigenvector_centrality(self, iterations = 100):\r\n vectorSet = {}\r\n for key, value in self.adj_list.items():\r\n vectorSet[key] = 1 # init vectorSet of everything = 1\r\n\r\n for i in range(iterations):\r\n for sender in self.adj_list:\r\n for r in sender:\r\n vectorSet[sender] = vectorSet[sender] + 1\r\n\r\n sum = 0\r\n for key, value in vectorSet.items():\r\n sum += value\r\n \r\n for key, value in vectorSet.items():\r\n vectorSet[key] = vectorSet[key] / sum\r\n\r\n return vectorSet",
"def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters",
"def update(self, clusters):\n centroids = {}\n for cluster, coordinates in clusters.iteritems():\n sumLat = 0\n sumLong = 0\n for coordinate in coordinates:\n sumLat += float(coordinate[0])\n sumLong += float(coordinate[1])\n centroids[cluster] = (sumLat/float(len(coordinates)), sumLong/float(len(coordinates)))\n return centroids",
"def calc_cc(graph):\n\tclustering_coeffs = {}\n\tfor node in graph.nodes():\n\t\tclustering_coeffs[node] = { \"cc\" : nx.clustering(graph, node)}\n\tnx.set_node_attributes(graph, clustering_coeffs)",
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def divide_linkage_matrix_in_clusters(self, linkage_matrix, number_of_clusters):\n print (\"linkage matrix\", linkage_matrix, len(linkage_matrix))\n\n self.linkage_dict = {}\n self.cluster_dict = {}\n initial_key = len(linkage_matrix) + 1\n for row in linkage_matrix:\n list_needed = map(int, row[:2])\n print (\"row\", list_needed)\n self.linkage_dict[initial_key] = list_needed\n initial_key += 1\n end_key_id = initial_key - 1\n\n print (self.linkage_dict)\n for i in range(number_of_clusters):\n self.cluster_dict[i] = []\n print (self.cluster_dict, \"cluster_dict\", end_key_id)\n\n # initialising cluster dict\n for i in self.cluster_dict:\n print (self.linkage_dict[end_key_id][i])\n self.cluster_dict[i].append(self.linkage_dict[end_key_id][i])\n print (self.cluster_dict)\n\n # process cluster dict\n for i in self.cluster_dict.keys():\n print (i)\n processing_array = self.cluster_dict[i]\n print (processing_array)\n # Process processing array\n while True:\n element_changed_flag = 0\n for element in processing_array:\n if element in self.linkage_dict.keys():\n processing_array.remove(element)\n processing_array.extend(self.linkage_dict[element])\n element_changed_flag = 1\n break\n if element_changed_flag == 0:\n break\n print (processing_array)\n\n print(\"cluster of drugs dict\",self.cluster_dict)\n\n self.drugsdict = {}\n for i in self.cluster_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in self.cluster_dict:\n self.drugsdict[i] = [drugslist[index] for index in self.cluster_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n\n\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\",clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n #for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in 
clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\",robot_for_packs_dict)",
"def divide_linkage_matrix_in_clusters(self, linkage_matrix, number_of_clusters):\n initial_key = len(linkage_matrix)+1\n for row in linkage_matrix:\n list_needed = map(int, row[:2])\n self.linkage_dict[initial_key] = list_needed\n initial_key += 1\n end_key_id = initial_key - 1\n for i in range(number_of_clusters):\n self.cluster_dict[i] = []\n print (self.cluster_dict, \"cluster_dict\", end_key_id)\n\n # initialising cluster dict\n for i in self.cluster_dict:\n self.cluster_dict[i].append(self.linkage_dict[end_key_id][i])\n # process cluster dict\n for i in self.cluster_dict.keys():\n processing_array = self.cluster_dict[i]\n # Process processing array\n while True:\n element_changed_flag = 0\n for element in processing_array:\n if element in self.linkage_dict.keys():\n processing_array.remove(element)\n processing_array.extend(self.linkage_dict[element])\n element_changed_flag = 1\n break\n if element_changed_flag == 0:\n break\n\n print (\"cluster dict\", self.cluster_dict)",
"def cooccurrence_matrix(corpus):\n d = defaultdict(lambda: defaultdict(int))\n for text in tqdm(corpus):\n for i in range(len(text) - 1):\n for j in range(i + 1, len(text)):\n w1, w2 = sorted([text[i], text[j]])\n d[w1][w2] += 1\n return d",
"def compute_distance_matrix(self):\n clusters_list = self.clusters.keys() # Extract the list of keys of all clusters.\n # Iterate over all pairs in the clusters\n for clust_it_1 in range(len(clusters_list)):\n for clust_it_2 in range(clust_it_1 + 1, len(clusters_list)):\n # Ignore the distance to itself.\n if clust_it_1 == clust_it_2:\n continue\n else:\n # Compute the distance between all pairs\n distance = self.eucledian_distance(self.clusters[clust_it_1].center,\n self.clusters[clust_it_2].center)\n # Update the distances in distance matrix.\n self.distances[clust_it_1][clust_it_2] = distance\n self.distances[clust_it_2][clust_it_1] = distance"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
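Illustrative usage sketch for the eval_cluster_contingency entry above, assuming the function and its normalize_labels helper are importable from the same module; the toy distance matrix, the DBSCAN settings and the "manual" pre-labelled entry are invented for the example and exercise both branches of the function.

# Sketch only -- not part of the dataset entry. Assumes eval_cluster_contingency
# and normalize_labels (from the entry above) are in scope.
import numpy as np
from sklearn.cluster import DBSCAN

points = np.array([[0.0], [0.1], [5.0], [5.1]])
sdist = np.abs(points - points.T)                    # 4x4 precomputed distance matrix
labels_true = [0, 0, 1, 1]

clustering_alg = [
    ("dbscan", {"alg": DBSCAN(eps=0.5, min_samples=1, metric="precomputed")}),
    ("manual", {"labels": np.array([1, 1, 0, 0])}),  # hits the no-"alg" branch
]

eval_cluster_contingency(clustering_alg, labels_true, sdist)
for name, alg_dict in clustering_alg:
    print(name)
    print(alg_dict["cm"])                            # one contingency matrix per algorithm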
Modify the column name to make it Python-compatible as a field name. | def normalize_col_name(col_name, used_column_names, is_relation):
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find(LOOKUP_SEP) >= 0:
while new_name.find(LOOKUP_SEP) >= 0:
new_name = new_name.replace(LOOKUP_SEP, '_')
if col_name.lower().find(LOOKUP_SEP) >= 0:
# Only add the comment if the double underscore was in the original
# name
field_notes.append(
"Field renamed because it contained more than one '_' in a row."
)
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append(
'Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append(
"Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes | [
"def column_rename(self):\n # TODO develop method\n pass",
"def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]",
"def py_field_name(self, field):\n name = field.name\n name = as_identifier(name)\n if self.options(field).convert_case:\n name = from_camel_case(name)\n name = self._mangle_name(name)\n return name",
"def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__",
"def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)",
"def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name",
"def _append_timestamp_to_column_name(self, column):\n old_name = '\"{}\"'.format(column['api-name'])\n\n timestamp = self._get_timestamp()\n column['name'] += timestamp\n column['api-name'] += timestamp\n new_name = '\"{}\"'.format(column['api-name'])\n\n self.column_translation[old_name] = new_name",
"def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )",
"def escape_column_name(column):\n col_name = (\n column[\"name\"] if isinstance(column, collections.abc.Mapping) else str(column)\n )\n escaped_name = col_name.replace('\"', '\"\"')\n return f'\"{escaped_name}\"'",
"def table_name_converter(self, name):\n return name",
"def columnName(self, column):\n return self.gCustomColumnList[column]['name']",
"def get_name(self):\n return self.col_name",
"def capnp_field_name(self, field):\n name = field.name\n return as_identifier(name)",
"def _valid_column(column_name):\n return str(column_name)",
"def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")",
"def rename_col(self, old_name, new_name):\n if old_name==new_name:\n return\n self.add_col(new_name, self.col_types[self.col_index(old_name)],\n self[old_name])\n self.remove_col(old_name)",
"def FieldName(self) -> str:",
"def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name",
"def typed_column(self) -> str:\n\n return \"{}:{}\".format(self.name, self.dtype)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
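Illustrative usage sketch for the normalize_col_name entry above; the column names are invented and the commented results were traced by hand from the logic shown, so treat them as approximate (the function, LOOKUP_SEP and its re/keyword imports are assumed to be in scope).

# Sketch only -- not part of the dataset entry.
used = set()

name, params, notes = normalize_col_name("User ID", used, is_relation=False)
# -> roughly ('user_id', {'db_column': 'User ID'},
#             ['Field name made lowercase.',
#              'Field renamed to remove unsuitable characters.'])
used.add(name)

name, params, notes = normalize_col_name("class", used, is_relation=False)
# -> roughly ('class_field', {'db_column': 'class'},
#             ['Field renamed because it was a Python reserved word.'])

name, params, notes = normalize_col_name("customer_id", used, is_relation=True)
# -> roughly ('customer', {}, []) -- the trailing '_id' is dropped for relations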
Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. | def get_field_type(connection, table_name, row):
field_params = OrderedDict()
field_notes = []
is_geometry = False
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[
5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(
table_name, geo_col)
field_params.update(geo_params)
is_geometry = True
return field_type, field_params, is_geometry
# return getattr(models.fields, field_type), field_params | [
"def processClassField(cursor):\n type = None\n fieldChilds = list(cursor.get_children())\n if len(fieldChilds) == 0: # if there are not cursorchildren, the type is some primitive datatype\n type = cursor.type.spelling\n else: # if there are cursorchildren, the type is some non-primitive datatype (a class or class template)\n for cc in fieldChilds:\n if cc.kind == clang.cindex.CursorKind.TEMPLATE_REF:\n type = cc.spelling\n elif cc.kind == clang.cindex.CursorKind.TYPE_REF:\n type = cursor.type.spelling\n name = cursor.spelling\n canonicalType = cursor.type.get_canonical().spelling\n if cursor.type.kind == clang.cindex.TypeKind.CONSTANTARRAY:\n canonicalType = cursor.type.get_array_element_type().spelling\n\n return name, type, canonicalType",
"def get_field_type(self, table_name, field_name):\n \n dtype = self.field_types[(self.field_types.TABNAME == table_name) & (self.field_types.FIELDNAME == field_name)]['DATATYPE'].values[0] \n return dtype",
"def __parse_field_descriptor(self):\n self.catalog = self._read_length_coded_string()\n self.db = self._read_length_coded_string()\n self.table_name = self._read_length_coded_string()\n self.org_table = self._read_length_coded_string()\n self.name = self._read_length_coded_string().decode(self.connection.charset)\n self.org_name = self._read_length_coded_string()\n self.advance(1) # non-null filler\n self.charsetnr = unpack_uint16(self._read(2))\n self.length = unpack_uint32(self._read(4))\n self.type_code = ord(self._read(1))\n self.flags = unpack_uint16(self._read(2))\n self.scale = ord(self._read(1)) # \"decimals\"\n self.advance(2) # filler (always 0x00)\n \n # 'default' is a length coded binary and is still in the buffer?\n # not used for normal result sets...",
"def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params",
"def __get_type__(self, param):\r\n\t\tif isinstance(param, dbTableColumn):\r\n\t\t\tif param.__data__['type'] in ['INTEGER']:\r\n\t\t\t\treturn 'int'\r\n\t\t\telif param.__data__['type'] in ['VARCHAR', 'TEXT']:\r\n\t\t\t\treturn 'str'\r\n\t\t\telif param.__data__['type'] in ['REAL']:\r\n\t\t\t\treturn 'float'\r\n\t\t\telse:\r\n\t\t\t\treturn 'dbTableColumn'\r\n\t\telse:\r\n\t\t\treturn type(param).__name__",
"def _convert_field_type(row):\n return row",
"def GetInfoSchemaTableField(request, schema_table, field_name):\n handler = DetermineHandlerModule(request)\n \n result = handler.GetInfoSchemaTableField(request, schema_table, field_name)\n \n return result",
"def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)",
"def parse_description(_descriptions, _db_type):\n _field_names = []\n _field_types = []\n\n \"\"\"name, type_code, display_size, internal_size, precision, scale, null_ok\"\"\"\n\n for _column in _descriptions:\n _field_names.append(_column[0])\n if _db_type == DB_MYSQL:\n _field_types.append(mysql_type_to_sql_type(_column[1]))\n else:\n _field_types.append(_column[1])\n\n return _field_names, _field_types",
"def test_get_field_type_text_field(self):\n db_introspection = DatabaseIntrospection(self.connection)\n self.assertEqual(\n db_introspection.get_field_type(\n TypeCode.STRING,\n description=ColumnInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n internal_size=\"MAX\",\n ),\n ),\n \"TextField\",\n )",
"def field_type(self):\n return \"\"",
"def get_field_type(self, field):\n s = set()\n with arcpy.da.SearchCursor(self.parent, field) as cur:\n for row in cur:\n s.add(type(row[0]).__name__)\n return s",
"def findMetadataTable(self, field_name, field_type, log, study_id, lock):\n \n # log passed in from writeMetadataValue() - it's a list. At end of function, \n # exception handler will output contents of log to web for viewing if error\n # occurrs.\n \n try:\n table = ''\n field_name = field_name.upper()\n field_name.replace('\"', '')\n\n # Fill out the field list if it's the first call\n log.append('Length of fields is: {0}'.format(str(len(self.fields))))\n if len(self.fields) == 0:\n log.append('Filling out field list for table lookup. Current field is \"{0}\"'.format(field_name))\n lock.acquire()\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.find_metadata_table', [results])\n for tab_name, col_name in results:\n if col_name not in self.fields:\n self.fields[col_name] = []\n self.fields[col_name].append(tab_name)\n lock.release()\n \n log.append('field{} successfully filled out')\n \n if field_name in self.fields:\n # If there's only one hit we can assign it\n tables = self.fields[field_name]\n log.append('Type of variable is: %s' % str(tables))\n if len(self.fields[field_name]) == 1:\n table = self.fields[field_name][0]\n log.append('Field only in one table: %s' % table)\n \n # More than one table was found with this column name. Find the correct one\n # based on the study id\n else:\n log.append('Field in multiple tables(%s): %s' % (len(self.fields[field_name]), str(self.fields[field_name])))\n log.append('Study is is: %s' % study_id)\n for table_name in self.fields[field_name]:\n if str(study_id) in table_name:\n table = table_name\n \n # If table is not found, assume user-defined column\n else:\n \"\"\" Code may look bizarre... but here's why:\n 1. To streamline access and prevent blocking, we first check to see if the field\n does exist in the field list. If it does, we do not have to lock and can simply\n look up the table name.\n \n 2. If field is not in list, it must be a new column. In this case we must lock the \n code that handles new column creation. The catch is that if two threads both hit the lock\n with the same field name, one will get in and the other will block. Once the initial thread \n exists, it will have handled the new column, gotten the appropriate table name, and returned. \n The 2nd thread will now enter the critical section, however if we don't again check to see \n if the field is now in the field list, it will attempt to create the same column again and \n fail. Thus we check a 2nd time to see if the field exists and if so, simply read it from the \n field list. \n \"\"\"\n lock.acquire() \n if field_name in self.fields:\n log.append('Field now exists. Pulling from local list.')\n table = self.fields[field_name][0]\n log.append('Table name exists. Using \"%s\".' % table)\n else:\n log.append('Entities do not exist. Creating...')\n table = self.handleExtraData(study_id, field_name, field_type, log)\n log.append('Entities created. Table name is \"%s\"' % table)\n if field_name not in self.fields:\n self.fields[field_name] = [table]\n else:\n self.fields[field_name].append(table)\n lock.release()\n \n log.append('Returning from findMetadataTable with value: %s' % str(table))\n return table\n\n except Exception, e:\n lock.release()\n log.append('Exception caught: %s' % str(e))\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception('\\n'.join(log))",
"def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)",
"def prepare_field_type(self, obj):\n if hasattr(obj, 'metadata'):\n if isinstance(obj.metadata, GeographicFeatureMetaData):\n field_info = obj.metadata.fieldinformations.all().first()\n if field_info is not None:\n return field_info.fieldType\n else:\n return 'none'\n else:\n return 'none'\n else:\n return 'none'",
"def _get_column_type(self):\n sql = \"\"\"\nselect TABLE_NAME table_name\n, COLUMN_NAME column_name\n, COLUMN_TYPE column_type\n, CHARACTER_SET_NAME character_set_name\n, NULL table_schema\nfrom information_schema.COLUMNS\nwhere TABLE_SCHEMA = database()\nunion all\nselect TABLE_NAME table_name\n, COLUMN_NAME column_name\n, COLUMN_TYPE column_type\n, CHARACTER_SET_NAME character_set_name\n, TABLE_SCHEMA table_schema\nfrom information_schema.COLUMNS\norder by table_schema\n, table_name\n, column_name\"\"\"\n\n rows = StaticDataLayer.execute_rows(sql)\n\n for row in rows:\n key = '@'\n if row['table_schema']:\n key += row['table_schema'] + '.'\n key += row['table_name'] + '.' + row['column_name'] + '%type@'\n key = key.lower()\n value = row['column_type']\n\n if row['character_set_name']:\n value += ' character set ' + row['character_set_name']\n\n self._replace_pairs[key] = value",
"def property_field_type(value):\n if isinstance(value, numbers.Number) or _isnumpytype(value):\n ## pyshp doesn't handle the full variety of numeric types\n ## (float, double, long), only 'N'\n #if isinstance(value, numbers.Integral) or _isnumpyint(value):\n # desc = \"I\"\n #elif isinstance(value, numbers.Real) or _isnumpyfloat(value):\n # desc = \"O\"\n if isinstance(value, (numbers.Integral, numbers.Real)) \\\n or _isnumpyint(value) or _isnumpyfloat(value):\n desc = \"N\"\n else:\n raise TypeError(\"cannot choose the correct dBase type for \"\n \"{0}\\n\".format(type(value)))\n elif isinstance(value, str):\n desc = \"C\"\n elif isinstance(value, datetime.datetime):\n desc = \"@\"\n elif isinstance(value, datetime.date):\n desc = \"D\"\n elif isinstance(value, bool):\n desc = \"L\"\n else:\n raise TypeError(\"cannot choose the correct dBase type for \"\n \"{0}\\n\".format(type(value)))\n return desc",
"def get_unique_name(self, cursor, field_name=None):\n if cursor.kind in [CursorKind.UNEXPOSED_DECL]:\n return ''\n # covers most cases\n name = cursor.spelling\n if cursor.kind == CursorKind.CXX_BASE_SPECIFIER:\n name = cursor.type.spelling\n # if it's a record decl or field decl and its type is anonymous\n if name == '':\n # if cursor.is_anonymous():\n # a unnamed object at the root TU\n if (cursor.semantic_parent\n and cursor.semantic_parent.kind == CursorKind.TRANSLATION_UNIT):\n name = self.make_python_name(cursor.get_usr())\n log.debug('get_unique_name: root unnamed type kind %s',cursor.kind)\n elif cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL,CursorKind.FIELD_DECL]:\n name = self._make_unknown_name(cursor, field_name)\n log.debug('Unnamed cursor type, got name %s',name)\n else:\n log.debug('Unnamed cursor, No idea what to do')\n #import code\n #code.interact(local=locals())\n return ''\n if cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL, CursorKind.CXX_BASE_SPECIFIER]:\n names= {CursorKind.STRUCT_DECL: 'struct',\n CursorKind.UNION_DECL: 'union',\n CursorKind.CLASS_DECL: 'class',\n CursorKind.TYPE_REF: '',\n CursorKind.CXX_BASE_SPECIFIER: 'class'\n }\n name = '%s_%s'%(names[cursor.kind],name)\n log.debug('get_unique_name: name \"%s\"',name)\n return name",
"def get_eltype_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT type FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0][0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
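Illustrative usage sketch for the get_field_type entry above, using a hand-rolled stub in place of a real Django connection; the table name, row values and the stub's type mapping are invented, and the row layout simply mirrors the indexes the function reads (name, type code, display size, internal size, precision, scale).

# Sketch only -- not part of the dataset entry. Assumes get_field_type and its
# OrderedDict import (from the entry above) are in scope.
class _StubIntrospection:
    def get_field_type(self, data_type, row):
        # Unknown type codes raise KeyError, which the function above
        # turns into a guessed 'TextField'.
        return {"varchar": "CharField", "numeric": "DecimalField"}[data_type]

class _StubConnection:
    introspection = _StubIntrospection()

conn = _StubConnection()

# CharField: row[3] (internal size) becomes max_length
print(get_field_type(conn, "shop", ("name", "varchar", None, 120, None, None)))
# -> roughly ('CharField', OrderedDict([('max_length', 120)]), False)

# DecimalField with unknown precision/scale falls back to guessed defaults
print(get_field_type(conn, "shop", ("price", "numeric", None, None, None, None)))
# -> roughly ('DecimalField', OrderedDict([('max_digits', 10), ('decimal_places', 5)]), False)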
Authenticate a user based on an authentication code. | def authenticate_user(authentication_code):
for suffix in ('', '=', '=='):
attempt = authentication_code + suffix
decoded = base64.decodestring(attempt)
fields = decoded.split('_')
email, user_id, time_stamp, str_hex = fields
if time_stamp < time.time():
# Authentication Code Expired
raise seerpod_exceptions.AuthenticationCodeExpired('Authentication code expired',
response_data=authentication_code)
user = None #business_contact_api.BusinessContacts().get_user_detail_from_email(email)
if not user:
continue
if attempt == generate_authentication_code(
user.id, time_stamp, user.owner_email_id, user.password):
return user
# Invalid authentication code
raise seerpod_exceptions.InvalidAuthenticationCode('Invalid Authentication code',
response_data=authentication_code) | [
"def authenticate(self, code=None):\n params = {\n \"grant_type\": \"authorization_code\",\n \"code\": code\n }\n data = urllib.urlencode(params)\n request = urllib2.Request(SSO_TOKEN_URL, data)\n request.add_header(\"Authorization\", \"Basic \" + b64encode(\"{}:{}\".format(CREST_CLIENT_ID, CREST_SECRET)))\n\n try:\n response = urllib2.urlopen(request)\n sso_tokens = json.loads(response.read())\n except urllib2.URLError as e:\n logger.debug(e.message)\n return None\n\n request = urllib2.Request(SSO_VERIFY_URL)\n request.add_header(\"Authorization\", \"Bearer \" + sso_tokens[\"access_token\"])\n\n try:\n response = urllib2.urlopen(request)\n character_info = json.loads(response.read())\n except urllib2.URLError as e:\n logger.debug(e.message)\n return None\n\n try:\n user = User.objects.get(pk=character_info[\"CharacterID\"])\n except User.DoesNotExist:\n user = User.objects.create_user(character_info['CharacterID'], character_info['CharacterName'], sso_tokens[\"refresh_token\"])\n\n return user",
"def authenticate(self, request, **kwargs):\n\n self.request = request\n if not self.request:\n return None\n\n state = self.request.GET.get('state')\n code = self.request.GET.get('code')\n nonce = kwargs.pop('nonce', None)\n\n if not code or not state:\n return None\n\n reverse_url = import_from_settings('OIDC_AUTHENTICATION_CALLBACK_URL',\n 'oidc_authentication_callback')\n\n token_payload = {\n 'client_id': self.OIDC_RP_CLIENT_ID,\n 'client_secret': self.OIDC_RP_CLIENT_SECRET,\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': absolutify(\n self.request,\n reverse(reverse_url)\n ),\n }\n\n # Get the token\n token_info = self.get_token(token_payload)\n id_token = token_info.get('id_token')\n access_token = token_info.get('access_token')\n refresh_token = token_info.get('refresh_token')\n\n # Validate the token\n payload = self.verify_token(id_token, nonce=nonce)\n\n # Store users tokens\n usertokens, created = UserTokens.objects.update_or_create(\n user=payload['sub'],\n defaults={'access_token': access_token,\n 'refresh_token': refresh_token}\n )\n\n if payload:\n self.store_tokens(access_token, id_token)\n try:\n return self.get_or_create_user(access_token, id_token, payload)\n except SuspiciousOperation as exc:\n LOGGER.warning('failed to get or create user: %s', exc)\n return None\n\n return None",
"def auth():\n data = request.json\n if \"code\" not in data:\n return \"\", 400\n\n try:\n access_response = LoginService.get_access_code(data[\"code\"])\n check_response = LoginService.check_token(access_response.get(\"access_token\"))\n\n except requests.RequestException:\n return \"\", 400\n\n user = User.upsert(check_response.get(\"user_name\"), access_response.get(\"access_token\"),\n access_response.get(\"refresh_token\"))\n\n if user.superadmin:\n role = \"superadmin\"\n elif user.get_editable_groups():\n role = \"admin\"\n else:\n role = \"user\"\n token = LoginService.create_api_token(check_response.get(\"user_name\"), role)\n return token, 200",
"def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)",
"def requestAuthCode(self):\n pass",
"def authenticate_user():\n\n error = request.args.get(\"error\")\n if error:\n logger.warning(\"Google sent us an error via OAuth2: %s\", error)\n\n return redirect(url_for(\"login\"))\n\n # Get OAuth2 authentication code\n code = request.args.get(\"code\")\n\n # Exchange code for fresh credentials\n credentials = flow.step2_exchange(code)\n\n # Extract email and email verification\n id_token = credentials.id_token\n email = id_token[\"email\"]\n verified_email = id_token[\"email_verified\"]\n\n if verified_email is True:\n # Find the user with the given email\n try:\n user = FlaskUser(User.objects.get(email = email))\n except User.DoesNotExist:\n user = None\n\n if not user:\n flash(\"A Galah account does not exist for this email.\", \"error\")\n\n logger.info(\n \"User %s has attempted to log in via OAuth2 but an account \"\n \"does not exist for them.\", email\n )\n else:\n login_user(user)\n\n logger.info(\n \"User %s has succesfully logged in via OAuth2.\", email\n )\n\n return redirect(url_for(\"home\"))\n\n else:\n flash(\"Sorry, we couldn't verify your email\", \"error\")\n\n logger.info(\"User %s failed to authenticate with OAuth2 because \"\n \"their email has not been verified with google.\", email)\n\n return redirect(url_for(\"login\"))",
"def accesscode(request, code):\n employee = Employee.objects.get(access_code=code)\n user = employee.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponseRedirect('/')",
"def auth():\n cookie = request.cookies.get(cookie_name)\n if cookie:\n cookie = base64.b64decode(cookie.encode(encryption)).decode(encryption)\n cookie = cookie.split(',')\n\n new_cookie = ','.join(cookie[:-1])\n if is_tampered(new_cookie, cookie[-1]):\n return make_response('Code 403 if the cookie has been tampered.'), 403 \n is_admin = cookie[-2] == 'admin'\n if is_admin:\n return make_response('Code 200 if the user is the administrator.'), 200\n else:\n return make_response('Code 201 if the user is a simple user.'), 201\n else:\n return make_response('Code 403 if no cookie is present.'), 403",
"def authenticate(self):\n\n if self.is_linked(): # already authenticated\n return\n\n auth_func = None\n\n if self.auth_level == 'user':\n if not self.username or not self.password:\n raise ValueError('auth_level \"user\" requires a username and password.')\n auth_func = self._login\n elif self.auth_level == 'client':\n if not self.client_id or not self.client_secret:\n raise ValueError('auth_level \"client\" requires a client_id and client_secret.')\n auth_func = self._client_credentials\n else:\n raise ValueError('Invalid \"auth_level\" value.')\n\n try:\n res = auth_func()\n except RESTError, e:\n raise UsergridException(\"Unable to authenticate: \" + e.description)\n\n self.read_token(res)",
"def auth(self, code):\n # After the user has authorized this app for use in their Slack team,\n # Slack returns a temporary authorization code that we'll exchange for\n # an OAuth token using the oauth.access endpoint\n auth_response = self.client.api_call(\n \"oauth.access\",\n client_id=self.oauth[\"client_id\"],\n client_secret=self.oauth[\"client_secret\"],\n code=code\n )\n # To keep track of authorized teams and their associated OAuth tokens,\n # we will save the team ID and bot tokens to the global\n # authed_teams object\n team_id = auth_response[\"team_id\"]\n authed_teams[team_id] = {\"bot_token\":\n auth_response[\"bot\"][\"bot_access_token\"]}\n # Then we'll reconnect to the Slack Client with the correct team's\n # bot token\n self.client = SlackClient(authed_teams[team_id][\"bot_token\"])",
"def authenticate_with_github(username=None, password=None, code=None):\n if username is not None and password is not None:\n print(' (auth given as {}:{})'.format(username, '*'*len(password)))\n\n def _2fa_func():\n return code\n\n if code:\n return login(username, password, two_factor_callback=_2fa_func)\n else:\n return GitHub(username, password)",
"def custom(code: str, state: str):\n\tpath = frappe.request.path[1:].split(\"/\")\n\tif len(path) == 4 and path[3]:\n\t\tprovider = path[3]\n\t\t# Validates if provider doctype exists\n\t\tif frappe.db.exists(\"Social Login Key\", provider):\n\t\t\tlogin_via_oauth2(provider, code, state, decoder=decoder_compat)",
"def authenticate(self, data):\n if data['username'] in self.accounts:\n _password = self.accounts[data['username']]['password']\n\n md5 = hashlib.md5()\n md5.update(data['password'].encode())\n md5_password = md5.hexdigest()\n if _password == md5_password:\n print(settings.STATUS_CODE[200])\n # 日志\n msg = '用户%s成功登陆' % data['username']\n logger(msg)\n # 设置用户数据,用户家目录,用户当前目录为公共属性,方便调用\n self.user_obj = self.accounts[data['username']]\n self.user_obj['home'] = os.path.join(settings.USER_HOME_DIR, data['username'])\n self.user_current_dir = self.user_obj['home']\n\n return True\n else:\n print(settings.STATUS_CODE[201])\n return False\n else:\n print(settings.STATUS_CODE[202])\n return False",
"def auth_in_stage2(self,stanza):\r\n self.lock.acquire()\r\n try:\r\n if \"plain\" not in self.auth_methods and \"digest\" not in self.auth_methods:\r\n iq=stanza.make_error_response(\"not-allowed\")\r\n self.send(iq)\r\n return\r\n\r\n username=stanza.xpath_eval(\"a:query/a:username\",{\"a\":\"jabber:iq:auth\"})\r\n if username:\r\n username=from_utf8(username[0].getContent())\r\n resource=stanza.xpath_eval(\"a:query/a:resource\",{\"a\":\"jabber:iq:auth\"})\r\n if resource:\r\n resource=from_utf8(resource[0].getContent())\r\n if not username or not resource:\r\n self.__logger.debug(\"No username or resource found in auth request\")\r\n iq=stanza.make_error_response(\"bad-request\")\r\n self.send(iq)\r\n return\r\n\r\n if stanza.xpath_eval(\"a:query/a:password\",{\"a\":\"jabber:iq:auth\"}):\r\n if \"plain\" not in self.auth_methods:\r\n iq=stanza.make_error_response(\"not-allowed\")\r\n self.send(iq)\r\n return\r\n else:\r\n return self._plain_auth_in_stage2(username,resource,stanza)\r\n if stanza.xpath_eval(\"a:query/a:digest\",{\"a\":\"jabber:iq:auth\"}):\r\n if \"plain\" not in self.auth_methods:\r\n iq=stanza.make_error_response(\"not-allowed\")\r\n self.send(iq)\r\n return\r\n else:\r\n return self._digest_auth_in_stage2(username,resource,stanza)\r\n finally:\r\n self.lock.release()",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def authenticate(self, handler, data):\n http_client = AsyncHTTPClient()\n username = data['username']\n password = data['password']\n\n # if not self.check_whitelist(username):\n # return\n\n # Set the necessary params\n params = dict(\n grant_type=\"password\",\n username=username,\n password=password,\n client_id=\"GenePatternNotebook\"\n )\n\n url = url_concat(self.GENEPATTERN_URL + \"/gp/rest/v1/oauth2/token\", params)\n\n req = HTTPRequest(url,\n method=\"POST\",\n headers={\"Accept\": \"application/json\"},\n body='' # Body is required for a POST...\n )\n\n resp = None\n try:\n resp = yield http_client.fetch(req)\n except HTTPError as e:\n # This is likely a 400 Bad Request error due to an invalid username or password\n return\n\n if resp is not None and resp.code == 200:\n return username\n else:\n return",
"def check_code(self, data: dict) -> accounts_models.User:\n if not data.get('code'):\n raise ValueError('{\"detail\":\"' + str(_(\"The code field can not be empty\")) + '\"}')\n if not data.get('password'):\n raise ValueError('{\"detail\":\"' + str(_(\"The password can not be empty\")) + '\"}')\n try:\n user = accounts_models.User.objects.get(recovery=str(data.get('code')))\n except accounts_models.User.DoesNotExist:\n raise ValueError('{\"detail\":\"' +\n str(_(\"Code you sent does not match the one registered in your account\")) + '\"}')\n if user is None or user.is_active is False:\n raise ValueError('{\"detail\":\"' +\n str(_(\"In order to perform this operation, your account must be active\")) + '\"}')\n user.password = make_password(str(data.get('password')))\n user.recovery = ''\n user.save()\n return user",
"def authFunc(self,parts):\r\n # See if we have a auth entry in the\r\n self.auth = self.server.auth == parts[0]\r\n return \"+OK\" if self.auth else \"-ERR authentication password incorrect\"",
"def authenticate():\n if USER and PASS:\n url = BASE_URL + PATHS.get('auth') + '?method=BY_USERNAME'\n auth = (USER, PASS)\n\n HEADERS['Referer'] = 'https://www.altibox.no/login/'\n res = requests.get(url, headers=HEADERS, auth=auth)\n\n if res.status_code == 200:\n data = res.json()\n if data.get('status') == 'success':\n data = data.get('data')\n session = data.get('sessionTicket').get('identifier')\n user = data.get('user')\n log(f'AUTH => Authentication successful.', 2)\n log(f' Authenticated as {user.get(\"firstName\")} {user.get(\"lastName\")}.', 2)\n return session, user\n else:\n err = f'AUTH => Altibox: {data.get(\"message\")}.'\n raise RuntimeError(err)\n else:\n log(f'AUTH => Status code is not 200:', 0)\n log(f' Status => {res.status_code}', 0)\n log(f' Response => {res.text}', 0)\n else:\n err = f'AUTH => No credentials found. Exiting.'\n raise KeyError(err)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
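A short, hedged sketch of the padding-retry idea the authenticate_user entry above relies on: a base64 payload whose trailing '=' padding was stripped in transit can be recovered by re-appending '', '=' or '=='. The payload below is invented, and base64.b64decode is used as the Python 3 counterpart of the deprecated decodestring call in the entry.

# Sketch only -- not part of the dataset entry.
import base64
import binascii

payload = base64.b64encode(b"user@example.com_7_1700000000_deadbeef").decode()
stripped = payload.rstrip("=")               # what might arrive in a request URL

for suffix in ("", "=", "=="):
    try:
        decoded = base64.b64decode(stripped + suffix)
    except binascii.Error:
        continue
    print(decoded.split(b"_"))               # [email, user_id, time_stamp, hex digest]
    break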
Activates the specified MFA TOTP device for the user. Activation requires manual interaction with the Console. | def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"activate_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"mfaTotpDeviceId": mfa_totp_device_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=mfa_totp_token,
response_type="MfaTotpDeviceSummary")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=mfa_totp_token,
response_type="MfaTotpDeviceSummary") | [
"async def activate_application_token(self, apptoken, temptoken) -> bool:\n await self.raw_request(\n self.URL_ACTIVATE.format(apptoken=apptoken, temptoken=temptoken)\n )\n return True",
"def enable_mfa_device(self, user_name, serial_number,\r\n auth_code_1, auth_code_2):\r\n params = {'UserName' : user_name,\r\n 'SerialNumber' : serial_number,\r\n 'AuthenticationCode1' : auth_code_1,\r\n 'AuthenticationCode2' : auth_code_2}\r\n return self.get_response('EnableMFADevice', params)",
"def activate(self, key):\n url = self._base + 'user/activate'\n r = requests.get(url, params={\n 'activation_key': key\n })\n r.raise_for_status()",
"def enable_tfa(self):\n result = self._client.post(\"/profile/tfa-enable\")\n\n return result[\"secret\"]",
"def send_activate_firmware(self):\n pass",
"def activate_user(self):\n self.client.post(self.url_register, self.user, format='json')\n user = self.user['user']\n token = generate_token(user['username'])\n self.client.get(reverse(\"authentication:verify\", args=[token]))",
"def activate(activation_key):\n if SHA1_RE.search(activation_key):\n response = make_request('guest/user/activate/', {'act_key': activation_key, 'wi_data': settings.WI_DATA})\n if response['status'] == 'ok':\n return response\n return False",
"def activate_user(request, uid64, token):\n\n if check_token_and_save(User, uid64, token):\n return redirect('sucess-user-activate')\n return redirect('error-user-activate')",
"def activate_user(backend, user, *args, **kwargs):\n if not user.is_active:\n user.is_active = True\n user.save()",
"def mfa_otp_login(self, temp_token, one_time_pass):\n parameters = dict()\n parameters['mfaTokenId'] = temp_token\n parameters['otp'] = one_time_pass\n response = self.request('midas.mfa.otp.login', parameters)\n return response['token']",
"def req_display_otp(self):\n\n ret = self.ui_auth.create_new_one_time_pwd()\n if ret is not None:\n self.error_msg_queue_list.append(ret)",
"def confirm_tfa(self, code):\n self._client.post(\n \"/profile/tfa-enable-confirm\", data={\"tfa_code\": code}\n )\n\n return True",
"def activate(request, activation_key, template_name='registration/activate.html'):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n account.is_active = True\n account.save()\n return render(request, template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })",
"def activate_factor(self, state_token, factor_id, passcode, relay_state=None):\n request = {\n 'stateToken': state_token,\n 'passCode': passcode,\n 'relayState': relay_state\n }\n\n response = ApiClient.post_path(self, '/factors/{0}/lifecycle/activate'.format(factor_id), request)\n return Utils.deserialize(response.text, AuthResult)",
"def check_for_activate(self):\n try:\n # Attempt to activate. If the user has completed pairing on the,\n # backend, this will succeed. Otherwise it throws and HTTPError()\n\n token = self.data.get(\"token\")\n login = self.api.activate(self.state, token) # HTTPError() thrown\n\n # When we get here, the pairing code has been entered on the\n # backend and pairing can now be saved.\n # The following is kinda ugly, but it is really critical that we\n # get this saved successfully or we need to let the user know that\n # they have to perform pairing all over again at the website.\n try:\n IdentityManager.save(login)\n except Exception as e:\n self.log.debug(\"First save attempt failed: \" + repr(e))\n time.sleep(2)\n try:\n IdentityManager.save(login)\n except Exception as e2:\n # Something must be seriously wrong\n self.log.debug(\"Second save attempt failed: \" + repr(e2))\n self.abort_and_restart()\n\n if mycroft.audio.is_speaking():\n # Assume speaking is the pairing code. Stop TTS of that.\n mycroft.audio.stop_speaking()\n\n self.enclosure.activate_mouth_events() # clears the display\n\n # Notify the system it is paired\n self.gui.show_page(\"pairing_done.qml\", override_idle=False)\n self.bus.emit(Message(\"mycroft.paired\", login))\n\n self.pairing_performed = True\n with self.pair_dialog_lock:\n if self.mycroft_ready:\n # Tell user they are now paired\n self.speak_dialog(self.paired_dialog)\n mycroft.audio.wait_while_speaking()\n else:\n self.speak_dialog(\"wait.for.startup\")\n mycroft.audio.wait_while_speaking()\n\n # Un-mute. Would have been muted during onboarding for a new\n # unit, and not dangerous to do if pairing was started\n # independently.\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n # Send signal to update configuration\n self.bus.emit(Message(\"configuration.updated\"))\n\n # Allow this skill to auto-update again\n self.reload_skill = True\n except HTTPError:\n # speak pairing code every 60th second\n with self.counter_lock:\n if self.count == 0:\n self.speak_code()\n self.count = (self.count + 1) % 6\n\n if time.monotonic() > self.time_code_expires:\n # After 20 hours the token times out. Restart\n # the pairing process.\n with self.counter_lock:\n self.count = -1\n self.data = None\n self.handle_pairing()\n else:\n # trigger another check in 10 seconds\n self.__create_activator()\n except Exception as e:\n self.log.debug(\"Unexpected error: \" + repr(e))\n self.abort_and_restart()",
"def enable_privileged_commands(device_info, telnet_conn, read_delay=1):\n cmd = \"enable\\n\"\n telnet_conn.write(cmd)\n password_prompt = device_info['password_prompt']\n dummy, match, dummy = telnet_conn.expect([password_prompt], read_delay)\n if(match is not None):\n password = \"%s\\n\" % device_info['password']\n telnet_conn.write(password)\n admin_prompt = device_info['admin_prompt']\n telnet_conn.expect([admin_prompt], read_delay)",
"def send_mfa(\n self,\n form: object = None, # noqa: ARG002\n code: str = \"\",\n trusted_device: bool = True,\n ) -> None:\n el_otp = self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n el_otp.clear()\n el_otp.send_keys(code)\n\n el_verify = self._driver.find_element(By.CSS_SELECTOR, \"input[type=submit]\", timeout=5)\n if el_verify.accessible_name != \"Verify\":\n msg = f'{self.__class__.__name__}: Cannot find \"Verify\" button'\n raise IdpError(msg)\n\n if trusted_device:\n el_verify.click()\n\n self._stay_signed_in()",
"def test_eap_teap_basic_password_auth_pac(dev, apdev):\n check_eap_capa(dev[0], \"TEAP\")\n params = int_teap_server_params(eap_teap_auth=\"1\")\n hapd = hostapd.add_ap(apdev[0], params)\n eap_connect(dev[0], hapd, \"TEAP\", \"user\",\n anonymous_identity=\"TEAP\", password=\"password\",\n phase1=\"teap_provisioning=2\",\n ca_cert=\"auth_serv/ca.pem\", phase2=\"auth=MSCHAPV2\",\n pac_file=\"blob://teap_pac\")\n res = eap_reauth(dev[0], \"TEAP\")\n if res['tls_session_reused'] != '1':\n raise Exception(\"EAP-TEAP could not use PAC session ticket\")",
"def activate(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.activate(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Moves the specified tag namespace to the specified compartment within the same tenancy. To move the tag namespace, you must have the manage tagnamespaces permission on both compartments. For more information about IAM policies, see `Details for IAM`__. Moving a tag namespace moves all the tag key definitions contained in the tag namespace. | def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):
resource_path = "/tagNamespaces/{tagNamespaceId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_tag_namespace_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagNamespaceId": tag_namespace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_tag_namespace_compartment_detail)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_tag_namespace_compartment_detail) | [
"def delete_namespace_tag(self, namespace, tag_name):\n url = 'metadefs/namespaces/%s/tags/%s' % (namespace, tag_name)\n resp, _ = self.delete(url)\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp)",
"def remove_namespace(self, doc, namespace):\r\n ns = u'{%s}' % namespace\r\n nsl = len(ns)\r\n for elem in doc.getiterator():\r\n if elem.tag.startswith(ns):\r\n elem.tag = elem.tag[nsl:]\r\n else:\r\n pass",
"def remove_namespace(doc, namespace=u\"{http://www.EcoInvent.org/EcoSpold02}\"):\n ns = u'{}'.format(namespace)\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]",
"def delete_namespace_tags(self, namespace):\n url = 'metadefs/namespaces/%s/tags' % namespace\n resp, _ = self.delete(url)\n\n self.expected_success(204, resp.status)\n\n return rest_client.ResponseBody(resp)",
"def test_replace_namespaced_deployment(self):\n pass",
"def remove_namespace(doc, namespace):\n ns = u'{%s}' % namespace\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]",
"def replace_namespace(self, request, namespace):\n return self._replace(\n request,\n u\"namespaces\",\n None,\n namespace,\n )",
"def swap_tags(tag1, tag2):\n cfn_config = get_config()\n r53_conn = get_connection(R53)\n zone_name = cfn_config.data['master_zone']\n zone_id = r53_conn.get_hosted_zone_id(zone_name)\n legacy_name = \"{0}-{1}\".format(env.application, env.environment)\n record1 = \"stack.{0}.{1}\".format(tag1, legacy_name)\n record2 = \"stack.{0}.{1}\".format(tag2, legacy_name)\n stack_suffix1 = r53_conn.get_record(zone_name, zone_id, record1, 'TXT')\n stack_suffix2 = r53_conn.get_record(zone_name, zone_id, record2, 'TXT')\n fqdn1 = \"{0}.{1}\".format(record1, zone_name)\n fqdn2 = \"{0}.{1}\".format(record2, zone_name)\n r53_conn.update_dns_record(zone_id, fqdn1, 'TXT', '\"{0}\"'.format(stack_suffix2))\n r53_conn.update_dns_record(zone_id, fqdn2, 'TXT', '\"{0}\"'.format(stack_suffix1))",
"def remove_namespaces(tree):\n assert isinstance(tree, _Element)\n\n for element in tree.xpath(XPATH_NAMESPACED):\n element.tag = QName(element).localname\n deannotate(tree, cleanup_namespaces=True)",
"def test_rename_namespace(self):\n status, body = self.api_create_namespace(self.current_db, self.test_ns)\n self.assertEqual(True, status == self.API_STATUS['success'], body)\n\n ren_ns = \"rename_namespace\"\n status, body = self.api_rename_namespace(\n self.current_db, self.test_ns, ren_ns)\n self.assertEqual(True, status == self.API_STATUS['success'], body)\n\n status, body = self.api_get_namespace(self.current_db, ren_ns)\n self.validate_get_namespace_response(status, body)",
"def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")",
"def move_object(self, from_, to) -> None:\n # boto doesn't have a move operation, so we need to copy then delete\n self.bucket.copy(dict(Bucket=self.bucket.name, Key=from_), to)\n delete_request = {\"Objects\": [{\"Key\": from_}], \"Quiet\": True}\n self.bucket.delete_objects(Delete=delete_request)",
"def move_dimap(infile_prefix, outfile_prefix, to_tif):\n\n if to_tif:\n\n gdal.Warp(outfile_prefix.with_suffix(\".tif\"), infile_prefix.with_suffix(\".dim\"))\n\n else:\n\n # delete outfile if exists\n if outfile_prefix.with_suffix(\".data\").exists():\n delete_dimap(outfile_prefix)\n\n # move them\n try:\n infile_prefix.with_suffix(\".data\").rename(outfile_prefix.with_suffix(\".data\"))\n except OSError:\n\n shutil.copytree(infile_prefix.with_suffix(\".data\"), outfile_prefix.with_suffix(\".data\"))\n shutil.rmtree(infile_prefix.with_suffix(\".data\"))\n\n try:\n infile_prefix.with_suffix(\".dim\").rename(outfile_prefix.with_suffix(\".dim\"))\n except OSError:\n shutil.move(infile_prefix.with_suffix(\".dim\"), outfile_prefix.with_suffix(\".dim\"))",
"def clean_up_namespaces(node, namespace=None):\n if namespace is not None:\n Namespaces.delete_namespace(node, namespace)\n return\n\n namespace_copy = deepcopy(Namespaces.__namespaces)\n for namespace_name in namespace_copy:\n Namespaces.delete_namespace(node, namespace_name)",
"def remove_namespace(doc):\n for elem in doc.getiterator():\n elem.tag = etree.QName(elem).localname\n etree.cleanup_namespaces(doc)\n return doc",
"def add_pod_to_namespace(tx, pod_ip, namespace):\n tx.run(\"MATCH (p:Pod {pod_ip: $pod_ip}) \"\n \"MATCH (n:Namespace {name: $namespace}) \"\n \"MERGE (p)-[:associateTo]->(n)\",\n pod_ip=pod_ip, namespace=namespace)",
"def _migrate_ns(self, nodeid):\n if nodeid.NamespaceIndex in self.namespaces:\n nodeid = copy(nodeid)\n nodeid.NamespaceIndex = self.namespaces[nodeid.NamespaceIndex]\n return nodeid",
"def delete_namespace(self, request, namespace):\n return self._delete(\n request, u\"namespaces\", None, namespace,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a new dynamic group in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the dynamic group, which must be unique across all dynamic groups in your tenancy, and cannot be changed. Note that this name has to be also unique across all groups in your tenancy. You can use this name or the OCID when writing policies that apply to the dynamic group. For more information about policies, see `How Policies Work`__. You must also specify a description for the dynamic group (although it can be an empty string). It does not | def create_dynamic_group(self, create_dynamic_group_details, **kwargs):
resource_path = "/dynamicGroups"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_dynamic_group_details,
response_type="DynamicGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_dynamic_group_details,
response_type="DynamicGroup") | [
"def create_dynamic_group(data=None):\n return connector.SCIMGroup(\n displayName='display_name{0}'.format(uuid.uuid4()))",
"def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))",
"def create_course_group():\n group_id = request.args.get('group_id')\n\n form = GroupForm(request.form)\n if request.method == 'POST' and form.validate():\n # Check whether the group name is not empty\n if form.name.data == '':\n flash('The group name can not be empty!')\n raise Exception('The group name could not be empty')\n group_name = slugify(form.name.data, separator='_')\n\n # Check whether the group already exists\n parent_group = Group.query.filter_by(id=group_id).one_or_none()\n if group_name in [g.name for g in parent_group.children]:\n raise Exception('The group already exists.')\n\n new_group = new_group_to_db(group_name, parent_group, form)\n try:\n db.session.commit()\n flash('The new group is added,',\n 'and you are set as the admin of the new group')\n except:\n raise Exception('Could not create the group')\n\n return jsonify(group_id=new_group.id,\n group_slug=group_slugify(new_group.name, new_group.parent))\n else:\n raise Exception(\"Error occur\")",
"def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def create_group(self, groupdata: Dict[str, Any]) -> Group:\n ...",
"def create_dynamic_thing_group(thingGroupName=None, thingGroupProperties=None, indexName=None, queryString=None, queryVersion=None, tags=None):\n pass",
"def create_group(self, inventory_id, payload):\n #payload = custom_marshal(payload, group_record, 'create')\n #payload['_id'] = ObjectId()\n count, records = base_obj.get(COLLECTIONS['INVENTORIES'], {\"groups.group_name\": payload['group_name']})\n if count > 0:\n abort(400, \"Group name Already Exists\")\n result = base_obj.update(COLLECTIONS['INVENTORIES'], {'_id': ObjectId(inventory_id)},\n {\"$push\": {'groups': payload}})\n result_meta = base_obj.update(COLLECTIONS['INVENTORIES'], {'_id': ObjectId(inventory_id)},\n {\"$set\": update_meta() })",
"def create(self, name):\n return self.post('groups.create', data={'name': name})",
"def create_group(self):\n sql = open_sql_file('create_group').format(self.group_name)\n self.redshift.query(sql)\n print('Group {} created.'.format(self.group_name))\n\n return",
"def create_ad_group(client, customer_id, campaign):\n operation = client.get_type('AdGroupOperation', version='v2')\n adgroup = operation.create\n adgroup_service = client.get_service('AdGroupService', version='v2')\n adgroup.name.value = 'Earth to Mars Cruises #{}'.format(uuid.uuid4())\n adgroup.campaign.value = campaign.resource_name\n adgroup.status = client.get_type('AdGroupStatusEnum',\n version='v2').ENABLED\n adgroup.type = client.get_type('AdGroupTypeEnum',\n version='v2').SEARCH_STANDARD\n adgroup.cpc_bid_micros.value = 10000000\n response = adgroup_service.mutate_ad_groups(customer_id, [operation])\n ad_group_resource_name = response.results[0].resource_name\n ad_group = get_ad_group(client, customer_id, ad_group_resource_name)\n print('Added AdGroup named {}'.format(ad_group.name.value))\n return ad_group",
"def create_gateway_group(Name=None, Description=None, ClientRequestToken=None):\n pass",
"def create_group (group_name):\n try:\n new_group = Group.objects.get (name = group_name)\n except:\n new_group = Group (name = group_name)\n new_group.save ()\n print 'Group %s created' %(group_name)",
"def create_group(self, **attrs):\n return self._create(_group.Group, **attrs)",
"def create_auth_group (auth_group_name):\n try:\n new_auth_group = auth.models.Group.objects.get (name = auth_group_name)\n except:\n new_auth_group = auth.models.Group (name = auth_group_name)\n new_auth_group.save ()\n print 'Group %s created' %(auth_group_name)",
"def create_group(self, **attrs):\n return self._create(_group.Group, prepend_key=False, **attrs)",
"def create_collective_group(self,\n backend,\n group_name,\n world_size,\n rank):\n pass",
"def new_group(client, description):\n pld = dict(description=description)\n resp = comm.sendpacket(client, op=comm.ccdlib.OP_NEWGROUP, pld=pld)[-1]\n\n try:\n grpid = resp[\"grpid\"]\n except KeyError:\n raise Exception(\"Invalid json format!\")\n\n group = ConnectionGroup(grpid, description)\n client.connection_groups[group.grpid] = group\n print(group.grpid)",
"def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)",
"def create_iam_group(group_name=\"terraform\", region=\"us-east-2\"):\n iam = boto3.client('iam', region_name=region)\n try:\n iam.create_group( Path='/', GroupName=group_name)\n except Exception as e:\n logging.error(e)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |