Dataset columns:
- query: string (length 9 to 9.05k)
- document: string (length 10 to 222k)
- negatives: list of strings (length 19 to 20)
- metadata: dict
GET THE Q GRID ============== This method gives back a list of q points given the reciprocal lattice vectors and the supercell size. The q points are returned in 2pi / a units, where a is the unit of measure of the unit_cell (usually Angstrom).
def GetQGrid(unit_cell, supercell_size, enforce_gamma_first = True):
    bg = Methods.get_reciprocal_vectors(unit_cell)

    n_vects = int(np.prod(supercell_size))
    q_final = np.zeros((3, n_vects), dtype = np.double, order = "F")
    q_final[:,:] = symph.get_q_grid(bg.T, supercell_size, n_vects)

    # Get the list of the closest vectors
    q_list = [Methods.get_closest_vector(bg, q_final[:, i]) for i in range(n_vects)]

    # Setup Gamma as the first vector
    if enforce_gamma_first:
        for i, q in enumerate(q_list):
            if np.abs(np.sum(q)) < __EPSILON__:
                tmp = q_list[0].copy()
                q_list[0] = q.copy()
                q_list[i] = tmp
                break

    return q_list
[ "def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list", "def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list", "def GetSupercellFromQlist(q_list, unit_cell):\n\n 
# Get the bravais lattice\n bg = Methods.get_reciprocal_vectors(unit_cell) \n\n # Convert the q points in crystalline units\n supercell = [1,1,1]\n\n for q in q_list:\n qprime = Methods.covariant_coordinates(bg, q)\n qprime -= np.floor(qprime)\n qprime[np.abs(qprime) < __EPSILON__] = 1\n\n rmax = 1/np.abs(qprime)\n for j in range(3):\n if supercell[j] < int(rmax[j] + .5):\n supercell[j] = int(rmax[j] + .5)\n \n return supercell", "def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs", "def CheckSupercellQ(unit_cell, supercell_size, q_list):\n # Get the q point list for the given supercell\n correct_q = GetQGrid(unit_cell, supercell_size)\n \n # Get the reciprocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Check if the vectors are equivalent or not\n for iq, q in enumerate(q_list):\n for jq, qnew in enumerate(correct_q):\n if Methods.get_min_dist_into_cell(bg, q, qnew) < __EPSILON__:\n correct_q.pop(jq)\n break\n \n if len(correct_q) > 0:\n print (\"[CHECK SUPERCELL]\")\n print (\" MISSING Q ARE \")\n print (\"\\n\".join([\" q =%16.8f%16.8f%16.8f \" % (q[0], q[1], q[2]) for q in correct_q]))\n return False\n return True", "def gridpts(q, dist=None):\n w = [[] for i in range(len(q[-1]))]\n for j in range(len(q)-1,-1,-1):\n for k in range(len(q[j])):\n for l in range(k*len(w)/len(q[j]), (k+1)*len(w)/len(q[j])):\n w[l].append(q[j][k])\n if j: w += [i[:] for i in w[:]*(len(q[j-1])-1)]\n pts = [list(reversed(w[i])) for i in range(len(w))]\n # inject some randomness\n if dist is None: return pts\n if not len(pts): return pts\n pts += dist((len(pts),len(pts[0])))\n return pts.tolist()", "def define_computational_grid():\n start_point = -0.35 # [m]\n end_point = 0.35 # [m] # Positions.get_position_coilA()\n return np.linspace(start_point, end_point, num=700)", "def test_subset_global_grid(self):\n lower_bound = 42 - 1.0e-7\n upper_bound = 52 + 1.0e-7\n constraint_dict = {\n \"latitude\": lambda cell: lower_bound <= cell.point <= upper_bound\n }\n constr = iris.Constraint(**constraint_dict)\n result = apply_extraction(\n self.global_gridded_cube, constr, longitude_constraint=[0, 7]\n )\n expected_data = np.array(\n [\n [1.0, 2.0, 3.0, 4.0],\n [9.0, 10.0, 11.0, 12.0],\n [17.0, 18.0, 19.0, 20.0],\n [25.0, 26.0, 27.0, 28.0],\n ]\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertArrayAlmostEqual(\n result.coord(\"longitude\").points, np.array([0.0, 2.0, 4.0, 6.0])\n )\n self.assertArrayAlmostEqual(\n result.coord(\"latitude\").points, np.array([45.0, 47.0, 49.0, 51.0])\n )", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 
0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def q(self):\n return self.coords.q", "def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]", "def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp", "def get_qeels_slice(data_stack: object, point: tuple,\n use_k_axis=False, starting_point=None) -> np.ndarray:\n if starting_point == None:\n centre = data_stack.get_centre(data_stack.pref_frame)\n else:\n centre = starting_point\n\n\n yp, xp = point\n path_length = int(np.hypot(xp-centre[1], yp-centre[0]))\n xsamp = np.linspace(centre[1], xp, path_length)\n ysamp = np.linspace(centre[0], yp, path_length)\n qmap = data_stack.stack[:,ysamp.astype(int),xsamp.astype(int)].T\n\n qaxis = np.zeros(int(path_length))\n data_stack.build_axes()\n\n\n if use_k_axis == False:\n mom_y, mom_x = np.meshgrid(data_stack.axis1, data_stack.axis2)\n mom_map = np.sqrt(mom_y**2 + mom_x**2)\n qaxis = mom_map[xsamp.astype(int), ysamp.astype(int)]\n else:\n if data_stack.naxis0 == None:\n raise ValueError('The transformed axes are not build, use transform_axis()')\n k_y, k_x = np.meshgrid(data_stack.naxis1, data_stack.naxis2)\n kmap = np.sqrt(k_x**2 + k_y**2)\n qaxis = kmap[xsamp.astype(int), ysamp.astype(int)]\n\n\n double_entries = np.asarray([])\n for i in range(0,len(qaxis)-1):\n if qaxis[i] == qaxis[i+1]:\n double_entries = np.append(double_entries, i)\n\n qaxis_sc = np.asarray([])\n qmap_sc = np.asarray([])\n for i in range(len(qaxis)):\n if i not in double_entries:\n qaxis_sc = np.append(qaxis_sc, qaxis[i])\n qmap_sc = np.append(qmap_sc, qmap[i])\n \"\"\" else:\n qm_avg = (qmap[i]+qmap[i+1])/2\n qaxis_sc = np.append(qaxis_sc, qaxis[i])\n qmap_sc = np.append(qmap_sc, qmap[i])\n \"\"\"\n qmap_sc = qmap_sc.reshape((len(qaxis_sc), qmap.shape[1]))\n return qmap_sc, qaxis_sc", "def QvQgrid():\n rnd.seed(513)\n \n N_GAMES = 75000 \n REPS = 5\n Q_init = 0.0\n \n epsilons = [0.1, 0.3, 0.5]\n alphas = [0.1,0.3,0.5,0.7,0.9]\n gammas = [-0.1,-0.3,-0.5,-0.7,-0.9]\n \n setting = [[e,a,g] for e in epsilons for a in alphas for g in gammas] \n \n for s in range(len(setting)):\n params = setting[s]\n epsilon = params[0]\n alpha = params[1]\n gamma = params[2]\n \n print('e:', epsilon, ' a:', alpha, ' g:', gamma)\n \n p1_opt_percs = []\n p1_winlose = []\n p2_opt_percs = []\n p2_winlose = []\n \n for i in range(REPS):\n bd = rnd.randint(0,9,3).tolist()\n if sum(bd) >= 0:\n starting_board_hash = get_hash(rnd.randint(0,9,3).tolist())\n else:\n starting_board_hash = get_hash([4,4,4]) # one in million chance this will be needed\n \n p1 = QAgent('p1', starting_board_hash, Q_init, epsilon, alpha, gamma)\n p2 = QAgent('p2', starting_board_hash, Q_init, epsilon, alpha, gamma)\n \n [p1_stats, p2_stats] = train_agents(N_GAMES, p1, p2, starting_board_hash, 1, -1, False)\n p1_opt_percs.append(p1_stats[0])\n p1_winlose.append(p1_stats[1])\n 
p2_opt_percs.append(p2_stats[0])\n p2_winlose.append(p2_stats[1]) \n \n file_name = '../final/QvQ/QvQ_optimal_moves' + str(epsilon) + str(alpha) + str(gamma) +'vSelfAll_'\n file_contents = p1_opt_percs + p2_opt_percs\n log_contents(file_name, file_contents)\n \n file_name = '../final/QvQ/QvQ_wins' + str(epsilon) + str(alpha) + str(gamma) +'vSelfAll_'\n file_contents = p1_winlose + p2_winlose\n log_contents(file_name, file_contents)\n\n print('learning complete')", "def get_q(self,coord='rc',unit='au'):\n if(coord=='rc'):\n return self.param['q_rc'];\n if(coord=='cc' and unit=='au'):\n return self.param['q_cc'];\n if(coord=='cc' and unit=='si'):\n return self.param['q_cc']/0.529177249;", "def liste_Qx (self):\n liste_matrices_Qx = [self.Qx(self.liste_J[self.liste_angles[pli]][1],\n self.liste_J[self.liste_angles[pli]][3],\n self.liste_Q0[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Qx", "def compute_grid_params_general(minlon, maxlon, minlat, maxlat, zerolon, zerolat):\n deltalon = (maxlon - minlon) * 111.00 * np.cos(np.deg2rad(zerolat)); # in km.\n deltalat = (maxlat - minlat) * 111.00; # in km.\n start_gridx = (minlon - zerolon) * 111.00 * np.cos(np.deg2rad(zerolat)); # in km\n finish_gridx = (maxlon - zerolon) * 111.00 * np.cos(np.deg2rad(zerolat)); # in km.;\n start_gridy = (minlat - zerolat) * 111.00; # in km.\n finish_gridy = (maxlat - zerolat) * 111.00; # in km.\n xinc = deltalon / 100.0;\n yinc = deltalat / 100.0;\n return [start_gridx, finish_gridx, start_gridy, finish_gridy, xinc, yinc];", "def make_all_q(data):\n if not data.has_no_finite_acceptance:\n return []\n elif data.has_yz_acceptance(data):\n # compute qx, qy\n Qx, Qy = np.meshgrid(qx, qy)\n return [Qx, Qy]\n else:\n # else only need q\n # data.has_z_acceptance\n return [q]", "def spherical_multiRegion_Green_Arnoldi_Nmn_Uconverge(n,k,RPlist, invchi, gridpts=10000, mpdps=60, klim=25, Taylor_tol=1e-12, Unormtol=1e-8, veclim=3, delveclim=2, maxveclim=40):\n mp.dps = mpdps #set mpmath precision\n #first step: generate the sub-bases and sub-Gmat/Uinvs for each block\n regionnum = len(RPlist)\n unitRgdotRglist = np.zeros(regionnum, dtype=type(1j*mp.one)) #values needed for computing coupling between different sub-bases in Gmat\n unitRgdotOutlist = np.zeros(regionnum, dtype=type(1j*mp.one)) #stored using mpmath to avoid underflow when calculating Gmat couplings\n unitImdotOutlist = np.zeros(regionnum, dtype=type(1j*mp.one))\n \n subGmatlist = []\n vecnum = 0\n subbasis_head_indlist = []\n rgridlist = []\n All_unitBvecs = []; All_unitPvecs = []\n for i in range(regionnum):\n print('N wave Region #', i)\n if i==0: #inner spherical region is special because it contains origin, use old mpmath Taylor Arnoldi code\n subbasis_head_indlist.append(0)\n rmRgN_Bpol, rmRgN_Ppol, rnImN_Bpol, rnImN_Ppol, unitrmnBpols, unitrmnPpols, Uinv = speedup_Green_Taylor_Arnoldi_RgNmn_Uconverge(n,k,RPlist[0],klim=klim, Taylor_tol=Taylor_tol, invchi=invchi, Unormtol=Unormtol)\n unitRgdotRglist[0] = mp.sqrt(rmnNnormsqr_Taylor(n,k,RPlist[0],rmRgN_Bpol,rmRgN_Ppol)) #unitRg dot Rg is just norm of the regular wave\n #for the inner sphere, the outgoing wave quantities are not relevant since the inner sphere contains origin\n subGmat = mp.eye(Uinv.rows)*invchi-Uinv\n subGmatlist.append(np.array(mpmath.fp.matrix(subGmat.tolist()).tolist()))\n vecnum += Uinv.rows\n \n rgrid = np.linspace(0,RPlist[0],gridpts)\n rgridlist.append(rgrid)\n for i in range(len(unitrmnBpols)-1):\n All_unitBvecs.append((k*rgrid)**(n-1) * po.polyval(k*rgrid, 
unitrmnBpols[i].coef))\n All_unitPvecs.append((k*rgrid)**(n-1) * po.polyval(k*rgrid, unitrmnPpols[i].coef))\n else:\n subbasis_head_indlist.append(vecnum)\n try:\n rgrid, rsqrgrid, rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs,unitPvecs, Uinv, Gmat = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge(n,k,RPlist[i-1],RPlist[i],invchi,gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim)\n OutBgrid = RgBgrid + 1j*ImBgrid\n OutPgrid = RgPgrid + 1j*ImPgrid\n unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid,RgPgrid,rsqrgrid,rdiffgrid))\n unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0],unitPvecs[0], OutBgrid,OutPgrid, rsqrgrid,rdiffgrid))\n unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1],unitPvecs[1], OutBgrid,OutPgrid, rsqrgrid,rdiffgrid))\n except FloatingPointError:\n rgrid, rsqrgrid, rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs,unitPvecs, Uinv, Gmat = shell_Green_grid_Arnoldi_RgandImNmn_Uconverge_mp(n,k,RPlist[i-1],RPlist[i],invchi,gridpts=gridpts, Unormtol=Unormtol, maxveclim=maxveclim)\n OutBgrid = RgBgrid + 1j*ImBgrid\n OutPgrid = RgPgrid + 1j*ImPgrid\n unitRgdotRglist[i] = mp.sqrt(rgrid_Nmn_normsqr(RgBgrid,RgPgrid,rsqrgrid,rdiffgrid))\n unitRgdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[0],unitPvecs[0], OutBgrid,OutPgrid, rsqrgrid,rdiffgrid))\n unitImdotOutlist[i] = mp.mpc(rgrid_Nmn_dot(unitBvecs[1],unitPvecs[1], OutBgrid,OutPgrid, rsqrgrid,rdiffgrid))\n Gmat = np.array(mpmath.fp.matrix(Gmat.tolist()).tolist())\n subGmatlist.append(Gmat)\n vecnum += Gmat.shape[0]\n \n rgridlist.append(rgrid)\n All_unitBvecs.extend(unitBvecs[:-2])\n All_unitPvecs.extend(unitPvecs[:-2])\n \n subbasis_head_indlist.append(vecnum) #for bookkeeping convenience put the total number of basis vectors at end of the subbasis family head index list\n Gmat = np.zeros((vecnum,vecnum),dtype=np.complex) #the Green's function representation for the entire domain\n for i in range(regionnum):\n indstart = subbasis_head_indlist[i]; indend = subbasis_head_indlist[i+1]\n Gmat[indstart:indend,indstart:indend] = subGmatlist[i][:,:]\n\n #print('RgdotRgN', unitRgdotRglist)\n #print('RgdotOut', unitRgdotOutlist)\n #print('ImdotOut', unitImdotOutlist)\n \n #next generate the couplings between different subbases\n jkcubed = 1j * k**3\n for i in range(regionnum):\n Rgiind = subbasis_head_indlist[i]\n Imiind = Rgiind+1\n #first do regions lying within region #i\n for j in range(i):\n Rgjind = subbasis_head_indlist[j]\n Gmat[Rgjind,Rgiind] = np.complex(jkcubed * unitRgdotRglist[j] * unitRgdotOutlist[i])\n Gmat[Rgjind,Imiind] = np.complex(jkcubed * unitRgdotRglist[j] * unitImdotOutlist[i])\n #then do regions lying outside region #i\n for j in range(i+1,regionnum):\n Rgjind = subbasis_head_indlist[j]\n Imjind = Rgjind+1\n Gmat[Rgjind,Rgiind] = np.complex(jkcubed * unitRgdotOutlist[j] * unitRgdotRglist[i])\n Gmat[Imjind,Rgiind] = np.complex(jkcubed * unitImdotOutlist[j] * unitRgdotRglist[i])\n \n #prepare for output\n #outputting Rgnormlist is for use later to construct source vectors\n #outputting subbasis_head_indlist is for use later to construct projection matrices\n Uinv = invchi*np.eye(vecnum) - Gmat\n \n #create an rgrid over the entire domain and extend the ptval representation of all the subbases onto the entire domain, for potential plotting purposes later\n fullrgrid = rgridlist[0].copy()\n rboundaries = [0,gridpts]\n for i in range(1,len(rgridlist)):\n fullrgrid = np.concatenate((fullrgrid,rgridlist[i][1:])) #1: so we don't have overlapping grid points\n 
rboundaries.append(len(fullrgrid))\n \n All_fullr_unitBvecs = []; All_fullr_unitPvecs = []\n for i in range(len(rgridlist)):\n for j in range(subbasis_head_indlist[i],subbasis_head_indlist[i+1]):\n vecBgrid = np.zeros_like(fullrgrid)\n vecPgrid = np.zeros_like(fullrgrid)\n if i==0:\n #print(All_unitMvecs[j])\n vecBgrid[rboundaries[i]:rboundaries[i+1]] = mparr_to_npreal(mp_re(All_unitBvecs[j][:]))\n vecPgrid[rboundaries[i]:rboundaries[i+1]] = mparr_to_npreal(mp_re(All_unitPvecs[j][:]))\n else:\n vecBgrid[rboundaries[i]:rboundaries[i+1]] = mparr_to_npreal(mp_re(All_unitBvecs[j][1:]))\n vecPgrid[rboundaries[i]:rboundaries[i+1]] = mparr_to_npreal(mp_re(All_unitPvecs[j][1:]))\n All_fullr_unitBvecs.append(vecBgrid)\n All_fullr_unitPvecs.append(vecPgrid)\n\n return Gmat, Uinv, unitRgdotRglist, subbasis_head_indlist, fullrgrid, All_fullr_unitBvecs,All_fullr_unitPvecs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE Q GRID ============== This method gives back a list of q points given the reciprocal lattice vectors and the supercell size.
def GetQGrid_old(unit_cell, supercell_size):

    q_list = []
    # Get the recirpocal lattice vectors
    bg = Methods.get_reciprocal_vectors(unit_cell)

    # Get the supercell
    supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell

    # Get the lattice vectors of the supercell
    bg_s = Methods.get_reciprocal_vectors(supercell)

    #print "SUPERCELL:", supercell_size

    for ix in range(supercell_size[0]):
        for iy in range(supercell_size[1]):
            for iz in range(supercell_size[2]):
                n_s = np.array( [ix, iy, iz], dtype = np.float64)
                q_vect = n_s.dot(bg_s)
                #q_vect = Methods.get_closest_vector(bg, q_vect)

                # Check if q is in the list
                count = 0
                for q in q_list:
                    if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:
                        count += 1
                        break
                if count > 0:
                    continue

                # Add the q point
                q_list.append(q_vect)

                # Check if -q and q are different
                if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:
                    q_list.append(-q_vect)

    return q_list
[ "def GetQGrid(unit_cell, supercell_size, enforce_gamma_first = True):\n bg = Methods.get_reciprocal_vectors(unit_cell)\n\n n_vects = int(np.prod(supercell_size))\n q_final = np.zeros((3, n_vects), dtype = np.double, order = \"F\")\n q_final[:,:] = symph.get_q_grid(bg.T, supercell_size, n_vects)\n\n # Get the list of the closest vectors\n q_list = [Methods.get_closest_vector(bg, q_final[:, i]) for i in range(n_vects)]\n\n # Setup Gamma as the first vector\n if enforce_gamma_first:\n for i, q in enumerate(q_list):\n if np.abs(np.sum(q)) < __EPSILON__:\n tmp = q_list[0].copy()\n q_list[0] = q.copy()\n q_list[i] = tmp \n break \n\n\n return q_list", "def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list", "def GetSupercellFromQlist(q_list, unit_cell):\n\n # Get the bravais lattice\n bg = Methods.get_reciprocal_vectors(unit_cell) \n\n # Convert the q points in crystalline units\n supercell = [1,1,1]\n\n for q in q_list:\n qprime = Methods.covariant_coordinates(bg, q)\n qprime -= np.floor(qprime)\n qprime[np.abs(qprime) < __EPSILON__] = 1\n\n rmax = 1/np.abs(qprime)\n for j in range(3):\n if supercell[j] < int(rmax[j] + 
.5):\n supercell[j] = int(rmax[j] + .5)\n \n return supercell", "def gridpts(q, dist=None):\n w = [[] for i in range(len(q[-1]))]\n for j in range(len(q)-1,-1,-1):\n for k in range(len(q[j])):\n for l in range(k*len(w)/len(q[j]), (k+1)*len(w)/len(q[j])):\n w[l].append(q[j][k])\n if j: w += [i[:] for i in w[:]*(len(q[j-1])-1)]\n pts = [list(reversed(w[i])) for i in range(len(w))]\n # inject some randomness\n if dist is None: return pts\n if not len(pts): return pts\n pts += dist((len(pts),len(pts[0])))\n return pts.tolist()", "def test_subset_global_grid(self):\n lower_bound = 42 - 1.0e-7\n upper_bound = 52 + 1.0e-7\n constraint_dict = {\n \"latitude\": lambda cell: lower_bound <= cell.point <= upper_bound\n }\n constr = iris.Constraint(**constraint_dict)\n result = apply_extraction(\n self.global_gridded_cube, constr, longitude_constraint=[0, 7]\n )\n expected_data = np.array(\n [\n [1.0, 2.0, 3.0, 4.0],\n [9.0, 10.0, 11.0, 12.0],\n [17.0, 18.0, 19.0, 20.0],\n [25.0, 26.0, 27.0, 28.0],\n ]\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertArrayAlmostEqual(\n result.coord(\"longitude\").points, np.array([0.0, 2.0, 4.0, 6.0])\n )\n self.assertArrayAlmostEqual(\n result.coord(\"latitude\").points, np.array([45.0, 47.0, 49.0, 51.0])\n )", "def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs", "def q(self):\n return self.coords.q", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]", "def CheckSupercellQ(unit_cell, supercell_size, q_list):\n # Get the q point list for the given supercell\n correct_q = GetQGrid(unit_cell, supercell_size)\n \n # Get the reciprocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Check if the vectors are equivalent or not\n for iq, q in enumerate(q_list):\n for jq, qnew in enumerate(correct_q):\n if Methods.get_min_dist_into_cell(bg, q, qnew) < __EPSILON__:\n 
correct_q.pop(jq)\n break\n \n if len(correct_q) > 0:\n print (\"[CHECK SUPERCELL]\")\n print (\" MISSING Q ARE \")\n print (\"\\n\".join([\" q =%16.8f%16.8f%16.8f \" % (q[0], q[1], q[2]) for q in correct_q]))\n return False\n return True", "def define_computational_grid():\n start_point = -0.35 # [m]\n end_point = 0.35 # [m] # Positions.get_position_coilA()\n return np.linspace(start_point, end_point, num=700)", "def get_qeels_slice(data_stack: object, point: tuple,\n use_k_axis=False, starting_point=None) -> np.ndarray:\n if starting_point == None:\n centre = data_stack.get_centre(data_stack.pref_frame)\n else:\n centre = starting_point\n\n\n yp, xp = point\n path_length = int(np.hypot(xp-centre[1], yp-centre[0]))\n xsamp = np.linspace(centre[1], xp, path_length)\n ysamp = np.linspace(centre[0], yp, path_length)\n qmap = data_stack.stack[:,ysamp.astype(int),xsamp.astype(int)].T\n\n qaxis = np.zeros(int(path_length))\n data_stack.build_axes()\n\n\n if use_k_axis == False:\n mom_y, mom_x = np.meshgrid(data_stack.axis1, data_stack.axis2)\n mom_map = np.sqrt(mom_y**2 + mom_x**2)\n qaxis = mom_map[xsamp.astype(int), ysamp.astype(int)]\n else:\n if data_stack.naxis0 == None:\n raise ValueError('The transformed axes are not build, use transform_axis()')\n k_y, k_x = np.meshgrid(data_stack.naxis1, data_stack.naxis2)\n kmap = np.sqrt(k_x**2 + k_y**2)\n qaxis = kmap[xsamp.astype(int), ysamp.astype(int)]\n\n\n double_entries = np.asarray([])\n for i in range(0,len(qaxis)-1):\n if qaxis[i] == qaxis[i+1]:\n double_entries = np.append(double_entries, i)\n\n qaxis_sc = np.asarray([])\n qmap_sc = np.asarray([])\n for i in range(len(qaxis)):\n if i not in double_entries:\n qaxis_sc = np.append(qaxis_sc, qaxis[i])\n qmap_sc = np.append(qmap_sc, qmap[i])\n \"\"\" else:\n qm_avg = (qmap[i]+qmap[i+1])/2\n qaxis_sc = np.append(qaxis_sc, qaxis[i])\n qmap_sc = np.append(qmap_sc, qmap[i])\n \"\"\"\n qmap_sc = qmap_sc.reshape((len(qaxis_sc), qmap.shape[1]))\n return qmap_sc, qaxis_sc", "def get_qubit_neighbour_list(self, d):\n\n count = 0\n qubit_dict = {}\n qubit_neighbours = []\n for row in range(d):\n for col in range(d):\n qubit_dict[str(tuple([row,col]))] = count\n cells = starmap(lambda a,b: (row+a, col+b), product((0,-1,+1), (0,-1,+1)))\n qubit_neighbours.append(list(cells)[1:])\n count +=1\n \n neighbour_list = []\n for qubit in range(d**2):\n neighbours = []\n for neighbour in qubit_neighbours[qubit]:\n if str(neighbour) in qubit_dict.keys():\n neighbours.append(qubit_dict[str(neighbour)])\n neighbour_list.append(neighbours)\n\n return neighbour_list", "def get_subgrids(grid):\r\n subgrids = []\r\n for box_i in range(4):\r\n for box_j in range(4):\r\n subgrid = []\r\n for i in range(4):\r\n for j in range(4):\r\n subgrid.append(grid[4 * box_i + i][4 * box_j + j])\r\n subgrids.append(subgrid)\r\n return np.array(subgrids)", "def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp", "def _compute_subgrids(self):\n\n\t\tgrid = self.minesweeper.grid\n\n\t\tsubgrids = []\n\t\tpos_list = []\n\t\tfor i, row in enumerate(grid):\n\t\t\tfor j, tile in enumerate(row):\n\t\t\t\tif tile == MaskedTile.MASKED:\n\t\t\t\t\tsubgrids.append(to_value_list(extract_subgrid(grid, i, j, self.subgrid_radius)))\n\t\t\t\t\tpos_list.append((i, j))\n\n\t\treturn pos_list, subgrids", "def grid(self, n = -1):\n if n >= 0:\n return _cantera.domain_grid(self._hndl, n)\n else:\n g = zeros(self.nPoints(),'d')\n for j in range(len(g)):\n g[j] = _cantera.domain_grid(self._hndl, 
j)\n return g", "def get_qsr_masks(self):\n for i in xrange(0,len(self.sorted_params)):\n\n #if i>3: continue\n if self.dbg: print \"\\nLOOP\", i\n cells = self.sorted_params[i][1] / self.res\n label = self.sorted_params[i][0]\n\n ##Make 'Touch' and 'Near' masks small enough to see on screen :)\n #if i == 0: cells = 3\n #elif i == 1: cells = 5\n\n if self.dbg: print \"cells = \", cells\n self.create_circle_mask(cells, i)", "def get_quad_points(self):\n return self.get_abstract_item(\"General\", \"Drag quadrature Points\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CHECK THE Q POINTS ================== This subroutine checks that the given q points of a dynamical matrix match the desired supercell. It is useful to spot bugs like wrong definitions of the alat units, or errors not spotted just by the number of q points (confusion between a 1,2,2 and a 2,1,2 supercell).
def CheckSupercellQ(unit_cell, supercell_size, q_list):
    # Get the q point list for the given supercell
    correct_q = GetQGrid(unit_cell, supercell_size)

    # Get the reciprocal lattice vectors
    bg = Methods.get_reciprocal_vectors(unit_cell)

    # Check if the vectors are equivalent or not
    for iq, q in enumerate(q_list):
        for jq, qnew in enumerate(correct_q):
            if Methods.get_min_dist_into_cell(bg, q, qnew) < __EPSILON__:
                correct_q.pop(jq)
                break

    if len(correct_q) > 0:
        print ("[CHECK SUPERCELL]")
        print (" MISSING Q ARE ")
        print ("\n".join([" q =%16.8f%16.8f%16.8f " % (q[0], q[1], q[2]) for q in correct_q]))
        return False
    return True
[ "def testQMatrix(self):\n # The data we have available is only accurate to the 4th decimal place. This should\n # be sufficient. kx and ky are given in the setup, fixed by our angles theta and phi.\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n kx = 1.0006;\n ky = 0.4247;\n\n # Zeroth, we actually have data for our gap layer\n er = 1.0 + sq(kx) + sq(ky);\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 1.1804],[-2.0013, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # First, we have some data for layer 1\n er = 2.0;\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 0.9987],[-1.8196, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # Now, we have some data for layer 2.\n er = 1.0;\n ur = 3.0;\n\n Q_actual = complexArray([[0.1417, 0.6662],[-0.9399, -0.1417]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);", "def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list", "def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs", "def test_ikfast_6d_case_1(self):\n i = 0\n for (qseed, pose) in zip(self.qseeds, self.poses):\n i += 1\n T = orpy.matrixFromPose(pose)\n with self.robot:\n self.robot.SetActiveDOFValues(qseed)\n ts = time.time()\n sol = self.manip.FindIKSolution(T, ikfilter_checkcollision)\n te = time.time()\n\n if sol is not None:\n self.total_time += te - ts\n self.no_success += 1\n \n with self.robot:\n self.robot.SetActiveDOFValues(sol)\n pose_actual = self.manip.GetTransformPose()\n if pose_actual[0] < 0:\n pose_actual[:4] *= -1.\n \n np.testing.assert_allclose(pose_actual, pose, rtol=1e-5, atol=1e-5)\n self.assertTrue((sol <= self.q_max).all(), msg=\"Violate joint limits\")\n self.assertTrue((self.q_min <= sol).all(), msg=\"Violate joint limits\")", "def test_ikfast_5d_case_1(self):\n i = 0\n for (initsol, qseed, T) in zip(self.qsols, self.qseeds, self.transformations):\n i += 1\n point = T[0:3, 3]\n direction = T[0:3, 2] / 
np.linalg.norm(T[0:3, 2])\n ikparam = orpy.IkParameterization(orpy.Ray(point, direction), iktype5D)\n with self.robot:\n self.robot.SetActiveDOFValues(qseed)\n ts = time.time()\n qsol = self.manip.FindIKSolution(ikparam, ikfilter_checkcollision)\n te = time.time()\n \n if qsol is not None:\n self.total_time += te - ts\n self.no_success += 1\n \n with self.robot:\n self.robot.SetActiveDOFValues(qsol)\n Tmanip = self.manip.GetTransform()\n\n # Check direction\n direction_actual = Tmanip[0:3, 2] / np.linalg.norm(Tmanip[0:3, 2])\n\n try:\n np.testing.assert_allclose(direction, direction_actual, \n rtol=1e-5, atol=1e-5)\n except:\n print 'initsol = np.' + repr(initsol)\n print 'qsol = np.' + repr(qsol)\n\n # Check position\n point_actual = Tmanip[0:3, 3]\n np.testing.assert_allclose(point_actual, point, \n rtol=1e-5, atol=1e-5)\n \n self.assertTrue((qsol <= self.q_max).all(), msg=\"Violate joint limits\")\n self.assertTrue((self.q_min <= qsol).all(), msg=\"Violate joint limits\")", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def test_m44_q_equivalence(self):\n m = Matrix4.from_x_rotation(np.pi / 2.)\n mq = Quaternion.from_matrix(m)\n\n q = Quaternion.from_x_rotation(np.pi / 2.)\n qm = Matrix4.from_quaternion(q)\n\n self.assertTrue(np.allclose(np.dot([1., 0., 0., 1.], m), [1., 0., 0., 1.]))\n self.assertTrue(np.allclose(np.dot([1., 0., 0., 1.], qm), [1., 0., 0., 1.]))\n\n self.assertTrue(np.allclose(q * Vector4([1., 0., 0., 1.]), [1., 0., 0., 1.]))\n self.assertTrue(np.allclose(mq * Vector4([1., 0., 0., 1.]), [1., 0., 0., 1.]))\n\n np.testing.assert_almost_equal(q, mq, decimal=5)\n np.testing.assert_almost_equal(m, qm, decimal=5)", "def test_Q_f(self):\n assert hasattr(self,'Q_f'), \"Q_f is undefined.\"\n assert (str(type(self.Q_f))==\"<class 'numpy.matrixlib.defmatrix.matrix'>\"\n and np.shape(self.Q_f)==(2,2)), \\\n \"Q_f must be a (2,2) numpy matrix. 
Default is 50*numpy.matrix(numpy.eye(2)).\"", "def validate(ddtable):\n margin_upp = ddtable.sum(axis=1).transpose()\n count_upp = count_vec(margin_upp)\n remainder_upp = np.remainder(margin_upp, count_upp)\n\n margin_low = ddtable.sum(axis=0)\n count_low = count_vec(margin_low)\n remainder_low = np.remainder(margin_low, count_low)\n\n if not ((remainder_low == 0).all() and (remainder_upp == 0).all()):\n return False\n\n # e_ij <= d^u_i * d^l_j\n div_upp = np.divide(margin_upp, count_upp)\n div_low = np.divide(margin_low, count_low)\n for i in xrange(0,div_upp.size):\n for j in xrange(0,div_low.size):\n if ddtable[i,j] > div_upp.A1[i] * div_low.A1[j]: # is this the right way to access this?\n print (i, j, ddtable[i,j], div_upp.A1[i] * div_low.A1[j])\n return False\n return True", "def check_is_q_node_column(input_dataframe, col_num) -> bool:\n if input_dataframe.iloc[:, col_num].dtype.name == \"object\":\n data = set(list(filter(None, input_dataframe.iloc[:, col_num].dropna().tolist())))\n if len(data) > 0 and all(re.match(r'^Q\\d+$', x) for x in data):\n return True\n return False", "def SymmetrizeDynQ(self, dyn_matrix, q_point):\n \n # TODO: implement hermitianity to speedup the conversion\n \n #Prepare the array to be passed to the fortran code\n QE_dyn = np.zeros( (3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n # Get the crystal coordinates for the matrix\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3]\n QE_dyn[:, :, na, nb] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, False)\n \n # Prepare the xq variable\n #xq = np.ones(3, dtype = np.float64)\n xq = np.array(q_point, dtype = np.float64)\n # print \"XQ:\", xq\n # print \"XQ_CRYST:\", Methods.covariant_coordinates(self.QE_bg.T, xq)\n # print \"NSYMQ:\", self.QE_nsymq, \"NSYM:\", self.QE_nsym\n # print \"QE SYM:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, :self.QE_nsymq])\n # print \"Other syms:\"\n # print np.einsum(\"abc->cba\", self.QE_s[:, :, self.QE_nsymq: self.QE_nsym])\n # print \"QE INVS:\"\n # print self.QE_invs[:self.QE_nsymq]\n # #print \"QE RTAU:\"\n # #print np.einsum(\"abc->bca\", self.QE_rtau[:, :self.QE_nsymq, :])\n # print \"IROTMQ:\", self.QE_irotmq\n # print \"MINUS Q:\", self.QE_minus_q\n # print \"IRT:\"\n # print self.QE_irt[:self.QE_nsymq, :]\n # print \"NAT:\", self.QE_nat\n\n # Inibhit minus q\n #self.QE_minus_q = 0\n \n \n # USE THE QE library to perform the symmetrization\n symph.symdynph_gq_new( xq, QE_dyn, self.QE_s, self.QE_invs, self.QE_rtau, \n self.QE_irt, self.QE_irotmq, self.QE_minus_q, self.QE_nsymq, self.QE_nat)\n \n # Return to cartesian coordinates\n for na in range(self.QE_nat):\n for nb in range(self.QE_nat):\n fc = QE_dyn[:, :, na, nb] \n dyn_matrix[3 * na : 3* na + 3, 3*nb: 3 * nb + 3] = Methods.convert_matrix_cart_cryst(fc, self.structure.unit_cell, True)", "def test_augment_q(Q):\n M, b = augment_Q(Q)\n assert M.shape == (10, 10)\n assert b.shape == (10, 1)\n assert all(b[0:-1]) == 0\n assert b[-1] == 1", "def test_brickq_array(self):\n b = B.Bricks()\n bqs = b.brickq(self.ra, self.dec)\n self.assertEqual(len(bqs), len(self.ra))\n self.assertTrue((bqs == self.brickqs).all())", "def test_get_q_prime(self):\n sb = solver.get_sb(p)\n sg = solver.get_sg(p)\n qprime = solver.get_q_prime(q, sb, sg)\n\n def explicit_q_k_prime(k, q, sb, sg):\n with np.errstate(divide='ignore'):\n return q[k] + q[k + 1] / (1 + 2 * sg[k] / sb[k])\n\n assert_almost_equals(qprime[0], 
explicit_q_k_prime(0, q, sb, sg))\n assert_almost_equals(qprime[1], explicit_q_k_prime(1, q, sb, sg))\n assert_almost_equals(qprime[2], explicit_q_k_prime(2, q, sb, sg))", "def test_quadratic(self):\n C = wilson.util.smeftutil.wcxf2arrays_symmetrized(wc_quadratic.dict)\n c_old = wilson.match._smeft_old.match_all_array(C, p)\n c_new = wilson.match.smeft_tree.match_all_array(C, p)\n for k in c_old:\n npt.assert_almost_equal(c_old[k], c_new[k], decimal=10,\n err_msg=f\"Failed for {k}\")", "def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break", "def test_set_get_Q(self):\n\t\tb = RigidBody()\n\n\t\tQ = [1,0,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,1,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,1,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,0,1]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\n\t\tQ = [0.5,0,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [1,0,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)\n\n\t\tQ = [3,-4,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [3/5,-4/5,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)", "def solutions_ok_quadratic(eq):\n s = diop_solve(eq)\n x, y = symbols(\"x, y\", Integer=True)\n ok = True\n\n while len(s) and ok:\n u, v = s.pop()\n\n if simplify(simplify(Subs(eq, (x, y), (u, v)).doit())) != 0:\n ok = False\n return ok", "def ApplyQStar(self, fcq, q_point_group):\n \n nq = np.shape(q_point_group)[0]\n final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)\n \n # Setup all the symmetries\n self.SetupQPoint()\n \n new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128, order = \"F\")\n \n dyn_star = np.zeros( (nq, 3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n for i in range(nq):\n # Get the q points order\n nq_new, sxq, isq, imq = symph.star_q(q_point_group[i,:], self.QE_at, self.QE_bg, \n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n\n #print \"Found 
nq:\", nq_new \n #print \"IMQ?\", imq\n\n # Check if the q star is correct\n if nq_new != nq and imq != 0:\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"Passed q star:\")\n print (q_point_group)\n print (\"QE q star:\")\n print (sxq[:, :nq_new].transpose())\n raise ValueError(\"Error, the passed q star does not match the one computed by QE\")\n# \n# # Print the star \n# print \"q point:\", q_point_group[i,:]\n# print \"Point in the stars:\", nq_new\n# print \"Star of q:\"\n# print sxq[:, :nq_new].transpose()\n# \n# print \"NEW_DYN:\", np.shape(new_dyn)\n# print \"AT:\", np.shape(self.QE_at)\n# print \"BG:\", np.shape(self.QE_bg)\n# print \"N SYM:\", self.QE_nsymq\n# print \"S:\", np.shape(self.QE_s)\n# print \"QE_INVS:\", np.shape(self.QE_invs)\n# print \"IRT:\", np.shape(self.QE_irt)\n# print \"RTAU:\", np.shape(self.QE_rtau)\n# print \"NQ_NEW:\", nq_new\n# print \"SXQ:\", np.shape(sxq)\n# print \"ISQ:\", np.shape(isq)\n# print \"IMQ:\", imq\n# print \"NAT:\", self.QE_nat\n \n new_dyn[:,:] = fcq[i,:,:]\n #print \"new dyn ready\"\n \n # Get the new matrix\n dyn_star = symph.q2qstar_out(new_dyn, self.QE_at, self.QE_bg, self.QE_nsymq, \n self.QE_s, self.QE_invs, self.QE_irt, self.QE_rtau,\n nq_new, sxq, isq, imq, nq, self.QE_nat)\n #print \"Fake\"\n \n #print \"XQ:\", q_point_group[i, :], \"NQ_NEW:\", nq_new\n\n # Now to perform the match bring the star in the same BZ as the q point\n # This facilitate the comparison between q points\n current_q = q_point_group.copy()\n #print \"Fake2\"\n# for xq in range(nq):\n# tmp = Methods.put_into_cell(self.QE_bg, sxq[:, xq])\n# sxq[:, xq] = tmp\n# current_q[xq,:] = Methods.put_into_cell(self.QE_bg, current_q [xq,:])\n# \n # Print the order of the q star\n sorting_q = np.arange(nq)\n for xq in range(nq):\n count = 0 # Debug (avoid no or more than one identification)\n for yq in range(nq):\n real_y = yq\n dot_f = 1\n if imq == 0 and yq >= nq_new:\n real_y -= nq_new\n dot_f = -1\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), dot_f* sxq[:, real_y], current_q[xq,:]) < __EPSILON__: \n sorting_q[xq] = yq\n count += 1\n \n if count != 1:\n print (\"Original star:\")\n print (q_point_group)\n print (\"Reshaped star:\")\n print (current_q)\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"STAR:\")\n print (sxq[:, :nq_new].transpose() )\n pta = (current_q[xq,:])\n print (\"Distances of xq in the QE star:\")\n for yq in range(nq_new):\n print (\"%.4f %.4f %.4f => \" % (sxq[0, yq], sxq[1, yq], sxq[2, yq]), Methods.get_min_dist_into_cell(self.QE_bg.transpose(), sxq[:, yq], current_q[xq,:]))\n raise ValueError(\"Error, the vector (%.3f, %.3f, %.3f) has %d identification in the star\" % (pta[0], pta[1], pta[2],\n count))\n #print \"Sorting array:\"\n #print sorting_q\n \n \n # Copy the matrix in the new one\n for xq in range(nq):\n for xat in range(self.QE_nat):\n for yat in range(self.QE_nat):\n final_fc[xq, 3*xat: 3*xat + 3, 3*yat : 3*yat + 3] += dyn_star[sorting_q[xq], :,:, xat, yat] \n \n \n # Now divide the matrix per the xq value\n final_fc /= nq\n \n # Overwrite the matrix\n fcq[:,:,:] = final_fc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET NEW Q POINTS AFTER A CELL STRAIN ==================================== This method returns the new q points after the unit cell is changed. Remember to keep the same kind of cell when changing it (cubic, orthorhombic, hexagonal...), otherwise the star identification will fail. The q points are passed (and returned) in cartesian coordinates.
def GetNewQFromUnitCell(old_cell, new_cell, old_qs):

    bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)
    new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)

    new_qs = []
    for iq, q in enumerate(old_qs):
        # Get the q point in crystal coordinates
        new_qprime = Methods.covariant_coordinates(bg, q)

        # Convert the crystal coordinates in the new reciprocal lattice vectors
        new_q = np.einsum("ji, j", new_bg, new_qprime)
        new_qs.append(new_q)

    return new_qs
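A self-contained sketch of the remapping performed by GetNewQFromUnitCell above, using plain NumPy instead of Methods.covariant_coordinates: the crystal (covariant) coordinates of q are kept fixed while the reciprocal basis is swapped from the old cell to the new one. remap_q_to_new_cell is a hypothetical name used only for illustration.

import numpy as np

def remap_q_to_new_cell(old_cell, new_cell, q):
    # Hypothetical sketch: keep crystal coordinates fixed, change the reciprocal basis.
    bg_old = np.linalg.inv(old_cell).T
    bg_new = np.linalg.inv(new_cell).T
    frac = np.linalg.solve(bg_old.T, np.asarray(q, dtype=float))  # crystal coordinates
    return frac @ bg_new                                          # back to cartesian

# Stretching a cubic cell by 10% shrinks the q point by the same factor
q_old = np.array([0.25, 0.0, 0.0])
print(remap_q_to_new_cell(np.eye(3), 1.1 * np.eye(3), q_old))  # ~[0.227, 0, 0]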
[ "def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list", "def GetQStar(self, q_vector):\n self.SetupQPoint()\n nq_new, sxq, isq, imq = symph.star_q(q_vector, self.QE_at, self.QE_bg,\n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n #print (\"STAR IMQ:\", imq)\n if imq != 0:\n total_star = np.zeros( (nq_new, 3), dtype = np.float64)\n else:\n total_star = np.zeros( (2*nq_new, 3), dtype = np.float64)\n\n total_star[:nq_new, :] = sxq[:, :nq_new].transpose()\n\n if imq == 0:\n total_star[nq_new:, :] = -sxq[:, :nq_new].transpose()\n\n return total_star", "def q(self):\n return self.coords.q", "def ApplyQStar(self, fcq, q_point_group):\n \n nq = np.shape(q_point_group)[0]\n final_fc = np.zeros(np.shape(fcq), dtype = np.complex128)\n \n # Setup all the symmetries\n self.SetupQPoint()\n \n new_dyn = np.zeros( (3 * self.QE_nat, 3*self.QE_nat), dtype = np.complex128, order = \"F\")\n \n dyn_star = np.zeros( (nq, 3, 3, self.QE_nat, self.QE_nat), dtype = np.complex128, order = \"F\")\n \n for i in range(nq):\n # Get the q points order\n nq_new, sxq, isq, imq = symph.star_q(q_point_group[i,:], self.QE_at, self.QE_bg, \n self.QE_nsymq, self.QE_s, self.QE_invs, 0)\n \n\n #print \"Found nq:\", nq_new \n #print \"IMQ?\", imq\n\n # Check if the q star is correct\n if nq_new != nq and imq != 0:\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"Passed q star:\")\n print (q_point_group)\n print (\"QE q star:\")\n print (sxq[:, :nq_new].transpose())\n raise ValueError(\"Error, the passed q star does not match the one computed by QE\")\n# \n# # Print the star \n# print \"q point:\", q_point_group[i,:]\n# print \"Point in the stars:\", nq_new\n# print \"Star of q:\"\n# print sxq[:, :nq_new].transpose()\n# \n# print \"NEW_DYN:\", np.shape(new_dyn)\n# print \"AT:\", np.shape(self.QE_at)\n# print \"BG:\", np.shape(self.QE_bg)\n# print \"N SYM:\", self.QE_nsymq\n# print \"S:\", np.shape(self.QE_s)\n# print \"QE_INVS:\", np.shape(self.QE_invs)\n# print \"IRT:\", np.shape(self.QE_irt)\n# print \"RTAU:\", np.shape(self.QE_rtau)\n# print \"NQ_NEW:\", nq_new\n# print \"SXQ:\", np.shape(sxq)\n# print \"ISQ:\", np.shape(isq)\n# print \"IMQ:\", imq\n# print \"NAT:\", self.QE_nat\n \n new_dyn[:,:] = fcq[i,:,:]\n #print \"new dyn ready\"\n \n # Get the new matrix\n dyn_star = symph.q2qstar_out(new_dyn, self.QE_at, self.QE_bg, self.QE_nsymq, \n self.QE_s, self.QE_invs, self.QE_irt, self.QE_rtau,\n nq_new, sxq, isq, imq, nq, self.QE_nat)\n #print \"Fake\"\n \n #print \"XQ:\", q_point_group[i, :], \"NQ_NEW:\", nq_new\n\n # Now to perform 
the match bring the star in the same BZ as the q point\n # This facilitate the comparison between q points\n current_q = q_point_group.copy()\n #print \"Fake2\"\n# for xq in range(nq):\n# tmp = Methods.put_into_cell(self.QE_bg, sxq[:, xq])\n# sxq[:, xq] = tmp\n# current_q[xq,:] = Methods.put_into_cell(self.QE_bg, current_q [xq,:])\n# \n # Print the order of the q star\n sorting_q = np.arange(nq)\n for xq in range(nq):\n count = 0 # Debug (avoid no or more than one identification)\n for yq in range(nq):\n real_y = yq\n dot_f = 1\n if imq == 0 and yq >= nq_new:\n real_y -= nq_new\n dot_f = -1\n if Methods.get_min_dist_into_cell(self.QE_bg.transpose(), dot_f* sxq[:, real_y], current_q[xq,:]) < __EPSILON__: \n sorting_q[xq] = yq\n count += 1\n \n if count != 1:\n print (\"Original star:\")\n print (q_point_group)\n print (\"Reshaped star:\")\n print (current_q)\n print (\"Reciprocal lattice vectors:\")\n print (self.QE_bg.transpose() )\n print (\"STAR:\")\n print (sxq[:, :nq_new].transpose() )\n pta = (current_q[xq,:])\n print (\"Distances of xq in the QE star:\")\n for yq in range(nq_new):\n print (\"%.4f %.4f %.4f => \" % (sxq[0, yq], sxq[1, yq], sxq[2, yq]), Methods.get_min_dist_into_cell(self.QE_bg.transpose(), sxq[:, yq], current_q[xq,:]))\n raise ValueError(\"Error, the vector (%.3f, %.3f, %.3f) has %d identification in the star\" % (pta[0], pta[1], pta[2],\n count))\n #print \"Sorting array:\"\n #print sorting_q\n \n \n # Copy the matrix in the new one\n for xq in range(nq):\n for xat in range(self.QE_nat):\n for yat in range(self.QE_nat):\n final_fc[xq, 3*xat: 3*xat + 3, 3*yat : 3*yat + 3] += dyn_star[sorting_q[xq], :,:, xat, yat] \n \n \n # Now divide the matrix per the xq value\n final_fc /= nq\n \n # Overwrite the matrix\n fcq[:,:,:] = final_fc", "def SetupQStar(self, q_tot, supergroup = False):\n \n # Setup the symmetries\n #self.SetupQPoint()\n \n # Lets copy the q list (we are going to pop items from it)\n q_list = q_tot[:]\n q_stars = []\n \n count_qstar = 0\n count_q = 0\n q_indices = np.zeros( len(q_tot), dtype = int)\n while len(q_list) > 0:\n q = q_list[0]\n # Get the star of the current q point\n _q_ = np.array(q, dtype = np.float64) # Fortran explicit conversion\n \n nq_new, sxq, isq, imq = symph.star_q(_q_, self.QE_at, self.QE_bg, \n self.QE_nsym, self.QE_s, self.QE_invs, 0)\n \n # print (\"START WITH Q:\", q)\n # print (\"FOUND STAR:\")\n # for jq in range(nq_new):\n # print (sxq[:, jq])\n # print ()\n \n # print (\"TELL ME THE BG:\")\n # print (self.QE_bg.transpose())\n\n # print(\"Manual star:\")\n # for k in range(self.QE_nsym):\n # trial_q = q.dot(self.QE_s[:,:, k])\n # distance_q = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, q)\n # distance_mq = Methods.get_min_dist_into_cell(self.QE_bg.T, trial_q, -q)\n # print(\"trial_q : {} | DQ: {:.4f} | DMQ: {:.4f}\".format(trial_q, distance_q, distance_mq ))\n \n # Prepare the star\n q_star = [sxq[:, k] for k in range(nq_new)]\n\n # If imq is not zero (we do not have -q in the star) then add the -q for each in the star\n if imq == 0:\n old_q_star = q_star[:]\n min_dist = 1\n \n for q in old_q_star:\n q_star.append(-q)\n\n \n\n q_stars.append(q_star)\n \n # Pop out the q_star from the q_list\n for jq, q_instar in enumerate(q_star):\n # Look for the q point in the star and pop them\n #print(\"q_instar:\", q_instar)\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_list]\n \n pop_index = np.argmin(q_dist) \n q_list.pop(pop_index)\n \n # Use the same trick 
to identify the q point\n q_dist = [Methods.get_min_dist_into_cell(self.QE_bg.transpose(), \n np.array(q_instar), q_point) for q_point in q_tot]\n \n q_index = np.argmin(q_dist)\n #print (q_indices, count_q, q_index)\n q_indices[count_q] = q_index\n \n count_q += 1\n \n \n return q_stars, q_indices", "def excellent_position(self, Q):\n PP = self.ambient_space()\n # check that Q is on this curve\n try:\n Q = self(Q)\n except TypeError:\n raise TypeError(\"(=%s) must be a point on this curve\"%Q)\n r = self.multiplicity(Q)\n d = self.degree()\n # first move Q to (0 : 0 : 1), (1 : 0 : 0), or (0 : 1 : 0)\n # this makes it easier to construct the main transformation\n i = 0\n while Q[i] == 0:\n i = i + 1\n coords = [PP.gens()[j] + Q[j]/Q[i]*PP.gens()[i] for j in range(3)]\n coords[i] = PP.gens()[i]\n accoords = [PP.gens()[j] - Q[j]/Q[i]*PP.gens()[i] for j in range(3)] # coords used in map construction\n accoords[i] = PP.gens()[i]\n baseC = PP.curve(self.defining_polynomial()(coords))\n P = [0]*3\n P[i] = 1\n P = PP(P)\n l = [0,1,2]\n l.pop(i)\n # choose points forming a triangle with one vertex at P to map to the coordinate triangle\n good = False\n a = 0\n while not good:\n a = a + 1\n # find points to map to (1 : 0 : 0) and (0 : 1 : 0), not on the curve\n Px = [0]*3\n Px[l[0]] = a\n Px[l[1]] = 1\n Py = [0]*3\n Py[l[0]] = -a\n Py[l[1]] = 1\n Py[i] = 1\n try:\n Px = baseC(Px)\n Py = baseC(Py)\n continue\n except TypeError:\n pass\n # by construction, P, Px, Py are linearly independent so the following matrix is invertible\n M = matrix([[Px[j], Py[j], P[j]] for j in range(3)])\n # M defines a change of coordinates sending (1 : 0 : 0) to Py, (0 : 1 : 0) to Px, (0 : 0 : 1) to P; the\n # inverse of the transformation we want, used to create the new defining polynomial\n coords = [sum([M.row(j)[k]*PP.gens()[k] for k in range(3)]) for j in range(3)]\n C = PP.curve(baseC.defining_polynomial()(coords))\n # check tangents at (0 : 0 : 1)\n T = C.tangents(PP([0,0,1]), factor=False)[0]\n if all([e[0] > 0 for e in T.exponents()]) or all([e[1] > 0 for e in T.exponents()]):\n continue\n # check that the other intersections of C with the exceptional lines are correct\n need_continue = False\n for j in range(3):\n poly = C.defining_polynomial().subs({PP.gens()[j]: 0})\n # this is a homogeneous polynomial in the other two variables\n # and so should factor completely into homogeneous linear factors\n # each corresponding to an intersection point where the jth coord is 0.\n # check if there are enough roots, up to multiplicity (that is, that PP.gens()[j]\n # doesn't divide the defining polynomial of C)\n if poly.degree() != d:\n need_continue = True\n break\n # if j != 2, then there should be d - r multiplicity 1 roots,\n # besides the root corresponding to (0 : 0 : 1)\n # if j == 2, then all roots should have multiplicity 1\n npoly = poly\n if j != 2:\n # since (0 : 0 : 1) has multiplicity r, divide out by the highest\n # shared power of the corresponding variable before doing the resultant computations\n if j == 0:\n div_pow = min([e[1] for e in npoly.exponents()])\n npoly = PP.coordinate_ring()(dict([((v[0],v[1] - div_pow,v[2]),g) for (v,g) in\\\n npoly.dict().items()]))\n else:\n div_pow = min([e[0] for e in npoly.exponents()])\n npoly = PP.coordinate_ring()(dict([((v[0] - div_pow,v[1],v[2]),g) for (v,g) in\\\n npoly.dict().items()]))\n # check the degree again\n if npoly.degree() != d - r:\n need_continue = True\n break\n # check that npoly isn't a constant now\n if npoly.degree() > 0:\n t = 0\n while 
npoly.degree(PP.gens()[t]) == 0:\n t = t + 1\n if npoly.resultant(npoly.derivative(PP.gens()[t]), PP.gens()[t]) == 0:\n need_continue = True\n break\n else:\n t = 0\n while npoly.degree(PP.gens()[t]) == 0:\n t = t + 1\n if poly.resultant(poly.derivative(PP.gens()[t]), PP.gens()[t]) == 0:\n need_continue = True\n break\n # check that intersections with the line PP.gens()[j] are transverse.\n # at a simple point P of the curve, the tangent at that point is\n # given by F_x(P)*x + F_y(P)*y + F_z(P)*z where F is the defining polynomial\n # of the curve\n tmp_l = [0,1,2]\n tmp_l.pop(j)\n poly1 = npoly.derivative(PP.gens()[tmp_l[0]])\n poly2 = npoly.derivative(PP.gens()[tmp_l[1]])\n if poly1.degree() > 0 or poly2.degree() > 0:\n t = 0\n while poly1.degree(PP.gens()[t]) == 0 and poly2.degree(PP.gens()[t]) == 0:\n t = t + 1\n # maybe a stricter check than necessary\n if poly1.resultant(poly2, PP.gens()[t]) == 0:\n need_continue = True\n break\n if need_continue:\n continue\n good = True\n # coords for map\n M = M.inverse()\n accoords2 = [sum([M.row(j)[k]*PP.gens()[k] for k in range(3)]) for j in range(3)]\n H = Hom(self, C)\n phi = H([f(accoords) for f in accoords2])\n return phi", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def get_quad_points(self):\n return self.get_abstract_item(\"General\", \"Drag quadrature Points\")", "def qCurveAdjust(self):\n if self.qcurveless:\n return\n\n self.minimizer = Minuit(self.chisq, x = self.xOffset, error_x = 0.1, limit_x = (self.xOffsetMin, self.xOffsetMax), y = self.yOffset, error_y = 0.1, limit_y = (self.yOffsetMin, self.yOffsetMax), errordef = 1, print_level = 0)\n self.minimizer.migrad()\n #self.minimizer.print_param()\n #print self.minimizer.get_fmin()\n\n if self.minimizer.get_fmin().edm < 5.E-6:\n self.xOffset = self.minimizer.values['x']\n self.yOffset = self.minimizer.values['y']\n else:\n self.xOffset = 0.\n self.yOffset = 0.", "def handle_change_of_qspace(changed_sample_U_matrix=None):\n\n #Clear the try_position\n if not get_try_position() is None:\n if not get_try_position().try_position is None:\n #Clear it!\n get_try_position().try_position.coverage = None\n if not changed_sample_U_matrix is None:\n get_try_position().try_position.sample_U_matrix = changed_sample_U_matrix\n \n #TODO: Add a lock?\n #Copy the parameters over\n NextParams.update(LatestParams)\n #Clear the latest to force the thread to re-do everything.\n LatestParams.clear()\n #TODO: Fix the slice display parameter - it can be moved off-scale.\n \n #Re-init the qspace frame last?\n model.messages.send_message(model.messages.MSG_EXPERIMENT_QSPACE_SETTINGS_CHANGED)", "def trackQuadraturePoints(self,q):\n import pdb\n timeToTrackPoints = (self.transport.timeIntegration.t > self.transport.timeIntegration.tLast + 
1.0e-8 or\n abs(self.tForLastTrackingStep-self.transport.timeIntegration.t) > 1.0e-8)\n\n #by default, tracking element quadrature points only (q array)\n x_depart = {}\n nPoints_track = {}\n for ci in range(self.transport.nc):\n x_depart[ci] = q['x']\n nPoints_track[ci] = self.transport.mesh.nElements_global*self.transport.nQuadraturePoints_element\n\n def setupInitialElementLocations(ci,q_e):\n for k in range(q_e[ci].shape[1]):\n q_e[ci][:,k] = numpy.arange(self.transport.mesh.nElements_global,dtype='i')\n #todo need to allow skipping nonzero points with q or gq\n\n #first generate SSIPs if needed\n #todo this could be turned into a data member\n #0 -- not backtracked at all\n #1 -- backtracked only nonzero solution points\n #2 -- backtracked everything\n #mwf debug\n #import pdb\n #pdb.set_trace()\n solutionBackTrackedFlag = 0\n if self.needToTrackPoints and timeToTrackPoints and self.SSIPflag > 0:\n self.trackSolutionBackwards(skipPointsWithZeroSolution=True)\n self.generateSSIPs()\n solutionBackTrackedFlag = 1\n self.trackSSIPs()\n if self.needToTrackPoints and timeToTrackPoints:\n #mwf debug\n #pdb.set_trace()\n #update velocity fields for particle tracking\n for ci in range(self.transport.nc):\n self.particle_tracker.setTrackingVelocity(self.transport.coefficients.adjoint_velocity_dofs_last[ci],ci,\n self.transport.coefficients.adjoint_velocity_times_last[ci],\n timeLevel=0,\n trackingVelocity_l2g=self.transport.coefficients.adjoint_velocity_l2g[ci])\n self.particle_tracker.setTrackingVelocity(self.transport.coefficients.adjoint_velocity_dofs[ci],ci,\n self.transport.coefficients.adjoint_velocity_times[ci],\n timeLevel=1)\n\n\n log(\" LADRellam tracking integration points backward ci=%s\" % ci,level=2)\n self.q_t_depart[ci].fill(self.transport.timeIntegration.t)\n #in desired output time, out actual time\n self.q_t_track[ci].fill(self.transport.timeIntegration.tLast)\n #try all points, now set to -1 to try, -3 to skip, 0 or greater if a node of the mesh\n self.q_flag_track[ci].fill(-1)\n #assign ownership of quadrature points to elements\n setupInitialElementLocations(ci,self.q_element_track)\n\n #todo make sure activeComponents set explicitly?\n #mwf debug just play with forwardTrack call, normally backward tracking\n self.particle_tracker.backwardTrack(self.q_t_depart,\n self.q_t_track,\n nPoints_track,\n x_depart,\n self.q_element_track,\n self.q_x_track,\n self.q_flag_track)\n\n\n #mwf debug\n #pdb.set_trace()\n for ci in range(self.transport.nc):\n self.q_dt_track[ci] = numpy.copy(self.q_t_depart[ci])\n self.q_dt_track[ci] -= self.q_t_track[ci]\n\n if not self.useBackwardTrackingForOldMass:\n for ci in range(self.transport.nc):\n log(\" LADRellam tracking integration points forward ci=%s \" % ci,level=2)\n #forward\n self.q_t_depart[ci].fill(self.transport.timeIntegration.tLast)\n self.q_t_track[ci].fill(self.transport.timeIntegration.t)\n #todo setup so can skip points with zero solution using q or gq, need to evaluate u at gq\n #try all points, now set to -1 to try, -3 to skip, 0 or greater if a node of the mesh\n self.q_flag_track[ci].fill(-1)\n #assign ownership of quadrature points to elements\n setupInitialElementLocations(ci,self.q_element_track)\n\n\n #todo make sure activeComponents set explicitly?\n self.particle_tracker.forwardTrack(self.q_t_depart,\n self.q_t_track,\n nPoints_track,\n x_depart,\n self.q_element_track,\n self.q_x_track,\n self.q_flag_track)\n\n\n if self.needToBackTrackSolution and solutionBackTrackedFlag < 1:\n 
self.trackSolutionBackwards(skipPointsWithZeroSolution=False)\n\n #end tracking interpolation points\n self.needToTrackPoints = False\n self.tForLastTrackingStep=self.transport.timeIntegration.t\n #mwf debug\n #pdb.set_trace()\n #end need to track integration points", "def get_q_glue(self) -> List[float]:\n # We take q above the glue\n flange_area = self.thickness*self.flange_sheets*self.flange_width * 2\n flange_d = self.web_height + (self.thickness*self.flange_sheets) / 2 - self.y_bar\n\n deck_area = self.thickness * self.deck_sheets * (self.width - 2*self.flange_width)\n deck_d = self.web_height + (self.thickness * self.deck_sheets) / 2 - self.y_bar\n\n return [flange_area*flange_d + deck_area*deck_d]", "def _get_initial_qpos(self):\n pos = self._convert_robosuite_to_toolbox_xpos(self.traj_pt)\n ori_euler = mat2euler(quat2mat(self.goal_quat))\n\n # desired pose\n T = SE3(pos) * SE3.RPY(ori_euler)\n\n # find initial joint positions\n if self.robots[0].name == \"UR5e\":\n robot = rtb.models.DH.UR5()\n sol = robot.ikine_min(T, q0=self.robots[0].init_qpos)\n\n # flip last joint around (pi)\n sol.q[-1] -= np.pi\n return sol.q\n\n elif self.robots[0].name == \"Panda\":\n robot = rtb.models.DH.Panda()\n sol = robot.ikine_min(T, q0=self.robots[0].init_qpos)\n return sol.q", "def pointPotential(x,y,q,posx,posy):\n k = 8.987e9 #N m^2/C^2\n Vxy = (k*q)/(((x-posx)**2 + (y-posy)**2)**(1/2.)) \n return Vxy", "def add_point(self):\n # Generate a new point that is in bounds and not in an obstacle\n if random.randint(0, 10) <= 5:\n q_new = self.getRandomPoint()\n else:\n q_new = self.getBiasedRandomPoint()\n\n #if self.endOfPath(q_new, 8):\n # self.start.setEnd(False)\n # return True\n \n # Find closest node to q_new\n q_nearest = self.tree_points[-1]\n best_distance = q_new.distance(q_nearest)\n for node in self.tree_points[:-1]:\n if q_new.distance(node) <= best_distance:\n q_nearest = node\n best_distance = q_new.distance(node)\n\n # Slide q_new closer\n heading = math.degrees(math.atan2(q_new.ycor - q_nearest.ycor,\n q_new.xcor - q_nearest.xcor))\n \n q_new = Node(q_nearest.xcor+(self.delta*math.cos(heading)),\n q_nearest.ycor+(self.delta*math.sin(heading)))\n q_new.setH(q_new.distance(self.goal))\n \n if not self.validNode(q_new):\n return True\n\n #if len(close_nodes) > 20:\n # q_new.setEnd(True)\n # for node in close_nodes:\n # q_new.setEnd(True)\n # self.start.setEnd(False)\n # return True\n\n # Find all nodes within self.neighborhood of q_new\n close_nodes = []\n for node in self.tree_points:\n if q_new.distance(node) <= self.neighborhood:\n close_nodes.append(node)\n\n # Find cheapest parent for q_new from close_nodes\n best_parent = close_nodes[0]\n for node in close_nodes[1:]:\n cost1 = q_new.distance(best_parent) + best_parent.getCost()\n cost2 = q_new.distance(node) + node.getCost()\n if cost2 < cost1:\n best_parent = node\n\n if self.obstacleFree(best_parent, q_new):\n q_new.setParent(best_parent)\n self.tree_points.append(q_new)\n\n # Look at close_nodes and see if any of them have a better path through q_new\n for node in close_nodes:\n cost1 = node.getCost()\n cost2 = q_new.getCost() + q_new.distance(node)\n if cost2 < cost1:\n if self.obstacleFree(q_new, node):\n node.setParent(q_new)\n\n # Determine if more points need to be added\n if q_new.distance(self.goal) <= 10:\n return False\n \n return True", "def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp", "def GetSupercellFromQlist(q_list, unit_cell):\n\n # Get the 
bravais lattice\n bg = Methods.get_reciprocal_vectors(unit_cell) \n\n # Convert the q points in crystalline units\n supercell = [1,1,1]\n\n for q in q_list:\n qprime = Methods.covariant_coordinates(bg, q)\n qprime -= np.floor(qprime)\n qprime[np.abs(qprime) < __EPSILON__] = 1\n\n rmax = 1/np.abs(qprime)\n for j in range(3):\n if supercell[j] < int(rmax[j] + .5):\n supercell[j] = int(rmax[j] + .5)\n \n return supercell", "def GetQGrid(unit_cell, supercell_size, enforce_gamma_first = True):\n bg = Methods.get_reciprocal_vectors(unit_cell)\n\n n_vects = int(np.prod(supercell_size))\n q_final = np.zeros((3, n_vects), dtype = np.double, order = \"F\")\n q_final[:,:] = symph.get_q_grid(bg.T, supercell_size, n_vects)\n\n # Get the list of the closest vectors\n q_list = [Methods.get_closest_vector(bg, q_final[:, i]) for i in range(n_vects)]\n\n # Setup Gamma as the first vector\n if enforce_gamma_first:\n for i, q in enumerate(q_list):\n if np.abs(np.sum(q)) < __EPSILON__:\n tmp = q_list[0].copy()\n q_list[0] = q.copy()\n q_list[i] = tmp \n break \n\n\n return q_list", "def get_q(self,coord='rc',unit='au'):\n if(coord=='rc'):\n return self.param['q_rc'];\n if(coord=='cc' and unit=='au'):\n return self.param['q_cc'];\n if(coord=='cc' and unit=='si'):\n return self.param['q_cc']/0.529177249;" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE SUPERCELL FROM THE LIST OF Q POINTS =========================================== This method returns the supercell size from the list of q points and the unit cell of the structure.
def GetSupercellFromQlist(q_list, unit_cell):

    # Get the reciprocal lattice vectors
    bg = Methods.get_reciprocal_vectors(unit_cell) 

    # Convert each q point into crystal coordinates and update the supercell
    supercell = [1,1,1]

    for q in q_list:
        qprime = Methods.covariant_coordinates(bg, q)
        qprime -= np.floor(qprime)
        qprime[np.abs(qprime) < __EPSILON__] = 1

        rmax = 1/np.abs(qprime)
        for j in range(3):
            if supercell[j] < int(rmax[j] + .5):
                supercell[j] = int(rmax[j] + .5)
    
    return supercell
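For context, here is a minimal, self-contained NumPy sketch of the same idea, usable as a
sanity check on the routine above. The helper names (reciprocal_vectors, supercell_from_qlist,
EPS) are illustrative stand-ins, not part of the CellConstructor Methods module; a cubic cell
with q points expressed in 2*pi/a units is assumed.

import numpy as np

EPS = 1e-6

def reciprocal_vectors(unit_cell):
    # Rows are the b_i vectors satisfying a_i . b_j = 2*pi*delta_ij
    return 2 * np.pi * np.linalg.inv(unit_cell).T

def supercell_from_qlist(q_list, unit_cell):
    bg = reciprocal_vectors(unit_cell)
    supercell = [1, 1, 1]
    for q in q_list:
        # Components of q in the reciprocal-lattice basis (crystal coordinates)
        qprime = np.linalg.solve(bg.T, np.asarray(q, dtype=float))
        qprime -= np.floor(qprime)
        qprime[np.abs(qprime) < EPS] = 1.0
        rmax = 1.0 / np.abs(qprime)
        for j in range(3):
            supercell[j] = max(supercell[j], int(rmax[j] + 0.5))
    return supercell

# Example: the q grid of a 2x2x1 supercell of a cubic cell with a = 1
cell = np.eye(3)
qs = [2 * np.pi * np.array([i / 2, j / 2, 0.0]) for i in range(2) for j in range(2)]
print(supercell_from_qlist(qs, cell))  # -> [2, 2, 1]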
[ "def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list", "def CheckSupercellQ(unit_cell, supercell_size, q_list):\n # Get the q point list for the given supercell\n correct_q = GetQGrid(unit_cell, supercell_size)\n \n # Get the reciprocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Check if the vectors are equivalent or not\n for iq, q in enumerate(q_list):\n for jq, qnew in enumerate(correct_q):\n if Methods.get_min_dist_into_cell(bg, q, qnew) < __EPSILON__:\n correct_q.pop(jq)\n break\n \n if len(correct_q) > 0:\n print (\"[CHECK SUPERCELL]\")\n print (\" MISSING Q ARE \")\n print (\"\\n\".join([\" q =%16.8f%16.8f%16.8f \" % (q[0], q[1], q[2]) for q in correct_q]))\n return False\n return True", "def find_base_size(self):\n\n# Find longitudinal locations of first two points\n first_UTM = self.shapes[0].points[0][0]\n second_UTM = self.shapes[1].points[0][0]\n\n# Find the difference. 
This difference in meters is the size of the grid\n grid_size = second_UTM - first_UTM\n\n return grid_size", "def GetQGrid(unit_cell, supercell_size, enforce_gamma_first = True):\n bg = Methods.get_reciprocal_vectors(unit_cell)\n\n n_vects = int(np.prod(supercell_size))\n q_final = np.zeros((3, n_vects), dtype = np.double, order = \"F\")\n q_final[:,:] = symph.get_q_grid(bg.T, supercell_size, n_vects)\n\n # Get the list of the closest vectors\n q_list = [Methods.get_closest_vector(bg, q_final[:, i]) for i in range(n_vects)]\n\n # Setup Gamma as the first vector\n if enforce_gamma_first:\n for i, q in enumerate(q_list):\n if np.abs(np.sum(q)) < __EPSILON__:\n tmp = q_list[0].copy()\n q_list[0] = q.copy()\n q_list[i] = tmp \n break \n\n\n return q_list", "def getSuperCost(self) -> float:\n\n if(self.book.booktype is BookType.TRADITIONAL or self.book.booktype is BookType.QUARTER):\n paddedSpine = self.book.spine + self.paddingSpineForSuper\n sqInchSuper = paddedSpine * self.book.coverDim.height\n return sqInchSuper * self.superPrice\n return 0", "def getCellSizeFn(points):\n # Coordinates of target\n target = (5.0e+3, -10.0e+3, -10.0e+3)\n\n # Compute distance from target\n dist = ((points[:, 0] - target[0])**2 +\n (points[:, 1] - target[1])**2 +\n (points[:, 2] - target[2])**2)**0.5\n bias_factor = 1.05 # Geometric rate\n dxStart = 1.0e+3 # Discretization size at target\n npts = numpy.ceil(numpy.log(1 - dist / dxStart * (1 - bias_factor)) / numpy.log(bias_factor))\n cellSize = dxStart * bias_factor**npts\n return cellSize", "def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs", "def getSuperpixelSize(self) -> retval:\n ...", "def _cell_num_point(self, cell):\n obs, reqs = self.tiling.cell_basis()[cell]\n ob_lens = sorted(map(len, obs))\n assert ob_lens[0] == 2, \"Unexpected obstruction\"\n assert len(reqs) <= 1, \"Unexpected number of requirement\"\n if len(obs) == 1:\n maxlen = None\n elif len(obs) == 2:\n maxlen = ob_lens[1] - 1\n else:\n raise RuntimeError(\"Unexpected number of obstructions\")\n if not reqs:\n minlen = 0\n elif len(reqs) == 1:\n minlen = len(reqs[0])\n else:\n raise RuntimeError(\"Unexpected number of requirements\")\n return minlen, maxlen", "def voxel_superset(s):\n # return ndim_grid(np.trunc(s.min(0)) - 1, np.trunc(s.max(0)) + 1)\n return ndim_grid(np.round(s.min(0)) - 1, np.round(s.max(0)) + 1)", "def getNumberOfSuperpixels(self) -> retval:\n ...", "def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n 
q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list", "def getMinSize(self):\n minW = minH = 0 # Let's see if we need bigger than this.\n for e in self.elements:\n eMinW, eMinH = e.getMinSize()\n minW = max(minW, eMinW)\n minH += eMinH\n return minW, minH", "def getMinBoxArea(self) -> retval:\n ...", "def test_cell_size(self):\n pas = generate_sample_dataset_1() # h==1\n \n # times of the h that cell_size is set to\n fac = 2.0\n cm = CellManager(arrays_to_bin=pas, initialize=False)\n self.assertEqual(cm.cell_size, 0.0)\n cm.initialize()\n self.assertAlmostEqual(cm.cell_size, 2.0, 10)\n \n cm = CellManager(arrays_to_bin=pas, min_cell_size=3.0)\n self.assertEqual(cm.cell_size, 3.0)\n \n pas[0].h = numpy.linspace(0.2, 2, len(pas[0].h))\n cm = CellManager(arrays_to_bin=pas, min_cell_size=-0.1)\n\n val = max(pas[0].h)\n self.assertEqual(cm.cell_size, fac*val)", "def getGridSize(self): # real signature unknown; restored from __doc__\n pass", "def make_super_cell(structure, sc):\n\n supercell = Structure()\n supercell.structure_comment = \"{}x{}x{}\".format(sc[0],sc[1],sc[2])\n\n # set lattice parameter\n supercell.lattice_parameter = structure.lattice_parameter \n\n # set h_matrix\n h = np.zeros(shape=[3,3])\n for i in range(3):\n h[i,:] = structure.h_matrix[i,:] * sc[i]\n supercell.h_matrix = h\n\n # add supercell atoms\n for i in range(sc[0]):\n for j in range(sc[1]):\n for k in range(sc[2]):\n for atom in structure.atoms:\n symbol = atom.symbol\n position = atom.position\n position = [(i+position[0])/sc[0],\\\n (j+position[1])/sc[1],\\\n (k+position[2])/sc[2]]\n supercell.add_atom(symbol,position)\n\n # return a copy of the supercell\n return copy.deepcopy(supercell)", "def SoPointSizeElement_get(state: 'SoState') -> \"float\":\n return _coin.SoPointSizeElement_get(state)", "def 
cell_dimension(self):\n return (self._base_mesh.cell_dimension(), 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET SYMMETRIES ON MODES
=======================

This method returns a set of symmetry matrices that
explain how polarization vectors transform into one another
under any symmetry operation.
def _GetSymmetriesOnModes(symmetries, structure, pol_vects):

    # Get the vector of the displacement in the polarization
    m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()
    disp_v = np.einsum("im,i->mi", pol_vects, 1 / np.sqrt(m))
    underdisp_v = np.einsum("im,i->mi", pol_vects, np.sqrt(m))

    n_dim, n_modes = np.shape(pol_vects)

    n_sym = len(symmetries)
    nat = structure.N_atoms
    
    # For each symmetry operation, apply it to every mode and project the
    # result back onto the mode basis
    pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)
    for i, sym_mat in enumerate(symmetries):
        irt = GetIRT(structure, sym_mat)
        
        for j in range(n_modes):
            # Apply the i-th symmetry to the j-th mode
            new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()
            pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())

    return pol_symmetries
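As a quick sanity check on the projection performed above, here is a small self-contained toy
example. It assumes a single atom, so the atom-permutation bookkeeping done through GetIRT and
ApplySymmetryToVector drops out and the symmetry expressed on the modes must reduce to the bare
rotation matrix; everything except the numpy calls is illustrative.

import numpy as np

# Toy case: one atom of mass 4 whose three "modes" are the Cartesian
# displacements, so the symmetry written on the modes equals the rotation.
m = np.array([4.0, 4.0, 4.0])              # mass repeated for x, y, z
pols = np.eye(3)                           # columns = mass-weighted polarization vectors
rot = np.array([[0., -1., 0.],
                [1.,  0., 0.],
                [0.,  0., 1.]])            # 90-degree rotation about z

disp_v = np.einsum("im,i->mi", pols, 1 / np.sqrt(m))   # real-space displacements
underdisp_v = np.einsum("im,i->mi", pols, np.sqrt(m))  # projector back onto the modes

pol_sym = np.zeros((3, 3))
for j in range(3):
    new_vector = rot @ disp_v[j, :]        # symmetry applied in Cartesian space
    pol_sym[:, j] = underdisp_v.dot(new_vector)

print(np.allclose(pol_sym, rot))           # True for this single-atom toy case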
[ "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def get_diagonal_symmetry_polarization_vectors(pol_sc, w, pol_symmetries):\n raise NotImplementedError(\"Error, this subroutine has not been implemented.\")\n\n # First we must get the degeneracies\n deg_list = get_degeneracies(w) \n\n # Now perform the diagonalization on each degeneracies\n final_vectors = np.zeros( pol_sc.shape, dtype = np.complex128)\n final_vectors[:,:] = pol_sc.copy()\n\n n_modes = len(w)\n n_syms = pol_symmetries.shape[0]\n skip_list = []\n\n syms_values = np.zeros((n_modes, n_syms), dtype = np.complex128)\n\n print(\"All modes:\")\n for i in range(n_modes):\n print(\"Mode {} = {} cm-1 => \".format(i, w[i] * RY_TO_CM), deg_list[i])\n\n print()\n for i in range(n_modes):\n if i in skip_list:\n continue\n\n # If we have no degeneracies, we can ignore it\n if len(deg_list[i]) == 1:\n continue \n\n partial_modes = np.zeros((len(deg_list[i]), len(deg_list[i])), dtype = np.complex128)\n partial_modes[:,:] = np.eye(len(deg_list[i])) # identity matrix\n\n mask_final = np.array([x in deg_list[i] for x in range(n_modes)])\n\n # If we have degeneracies, lets diagonalize all the symmetries\n for i_sym in range(n_syms):\n skip_j = []\n diagonalized = False\n np.savetxt(\"sym_{}.dat\".format(i_sym), pol_symmetries[i_sym, :,:])\n\n \n # Get the symmetry matrix in the mode space (this could generate a problem with masses)\n ps = pol_symmetries[i_sym, :, :]\n sym_mat_origin = ps[np.outer(mask_final, mask_final)].reshape((len(deg_list[i]), len(deg_list[i]))) \n\n for j_mode in deg_list[i]:\n if j_mode in skip_j:\n continue \n\n # Get the modes that can be still degenerate by symmetries\n mode_dna = syms_values[j_mode, : i_sym]\n\n # Avoid a bad error if i_sym = 0\n if len(mode_dna) > 0:\n mode_space = [x for x in deg_list[i] if np.max(np.abs(syms_values[x, :i_sym] - mode_dna)) < 1e-3]\n else:\n mode_space = [x for x in deg_list[i]]\n\n # The mask for the whole symmetry and the partial_modes\n mask_all = np.array([x in mode_space for x in np.arange(n_modes)])\n mask_partial_mode = np.array([x in mode_space for x in deg_list[i]])\n n_deg_new = np.sum(mask_all.astype(int))\n\n if len(mode_space) == 1:\n continue\n\n p_modes_new = partial_modes[:, mask_partial_mode]\n\n \n print()\n print(\"SYMMETRY_INDEX:\", i_sym)\n print(\"SHAPE sym_mat_origin:\", sym_mat_origin.shape)\n print(\"MODES: {} | DEG: {}\".format(mode_space, deg_list[i]))\n print(\"SHAPE P_MODES_NEW:\", p_modes_new.shape)\n sym_mat = np.conj(p_modes_new.T).dot(sym_mat_origin.dot(p_modes_new))\n \n # Decompose in upper triangular (assures that eigenvectors are orthogonal)\n s_eigvals_mat, s_eigvects = scipy.linalg.schur(sym_mat, output = \"complex\")\n s_eigvals = np.diag(s_eigvals_mat)\n\n # Check if the s_eigvals confirm the unitary of sym_mat\n # TODO: Check if some mass must be accounted or not...\n print(\"SYM_MAT\")\n print(sym_mat)\n print(\"Eigvals:\")\n print(s_eigvals)\n print(\"Eigval_mat:\")\n print(s_eigvals_mat)\n print(\"Eigvects:\")\n print(s_eigvects)\n assert np.max(np.abs(np.abs(s_eigvals) - 1)) < 1e-5, \"Error, it seems that the {}-th matrix is not a rotation.\".format(i_sym).format(sym_mat)\n\n # Update the polarization vectors to account this diagonalization\n partial_modes[:, mask_partial_mode] = p_modes_new.dot(s_eigvects)\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(mode_space):\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Now add the modes analyzed up to know to the skip\n for x in mode_space:\n 
skip_j.append(x)\n \n diagonalized = True\n\n\n # Now we diagonalized the space\n # Apply the symmetries if we did not perform the diagonalization\n if not diagonalized:\n # Get the symmetrized matrix in the partial mode list:\n sym_mat = np.conj(partial_modes.T).dot(sym_mat_origin.dot(partial_modes))\n\n # Check that it is diagonal\n s_eigvals = np.diag(sym_mat) \n disp = sym_mat - np.diag( s_eigvals)\n if np.max(np.abs(disp)) > 1e-4:\n print(\"Matrix {}:\".format(i_sym))\n print(sym_mat)\n raise ValueError(\"Error, I expect the symmetry {} to be diagonal\".format(i_sym))\n\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(deg_list[i]):\n syms_values[k, i_sym] = s_eigvals[k_i]\n \n\n # Now we solved our polarization vectors, add them to the final ones\n final_vectors[:, mask_final] = pol_sc[:, mask_final].dot(partial_modes) \n\n # Do not further process the modes we used in this iteration\n for mode in deg_list[i]:\n skip_list.append(mode)\n\n\n return final_vectors, syms_values", "def getRawSymmetryMatrix(*args, **kwargs):\n \n pass", "def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n if not get_irt:\n return syms\n return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1", "def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini", "def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini", "def PrintSymmetries(self):\n\n print()\n print(\"Number of symmetries: {}\".format(self.QE_nsym))\n syms = self.GetSymmetries()\n for i in range(self.QE_nsym):\n print(\" Symmetry {}\".format(i+1))\n for j in range(3):\n print(\" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}\".format(*syms[i][j,:]))\n print()", "def getSymmetryPlane(*args, **kwargs):\n \n pass", "def GetSymmetryMatrix(sym, structure, crystal = False):\n\n # Get the IRT array\n irt = GetIRT(structure, sym)\n\n nat = structure.N_atoms\n sym_mat = np.zeros((3 * nat, 3*nat), dtype = np.double)\n\n # Comvert the symmetry matrix in cartesian\n if not crystal:\n sym_cryst = Methods.convert_matrix_cart_cryst2(sym[:,:3], structure.unit_cell, cryst_to_cart = True)\n else:\n sym_cryst = sym[:,:3]\n\n # Correctly fill the atomic position of sym_mat\n for i in range(nat):\n i_irt = irt[i]\n sym_mat[3 * i_irt : 3*i_irt+3, 3*i : 3*i+ 3] = sym_cryst\n\n return sym_mat", "def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym", "def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = 
self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)", "def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = 
copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return", "def symbology(self):\n\n\t\tif ARCMAP and self.layer_object.symbologyType == \"OTHER\":\n\t\t\traise NotSupportedError(\"Unsupported symbology type in ArcMap\")\n\n\t\treturn self.layer_object.symbology", "def test_get_symmetry_number(self):\n\n mol = Molecule().from_smiles('C')\n\n self.assertEquals(12, mol.get_symmetry_number())\n\n empty = Molecule()\n self.assertEquals(1, empty.get_symmetry_number())", "def apply_symmetrisation(self):\n\n # get the values to be symmetrised\n for sym_set in self.molecule.symm_hs.values():\n charges, sigmas, epsilons = [], [], []\n for atom_set in sym_set:\n for atom in atom_set:\n charges.append(float(self.non_bonded_force[atom - 1][0]))\n sigmas.append(float(self.non_bonded_force[atom - 1][1]))\n epsilons.append(float(self.non_bonded_force[atom - 1][2]))\n # calculate the average values to be used in symmetry\n charge, sigma, epsilon = sum(charges) / len(charges), sum(sigmas) / len(sigmas), sum(epsilons) / len(epsilons)\n\n # now loop through the atoms again and store the new values\n for atom in atom_set:\n self.non_bonded_force[atom - 1] = [str(charge), str(sigma), str(epsilon)]", "def switchSymmetry( mlist, upperToLower = True ):\n shape = int( math.sqrt( 2*len(mlist) ) )\n arrays = [[] for i in xrange(shape)]\n matiter = iter(mlist)\n for idx in xrange(shape):\n if upperToLower: lbound,ubound=idx,shape\n else: lbound,ubound=0,idx+1\n for jdx in xrange(lbound,ubound):\n arrays[jdx].append( matiter.next() )\n\n return [val for sublist in arrays for val in sublist]", "def get_rotations_problem(self):\n edges = []\n pairwise_rotations = []\n for m in self.models:\n edges.append((m.i, m.j))\n pairwise_rotations.append(m.Rij())\n return (edges, pairwise_rotations)", "def GetSymmetriesOnModesDeg(symmetries, structure, pol_vects, w_freq, timer = None, debug = False):\n\n\n Ns = len(symmetries)\n \n # Now we can pull out the translations\n pols = pol_vects\n w = w_freq\n #trans_mask = Methods.get_translations(pol_vects, structure.get_masses_array())\n\n # Exclude degeneracies\n #w = w_freq[~trans_mask]\n #pols = pol_vects[:, ~trans_mask]\n\n\n # Get the degeneracy\n n_modes = len(w)\n N_deg = np.ones(len(w), dtype = np.intc)\n n_blocks = min(len(w), 1) # Counter of the different non-degenerate modes\n start_deg = -1\n deg_space = [ [x] for x in range(n_modes)]\n final_space = []\n\n threshold = 1e-8\n\n # Compute irts once for all\n irts = []\n for i, sym_mat in enumerate(symmetries):\n irts.append(GetIRT(structure, sym_mat, timer, debug = debug))\n\n for i in range(1, len(w)):\n if np.abs(w[i-1] - w[i]) < threshold :\n N_deg[i] = N_deg[i-1] + 1\n\n if start_deg == -1:\n start_deg = i - 1\n\n for j in range(start_deg, i):\n N_deg[j] = N_deg[i]\n deg_space[j].append(i)\n deg_space[i].append(j)\n\n else:\n start_deg = -1\n n_blocks += 1\n deg_space[i-1].sort()\n final_space.append(deg_space[i-1])\n \n deg_space[-1].sort()\n final_space.append(deg_space[-1])\n\n assert len(final_space) == n_blocks\n \n \n # Now compute the symmetries only in the correct blocks\n i_mode = 0\n result_list = []\n for i in range(n_blocks): # TODO ADD MPI PARALLELIZATION\n mode_mask = np.zeros(n_modes, dtype = bool)\n\n for k in final_space[i]:\n mode_mask[k] = True\n\n \n \n #assert np.sum(mode_mask.astype(int)) == N_deg[i_mode], \"Error, something went wrong while computing the degeneracies.\"\n\n select_pols = pols[:, mode_mask]\n pol_syms = GetSymmetriesOnModes(symmetries, structure, select_pols, irts, timer, 
debug)\n\n i_mode += len(deg_space[i_mode])\n\n result_list.append(pol_syms)\n \n return result_list, final_space", "def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET SYMMETRIES ON MODES
=======================

This method returns a set of symmetry matrices that
explain how polarization vectors transform into one another
under any symmetry operation.

Unlike the previous subroutine GetSymmetriesOnModes, which returns a tensor
of size (n_sym, n_modes, n_modes), this subroutine returns a list of length
n_deg as [(n_sym, ni, ni)], where n_sym is the number of symmetries, n_deg is
the number of distinct groups of degenerate modes, and ni is the dimension of
the degeneracy of the i-th group. This allows for a much lower memory
consumption when storing the symmetries.
def GetSymmetriesOnModesDeg(symmetries, structure, pol_vects, w_freq, timer = None, debug = False): Ns = len(symmetries) # Now we can pull out the translations pols = pol_vects w = w_freq #trans_mask = Methods.get_translations(pol_vects, structure.get_masses_array()) # Exclude degeneracies #w = w_freq[~trans_mask] #pols = pol_vects[:, ~trans_mask] # Get the degeneracy n_modes = len(w) N_deg = np.ones(len(w), dtype = np.intc) n_blocks = min(len(w), 1) # Counter of the different non-degenerate modes start_deg = -1 deg_space = [ [x] for x in range(n_modes)] final_space = [] threshold = 1e-8 # Compute irts once for all irts = [] for i, sym_mat in enumerate(symmetries): irts.append(GetIRT(structure, sym_mat, timer, debug = debug)) for i in range(1, len(w)): if np.abs(w[i-1] - w[i]) < threshold : N_deg[i] = N_deg[i-1] + 1 if start_deg == -1: start_deg = i - 1 for j in range(start_deg, i): N_deg[j] = N_deg[i] deg_space[j].append(i) deg_space[i].append(j) else: start_deg = -1 n_blocks += 1 deg_space[i-1].sort() final_space.append(deg_space[i-1]) deg_space[-1].sort() final_space.append(deg_space[-1]) assert len(final_space) == n_blocks # Now compute the symmetries only in the correct blocks i_mode = 0 result_list = [] for i in range(n_blocks): # TODO ADD MPI PARALLELIZATION mode_mask = np.zeros(n_modes, dtype = bool) for k in final_space[i]: mode_mask[k] = True #assert np.sum(mode_mask.astype(int)) == N_deg[i_mode], "Error, something went wrong while computing the degeneracies." select_pols = pols[:, mode_mask] pol_syms = GetSymmetriesOnModes(symmetries, structure, select_pols, irts, timer, debug) i_mode += len(deg_space[i_mode]) result_list.append(pol_syms) return result_list, final_space
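The degeneracy grouping that drives the block structure above can be illustrated with a short
stand-alone sketch (the function name is illustrative and, as in the routine above, the
frequencies are assumed to be sorted so that degenerate modes are consecutive):

import numpy as np

def degenerate_blocks(w, thr=1e-8):
    # Group consecutive (nearly) equal frequencies into degeneracy blocks,
    # mirroring the N_deg / deg_space / final_space bookkeeping above.
    if len(w) == 0:
        return []
    blocks = [[0]]
    for i in range(1, len(w)):
        if abs(w[i] - w[i - 1]) < thr:
            blocks[-1].append(i)
        else:
            blocks.append([i])
    return blocks

w = np.array([0.0, 0.0, 0.0, 1.3, 2.5, 2.5])  # e.g. three acoustic modes and a doublet
print(degenerate_blocks(w))                    # [[0, 1, 2], [3], [4, 5]]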
[ "def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries", "def get_diagonal_symmetry_polarization_vectors(pol_sc, w, pol_symmetries):\n raise NotImplementedError(\"Error, this subroutine has not been implemented.\")\n\n # First we must get the degeneracies\n deg_list = get_degeneracies(w) \n\n # Now perform the diagonalization on each degeneracies\n final_vectors = np.zeros( pol_sc.shape, dtype = np.complex128)\n final_vectors[:,:] = pol_sc.copy()\n\n n_modes = len(w)\n n_syms = pol_symmetries.shape[0]\n skip_list = []\n\n syms_values = np.zeros((n_modes, n_syms), dtype = np.complex128)\n\n print(\"All modes:\")\n for i in range(n_modes):\n print(\"Mode {} = {} cm-1 => \".format(i, w[i] * RY_TO_CM), deg_list[i])\n\n print()\n for i in range(n_modes):\n if i in skip_list:\n continue\n\n # If we have no degeneracies, we can ignore it\n if len(deg_list[i]) == 1:\n continue \n\n partial_modes = np.zeros((len(deg_list[i]), len(deg_list[i])), dtype = np.complex128)\n partial_modes[:,:] = np.eye(len(deg_list[i])) # identity matrix\n\n mask_final = np.array([x in deg_list[i] for x in range(n_modes)])\n\n # If we have degeneracies, lets diagonalize all the symmetries\n for i_sym in range(n_syms):\n skip_j = []\n diagonalized = False\n np.savetxt(\"sym_{}.dat\".format(i_sym), pol_symmetries[i_sym, :,:])\n\n \n # Get the symmetry matrix in the mode space (this could generate a problem with masses)\n ps = pol_symmetries[i_sym, :, :]\n sym_mat_origin = ps[np.outer(mask_final, mask_final)].reshape((len(deg_list[i]), len(deg_list[i]))) \n\n for j_mode in deg_list[i]:\n if j_mode in skip_j:\n continue \n\n # Get the modes that can be still degenerate by symmetries\n mode_dna = syms_values[j_mode, : i_sym]\n\n # Avoid a bad error if i_sym = 0\n if len(mode_dna) > 0:\n mode_space = [x for x in deg_list[i] if np.max(np.abs(syms_values[x, :i_sym] - mode_dna)) < 1e-3]\n else:\n mode_space = [x for x in deg_list[i]]\n\n # The mask for the whole symmetry and the partial_modes\n mask_all = np.array([x in mode_space for x in np.arange(n_modes)])\n mask_partial_mode = np.array([x in mode_space for x in deg_list[i]])\n n_deg_new = np.sum(mask_all.astype(int))\n\n if len(mode_space) == 1:\n continue\n\n p_modes_new = partial_modes[:, mask_partial_mode]\n\n \n print()\n print(\"SYMMETRY_INDEX:\", i_sym)\n print(\"SHAPE sym_mat_origin:\", sym_mat_origin.shape)\n print(\"MODES: {} | DEG: {}\".format(mode_space, deg_list[i]))\n print(\"SHAPE P_MODES_NEW:\", p_modes_new.shape)\n sym_mat = np.conj(p_modes_new.T).dot(sym_mat_origin.dot(p_modes_new))\n \n # Decompose in upper triangular (assures that eigenvectors are orthogonal)\n s_eigvals_mat, s_eigvects = scipy.linalg.schur(sym_mat, output = \"complex\")\n 
s_eigvals = np.diag(s_eigvals_mat)\n\n # Check if the s_eigvals confirm the unitary of sym_mat\n # TODO: Check if some mass must be accounted or not...\n print(\"SYM_MAT\")\n print(sym_mat)\n print(\"Eigvals:\")\n print(s_eigvals)\n print(\"Eigval_mat:\")\n print(s_eigvals_mat)\n print(\"Eigvects:\")\n print(s_eigvects)\n assert np.max(np.abs(np.abs(s_eigvals) - 1)) < 1e-5, \"Error, it seems that the {}-th matrix is not a rotation.\".format(i_sym).format(sym_mat)\n\n # Update the polarization vectors to account this diagonalization\n partial_modes[:, mask_partial_mode] = p_modes_new.dot(s_eigvects)\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(mode_space):\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Now add the modes analyzed up to know to the skip\n for x in mode_space:\n skip_j.append(x)\n \n diagonalized = True\n\n\n # Now we diagonalized the space\n # Apply the symmetries if we did not perform the diagonalization\n if not diagonalized:\n # Get the symmetrized matrix in the partial mode list:\n sym_mat = np.conj(partial_modes.T).dot(sym_mat_origin.dot(partial_modes))\n\n # Check that it is diagonal\n s_eigvals = np.diag(sym_mat) \n disp = sym_mat - np.diag( s_eigvals)\n if np.max(np.abs(disp)) > 1e-4:\n print(\"Matrix {}:\".format(i_sym))\n print(sym_mat)\n raise ValueError(\"Error, I expect the symmetry {} to be diagonal\".format(i_sym))\n\n syms_values[k, i_sym] = s_eigvals[k_i]\n\n # Add the symmetry character on the new eigen modes\n for k_i, k in enumerate(deg_list[i]):\n syms_values[k, i_sym] = s_eigvals[k_i]\n \n\n # Now we solved our polarization vectors, add them to the final ones\n final_vectors[:, mask_final] = pol_sc[:, mask_final].dot(partial_modes) \n\n # Do not further process the modes we used in this iteration\n for mode in deg_list[i]:\n skip_list.append(mode)\n\n\n return final_vectors, syms_values", "def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n if not get_irt:\n return syms\n return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1", "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def PrintSymmetries(self):\n\n print()\n print(\"Number of symmetries: {}\".format(self.QE_nsym))\n syms = self.GetSymmetries()\n for i in range(self.QE_nsym):\n print(\" Symmetry {}\".format(i+1))\n for j in range(3):\n print(\" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}\".format(*syms[i][j,:]))\n print()", "def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym", "def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n 
return ini", "def coordination_geometry_symmetry_measures(self, coordination_geometry,\n tested_permutations=False,\n points_perfect=None,\n optimization=None):\n if tested_permutations:\n tested_permutations = set()\n if self.permutations_safe_override:\n raise ValueError('No permutations safe override anymore')\n csms = []\n permutations = []\n algos = []\n local2perfect_maps = []\n perfect2local_maps = []\n for algo in coordination_geometry.algorithms:\n if algo.algorithm_type == EXPLICIT_PERMUTATIONS:\n return self.coordination_geometry_symmetry_measures_standard(\n coordination_geometry, algo,\n points_perfect=points_perfect,\n optimization=optimization)\n if algo.algorithm_type == SEPARATION_PLANE:\n cgsm = self.coordination_geometry_symmetry_measures_separation_plane(\n coordination_geometry,\n algo,\n tested_permutations=tested_permutations,\n points_perfect=points_perfect)\n csm, perm, algo, local2perfect_map, perfect2local_map = cgsm\n\n csms.extend(csm)\n permutations.extend(perm)\n algos.extend(algo)\n local2perfect_maps.extend(local2perfect_map)\n perfect2local_maps.extend(perfect2local_map)\n return csms, permutations, algos, local2perfect_maps, perfect2local_maps", "def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini", "def modes(self):\n return self.get_attr_set('modes')", "def coordination_geometry_symmetry_measures_standard(self,\n coordination_geometry,\n algo,\n points_perfect=None,\n optimization=None):\n # permutations_symmetry_measures = np.zeros(len(algo.permutations),\n # np.float)\n if optimization == 2:\n permutations_symmetry_measures = [None] * len(algo.permutations)\n permutations = list()\n algos = list()\n local2perfect_maps = list()\n perfect2local_maps = list()\n for iperm, perm in enumerate(algo.permutations):\n\n local2perfect_map = {}\n perfect2local_map = {}\n permutations.append(perm)\n for iperfect, ii in enumerate(perm):\n perfect2local_map[iperfect] = ii\n local2perfect_map[ii] = iperfect\n local2perfect_maps.append(local2perfect_map)\n perfect2local_maps.append(perfect2local_map)\n\n points_distorted = self.local_geometry.points_wcs_ctwcc(\n permutation=perm)\n\n sm_info = symmetry_measure(points_distorted=points_distorted,\n points_perfect=points_perfect)\n sm_info['translation_vector'] = self.local_geometry.centroid_with_centre\n\n permutations_symmetry_measures[iperm] = sm_info\n algos.append(str(algo))\n return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps\n else:\n permutations_symmetry_measures = [None] * len(algo.permutations)\n permutations = list()\n algos = list()\n local2perfect_maps = list()\n perfect2local_maps = list()\n for iperm, perm in enumerate(algo.permutations):\n\n local2perfect_map = {}\n perfect2local_map = {}\n permutations.append(perm)\n for iperfect, ii in enumerate(perm):\n perfect2local_map[iperfect] = ii\n local2perfect_map[ii] = iperfect\n local2perfect_maps.append(local2perfect_map)\n perfect2local_maps.append(perfect2local_map)\n\n points_distorted = self.local_geometry.points_wcs_ctwcc(\n permutation=perm)\n\n sm_info = symmetry_measure(points_distorted=points_distorted,\n points_perfect=points_perfect)\n sm_info['translation_vector'] = self.local_geometry.centroid_with_centre\n\n permutations_symmetry_measures[iperm] = sm_info\n algos.append(str(algo))\n return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps", "def modes_list(modes):\n a = modes % 10\n b = (modes % 100 
- a) // 10\n c = (modes % 1000 - b - a) // 100\n return [a, b, c]", "def coordination_geometry_symmetry_measures_fallback_random(self,\n coordination_geometry,\n NRANDOM=10,\n points_perfect=None):\n permutations_symmetry_measures = [None] * NRANDOM\n permutations = list()\n algos = list()\n perfect2local_maps = list()\n local2perfect_maps = list()\n for iperm in range(NRANDOM):\n perm = np.random.permutation(\n coordination_geometry.coordination_number)\n permutations.append(perm)\n p2l = {}\n l2p = {}\n for i_p, pp in enumerate(perm):\n p2l[i_p] = pp\n l2p[pp] = i_p\n perfect2local_maps.append(p2l)\n local2perfect_maps.append(l2p)\n\n points_distorted = self.local_geometry.points_wcs_ctwcc(\n permutation=perm)\n sm_info = symmetry_measure(points_distorted=points_distorted,\n points_perfect=points_perfect)\n sm_info['translation_vector'] = self.local_geometry.centroid_with_centre\n\n permutations_symmetry_measures[iperm] = sm_info\n algos.append('APPROXIMATE_FALLBACK')\n return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps", "def getSymmetryPlane(*args, **kwargs):\n \n pass", "def get_degeneracies(w):\n\n\n n_modes = len(w)\n\n ret_list = []\n for i in range(n_modes):\n deg_list = np.arange(n_modes)[np.abs(w - w[i]) < 1e-8]\n ret_list.append(deg_list)\n return ret_list", "def get_modes(_id):\n url = MODES_URL % _id\n arequest = requests.get(url, headers=HEADERS)\n status_code = str(arequest.status_code)\n if status_code == '401':\n _LOGGER.error(\"Token expired.\")\n return False\n return arequest.json()", "def board_symmetries(board):\n yield BoardWrapper(board)\n\n w, h = board.width, board.height\n\n symmetry_functions = [reflect_vertical, reflect_horizontal, rotate_180]\n board_is_square = (w == h)\n if board_is_square:\n symmetry_functions += [reflect_secondary_diagonal, reflect_primary_diagonal, rotate_90, rotate_270]\n\n for sf in symmetry_functions:\n new_board = board.copy()\n\n for player,move in board.__last_player_move__.items():\n if move is not board.NOT_MOVED:\n new_board.__last_player_move__[player] = sf(move, w, h)\n\n for row in range(h):\n for col in range(w):\n row2, col2 = sf((row, col), w, h)\n new_board.__board_state__[row2][col2] = board.__board_state__[row][col]\n\n yield BoardWrapper(new_board)", "def orthogonalize_modes(self):\n\n m = self.time_modes.shape[0]\n q = self.freq_modes.shape[0]\n # Find time modes with big noise.\n hi_noise_time_modes = []\n for ii in range(m):\n if np.any(self.time_mode_noise[ii].flat[::self.n_chan + 1]\n > 0.9 * T_large**2):\n hi_noise_time_modes.append(ii)\n # For each frequency mode, check the overlap with the hi noise time\n # modes.\n print hi_noise_time_modes\n for ii in range(q):\n freq_mode_noise = self.freq_mode_noise[ii]\n freq_mode = self.freq_modes[ii]\n for jj in hi_noise_time_modes:\n time_mode = self.time_modes[jj]\n amp = np.sum(time_mode\n * np.sum(freq_mode_noise * time_mode, 1))\n print jj, amp\n #if amp > T_large**2: # Very noisy mode.\n if True:\n # Subtract this mode out of the freq mode noise.\n tmp = np.sum(freq_mode_noise * time_mode, 1)\n tmp2 = np.sum(freq_mode_noise * time_mode[:,None], 0)\n freq_mode_noise[:,:] -= tmp[:,None] * time_mode\n freq_mode_noise[:,:] -= tmp2[None,:] * time_mode[:,None]\n freq_mode_noise[:,:] += (amp * time_mode[:,None]\n * time_mode[None,:])\n # Add the subtracted noise into the time mode noise. 
Add\n # it in as diagonal even though it isn't (conservative).\n self.time_mode_noise[jj,:,:] += np.diag(amp * freq_mode**2)\n # TODO: Similar proceedure could be done with time_modes and freq_modes\n # reversed. Also overlap between time_modes and time_modes as well as\n # freq_modes and freq_modes.", "def preset_modes(self) -> list:\n try:\n return list(self._ctrl_params['mode'].keys())\n except KeyError:\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE SUBSPACES OF DEGENERACIES ================================= From the given frequencies, for each mode return the list of indices of the modes degenerate with it.
def get_degeneracies(w): n_modes = len(w) ret_list = [] for i in range(n_modes): deg_list = np.arange(n_modes)[np.abs(w - w[i]) < 1e-8] ret_list.append(deg_list) return ret_list
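A minimal usage sketch of the get_degeneracies routine above (the frequency values below, and the interpretation of the 1e-8 tolerance hard-coded in the routine, are illustrative assumptions):

import numpy as np

# Toy frequency list: a triple zero (acoustic-like) block and a doubly degenerate pair
w = np.array([0.0, 0.0, 0.0, 1.5, 1.5, 2.7])

deg = get_degeneracies(w)
# deg[0] -> array([0, 1, 2]) : mode 0 is degenerate with modes 1 and 2
# deg[3] -> array([3, 4])    : modes 3 and 4 form a degenerate doublet
# deg[5] -> array([5])       : mode 5 is non degenerate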
[ "def _mode_subset(signal, freq, rate, main_freq, samples, modes=[1], width=0.2):\r\n # Compute the FFT.\r\n amp = fft_amp(signal, samples=samples)\r\n\r\n # Calculate resolution in frequency domain.\r\n res = (freq[1] - freq[0])\r\n\r\n for mode in modes:\r\n m_name = f'm{mode}'\r\n\r\n # Search limits indexes\r\n lower = int(round(main_freq * (mode - width) / res))\r\n upper = int(round(main_freq * (mode + width) / res))\r\n\r\n yield m_name, freq[lower: upper], amp[lower: upper]", "def get_mode_groups(trans_mode: int) -> Iterator[Tuple[int, int]]:\n l_array = np.arange(0, trans_mode + 1, dtype=int)\n p_array = np.arange(0, (trans_mode + 1)/2, dtype=int)\n for p in p_array:\n for l in l_array:\n if trans_mode == 2*p + l:\n yield (p, l)", "def modes_list(modes):\n a = modes % 10\n b = (modes % 100 - a) // 10\n c = (modes % 1000 - b - a) // 100\n return [a, b, c]", "def GetSymmetriesOnModesDeg(symmetries, structure, pol_vects, w_freq, timer = None, debug = False):\n\n\n Ns = len(symmetries)\n \n # Now we can pull out the translations\n pols = pol_vects\n w = w_freq\n #trans_mask = Methods.get_translations(pol_vects, structure.get_masses_array())\n\n # Exclude degeneracies\n #w = w_freq[~trans_mask]\n #pols = pol_vects[:, ~trans_mask]\n\n\n # Get the degeneracy\n n_modes = len(w)\n N_deg = np.ones(len(w), dtype = np.intc)\n n_blocks = min(len(w), 1) # Counter of the different non-degenerate modes\n start_deg = -1\n deg_space = [ [x] for x in range(n_modes)]\n final_space = []\n\n threshold = 1e-8\n\n # Compute irts once for all\n irts = []\n for i, sym_mat in enumerate(symmetries):\n irts.append(GetIRT(structure, sym_mat, timer, debug = debug))\n\n for i in range(1, len(w)):\n if np.abs(w[i-1] - w[i]) < threshold :\n N_deg[i] = N_deg[i-1] + 1\n\n if start_deg == -1:\n start_deg = i - 1\n\n for j in range(start_deg, i):\n N_deg[j] = N_deg[i]\n deg_space[j].append(i)\n deg_space[i].append(j)\n\n else:\n start_deg = -1\n n_blocks += 1\n deg_space[i-1].sort()\n final_space.append(deg_space[i-1])\n \n deg_space[-1].sort()\n final_space.append(deg_space[-1])\n\n assert len(final_space) == n_blocks\n \n \n # Now compute the symmetries only in the correct blocks\n i_mode = 0\n result_list = []\n for i in range(n_blocks): # TODO ADD MPI PARALLELIZATION\n mode_mask = np.zeros(n_modes, dtype = bool)\n\n for k in final_space[i]:\n mode_mask[k] = True\n\n \n \n #assert np.sum(mode_mask.astype(int)) == N_deg[i_mode], \"Error, something went wrong while computing the degeneracies.\"\n\n select_pols = pols[:, mode_mask]\n pol_syms = GetSymmetriesOnModes(symmetries, structure, select_pols, irts, timer, debug)\n\n i_mode += len(deg_space[i_mode])\n\n result_list.append(pol_syms)\n \n return result_list, final_space", "def mode(nums):\n frequency = {}\n nums_sorted = sorted(nums)\n for num in nums_sorted:\n if num in frequency:\n frequency[num] += 1\n else:\n frequency[num] = 1\n\n modes = []\n max = -1\n for key in frequency:\n if(frequency[key] > max):\n modes.clear()\n modes.append(key)\n max = frequency[key]\n elif(frequency[key] == max):\n modes.append(key)\n return min(modes)", "def mode_pattern(mode_number: int) -> List[int]:\n return MODE_PATTERNS[mode_number]", "def multimode(array, axis=1, num_modes=2, decimals=3, trans=False):\n modes = []\n count = []\n for i in range(array.shape[axis]):\n # get slice of array \n sliced = list(slicer(np.round(array,decimals=decimals),i,axis)) # for counting later\n a = sliced.copy()\n temp_modes = []\n while True:\n # group most_common output by frequency\n freqs 
= itertools.groupby(Counter(a).most_common(), lambda x:x[1])\n # pick off the first group (highest frequency)\n temp_modes.extend([val for val,count in next(freqs)[1]])\n if len(temp_modes) < num_modes:\n # remove the values that are already in the list of modes\n a = [value for value in a if value not in temp_modes] #TODO there is probably a faster way to do this\n else:\n break\n modes.append(temp_modes[:num_modes])\n temp_count = [sliced.count(value) for value in temp_modes[:num_modes]]\n count.append(temp_count)\n if trans:\n return np.array(modes).T, np.array(count).T\n else:\n return np.array(modes), np.array(count)", "def test_dfs_mode2():\n pdfs = np.array([0, 1, 12, 3, 5])\n x = np.arange(5)\n mds = pval.dfs_mode(pdfs, x)\n np.testing.assert_equal(mds, 2)", "def zmode(list) -> float:\n # mode = 0\n # mode_count = 0\n for i in list:\n mode_count = 0\n mode = 0\n # index = 0\n for i in list:\n if list.count(i) > mode_count:\n mode_count = list.count(i)\n mode = i\n return mode", "def get_mode(self, mode_number):\n \n # mode_number has to be a positive integer\n if mode_number <= 0:\n raise ValueError('mode_number must be a positive integer')\n \n # Get a list of the interval strings because it's easier to work with\n full_interval_list = self._str_list_of_interval_strings + [self._str_continuation_offset]\n \n # The first interval gives the rootedness of the scale, and we define all modes of a given scale to have\n # the same rootedness as that scale. To compute each mode, we simply do a cyclic permutation of the\n # remaining intervals mode_number places to the left\n rootedness = full_interval_list[0]\n intervals_to_permute = full_interval_list[1:]\n \n # First we handle the interval list\n # We usually talk about \"the first through seventh\" modes of an ionian scale, so \"mode 1\" of a scale here\n # is just the scale itself (in other words, we don't zero-index the way Python normally would). \n permuted_intervals = intervals_to_permute[mode_number - 1:] + intervals_to_permute[:mode_number - 1]\n new_list_of_interval_strings = [rootedness] + permuted_intervals[:-1]\n new_continuation_offset = permuted_intervals[-1]\n \n # Now we handle the degree list in a similar way. We'll write a function to do one permutation and then\n # run that function mode_number of times. We record some notes about the degree list. The first number\n # is the same by definition across all modes, since all modes of a given scale have the same rootedness\n # as that scale. The last number of the degree list is the number of the continuation offset, which will be the\n # same for all modes, since the distance between it and the root is the same (we add the same intervals, but in \n # a different cyclic permutation). First step is chop off the first number of the degree list to get a new list. Then we\n # decrement all numbers in the new list by (the second number - the first number) of the list. Then we add the\n # last degree back on. 
The new first degree then is the same as the old first degree by construction.\n def permute_degrees_once(degree_list): \n last_degree = degree_list[-1]\n degree_list_to_decrement = degree_list[1:]\n decrement = degree_list[1] - degree_list[0]\n decremented_degree_list = [degree - decrement for degree in degree_list_to_decrement]\n return decremented_degree_list + [last_degree]\n \n # Call this function mode_number - 1 times to get the final degree list\n # (for the first mode, we do nothing, so degree_list doesn't change)\n new_degree_list = self._degree_list\n for i in range(mode_number - 1):\n new_degree_list = permute_degrees_once(new_degree_list)\n\n # Return a scale with all of the new parameters we calculated\n return scale(new_list_of_interval_strings, new_continuation_offset, new_degree_list)", "def absorption_from_mode_intensities(f, modes, frequencies, sigmas, intensities):\n absorption = 0.0\n for mode in modes:\n v = np.real(frequencies[mode])\n sigma = sigmas[mode]\n icastep = intensities[mode]\n absorption = absorption + 2.0 * 4225.6 * icastep / PI * (sigma / (4.0 * (f - v)*(f - v) + sigma*sigma))\n return absorption", "def mode(self, mode_occurrence=False):\r\n\t\treturn find_mode(self.dataset, mode_occurrence)", "def count_modes(m, nest=False):\n npix = len(m)\n nside = ah.npix_to_nside(npix)\n for nmodes in range(npix):\n nonzeroipix = np.flatnonzero(m)\n if len(nonzeroipix):\n flood_fill(nside, nonzeroipix[0], m, nest=nest)\n else:\n break\n return nmodes", "def greedy_modelist(dZdata, comp_names, eng_cap=0.91, msg=False):\n dZdata_int = {}\n mode_lst = {}\n for ky in comp_names:\n dZdata_int[ky] = np.zeros(dZdata[ky].shape[0])\n dZdata_int[ky] = simps(np.abs(dZdata[ky])**2, axis=1)\n\n if msg:\n print('The {2} modal energy content = {0}% \\n for modes {1}\\n'.format(\n np.sort(dZdata_int[ky])[::-1][:10]/dZdata_int[ky].sum()*100,\n np.argsort(dZdata_int[ky])[::-1][:10], ky))\n total_energy = dZdata_int[ky].sum()\n assert total_energy > 0.\n energy = np.zeros((1,))\n mode_lst[ky] = []\n while energy/total_energy < eng_cap and len(mode_lst[ky]) < dZdata_int[ky].shape[0]-2:\n energy = np.sort(dZdata_int[ky])[::-1][:len(mode_lst[ky])+1].sum()\n mode_lst[ky] = np.argsort(dZdata_int[ky])[\n ::-1][:len(mode_lst[ky])+1]\n\n print('For {0}% of {2} energy, no. 
of modes required = {1}.'.format(\n eng_cap*100, len(mode_lst[ky]), ky))\n if msg:\n print('The selected modes are {0}\\n'.format(mode_lst[ky]))\n\n return mode_lst # , dZdata_int", "def modes(self):\n return self.get_attr_set('modes')", "def mode(mode):\r\n if len(mode) == 0:\r\n return '0'\r\n number = []\r\n numberz = {}\r\n for thenumber in mode:\r\n num = numberz.get(thenumber, None)\r\n if num == None:\r\n numberz[thenumber] = 1\r\n else:\r\n numberz[thenumber] = num + 1\r\n \r\n theMaximum = max(numberz.values())\r\n for key in numberz:\r\n if numberz[key] == theMaximum:\r\n print(\"The mode is\", key)", "def loadModes(self):\n self.rewind() #start at beginning of evecsf\n self.qs=[]\n while self.readMode(): self.qs.append(self.q) #append current mode\n self.nq=len(self.qs)\n self.qs=numpy.array(self.qs)", "def build_factors(\n frame,\n mode=\"xray\",\n ):\n\n factors = []\n for N in frame.N:\n symbol = _atom_symbol_table_[N]\n factors.append(AtomicFormFactor.build_factor(symbol=symbol, Z=N, mode=mode))\n return factors", "def find_modes_n_maps(rows):\n modes_n_maps = {}\n for row in rows:\n if row['mode'] not in modes_n_maps:\n modes_n_maps[row['mode']] = []\n if row['map'] not in modes_n_maps[row['mode']]:\n modes_n_maps[row['mode']].append(row['map'])\n return modes_n_maps" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE POLARIZATION VECTORS THAT DIAGONALIZE THE SYMMETRIES ============================================================= This function is very useful to obtain a complex basis in which the application of symmetries is trivial. In this basis, each symmetry is diagonal. Indeed this forces the polarization vectors to be complex in the most general case.
def get_diagonal_symmetry_polarization_vectors(pol_sc, w, pol_symmetries): raise NotImplementedError("Error, this subroutine has not been implemented.") # First we must get the degeneracies deg_list = get_degeneracies(w) # Now perform the diagonalization on each degeneracies final_vectors = np.zeros( pol_sc.shape, dtype = np.complex128) final_vectors[:,:] = pol_sc.copy() n_modes = len(w) n_syms = pol_symmetries.shape[0] skip_list = [] syms_values = np.zeros((n_modes, n_syms), dtype = np.complex128) print("All modes:") for i in range(n_modes): print("Mode {} = {} cm-1 => ".format(i, w[i] * RY_TO_CM), deg_list[i]) print() for i in range(n_modes): if i in skip_list: continue # If we have no degeneracies, we can ignore it if len(deg_list[i]) == 1: continue partial_modes = np.zeros((len(deg_list[i]), len(deg_list[i])), dtype = np.complex128) partial_modes[:,:] = np.eye(len(deg_list[i])) # identity matrix mask_final = np.array([x in deg_list[i] for x in range(n_modes)]) # If we have degeneracies, lets diagonalize all the symmetries for i_sym in range(n_syms): skip_j = [] diagonalized = False np.savetxt("sym_{}.dat".format(i_sym), pol_symmetries[i_sym, :,:]) # Get the symmetry matrix in the mode space (this could generate a problem with masses) ps = pol_symmetries[i_sym, :, :] sym_mat_origin = ps[np.outer(mask_final, mask_final)].reshape((len(deg_list[i]), len(deg_list[i]))) for j_mode in deg_list[i]: if j_mode in skip_j: continue # Get the modes that can be still degenerate by symmetries mode_dna = syms_values[j_mode, : i_sym] # Avoid a bad error if i_sym = 0 if len(mode_dna) > 0: mode_space = [x for x in deg_list[i] if np.max(np.abs(syms_values[x, :i_sym] - mode_dna)) < 1e-3] else: mode_space = [x for x in deg_list[i]] # The mask for the whole symmetry and the partial_modes mask_all = np.array([x in mode_space for x in np.arange(n_modes)]) mask_partial_mode = np.array([x in mode_space for x in deg_list[i]]) n_deg_new = np.sum(mask_all.astype(int)) if len(mode_space) == 1: continue p_modes_new = partial_modes[:, mask_partial_mode] print() print("SYMMETRY_INDEX:", i_sym) print("SHAPE sym_mat_origin:", sym_mat_origin.shape) print("MODES: {} | DEG: {}".format(mode_space, deg_list[i])) print("SHAPE P_MODES_NEW:", p_modes_new.shape) sym_mat = np.conj(p_modes_new.T).dot(sym_mat_origin.dot(p_modes_new)) # Decompose in upper triangular (assures that eigenvectors are orthogonal) s_eigvals_mat, s_eigvects = scipy.linalg.schur(sym_mat, output = "complex") s_eigvals = np.diag(s_eigvals_mat) # Check if the s_eigvals confirm the unitary of sym_mat # TODO: Check if some mass must be accounted or not... 
print("SYM_MAT") print(sym_mat) print("Eigvals:") print(s_eigvals) print("Eigval_mat:") print(s_eigvals_mat) print("Eigvects:") print(s_eigvects) assert np.max(np.abs(np.abs(s_eigvals) - 1)) < 1e-5, "Error, it seems that the {}-th matrix is not a rotation.".format(i_sym).format(sym_mat) # Update the polarization vectors to account this diagonalization partial_modes[:, mask_partial_mode] = p_modes_new.dot(s_eigvects) # Add the symmetry character on the new eigen modes for k_i, k in enumerate(mode_space): syms_values[k, i_sym] = s_eigvals[k_i] # Now add the modes analyzed up to know to the skip for x in mode_space: skip_j.append(x) diagonalized = True # Now we diagonalized the space # Apply the symmetries if we did not perform the diagonalization if not diagonalized: # Get the symmetrized matrix in the partial mode list: sym_mat = np.conj(partial_modes.T).dot(sym_mat_origin.dot(partial_modes)) # Check that it is diagonal s_eigvals = np.diag(sym_mat) disp = sym_mat - np.diag( s_eigvals) if np.max(np.abs(disp)) > 1e-4: print("Matrix {}:".format(i_sym)) print(sym_mat) raise ValueError("Error, I expect the symmetry {} to be diagonal".format(i_sym)) syms_values[k, i_sym] = s_eigvals[k_i] # Add the symmetry character on the new eigen modes for k_i, k in enumerate(deg_list[i]): syms_values[k, i_sym] = s_eigvals[k_i] # Now we solved our polarization vectors, add them to the final ones final_vectors[:, mask_final] = pol_sc[:, mask_final].dot(partial_modes) # Do not further process the modes we used in this iteration for mode in deg_list[i]: skip_list.append(mode) return final_vectors, syms_values
[ "def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries", "def GetSymmetriesOnModesDeg(symmetries, structure, pol_vects, w_freq, timer = None, debug = False):\n\n\n Ns = len(symmetries)\n \n # Now we can pull out the translations\n pols = pol_vects\n w = w_freq\n #trans_mask = Methods.get_translations(pol_vects, structure.get_masses_array())\n\n # Exclude degeneracies\n #w = w_freq[~trans_mask]\n #pols = pol_vects[:, ~trans_mask]\n\n\n # Get the degeneracy\n n_modes = len(w)\n N_deg = np.ones(len(w), dtype = np.intc)\n n_blocks = min(len(w), 1) # Counter of the different non-degenerate modes\n start_deg = -1\n deg_space = [ [x] for x in range(n_modes)]\n final_space = []\n\n threshold = 1e-8\n\n # Compute irts once for all\n irts = []\n for i, sym_mat in enumerate(symmetries):\n irts.append(GetIRT(structure, sym_mat, timer, debug = debug))\n\n for i in range(1, len(w)):\n if np.abs(w[i-1] - w[i]) < threshold :\n N_deg[i] = N_deg[i-1] + 1\n\n if start_deg == -1:\n start_deg = i - 1\n\n for j in range(start_deg, i):\n N_deg[j] = N_deg[i]\n deg_space[j].append(i)\n deg_space[i].append(j)\n\n else:\n start_deg = -1\n n_blocks += 1\n deg_space[i-1].sort()\n final_space.append(deg_space[i-1])\n \n deg_space[-1].sort()\n final_space.append(deg_space[-1])\n\n assert len(final_space) == n_blocks\n \n \n # Now compute the symmetries only in the correct blocks\n i_mode = 0\n result_list = []\n for i in range(n_blocks): # TODO ADD MPI PARALLELIZATION\n mode_mask = np.zeros(n_modes, dtype = bool)\n\n for k in final_space[i]:\n mode_mask[k] = True\n\n \n \n #assert np.sum(mode_mask.astype(int)) == N_deg[i_mode], \"Error, something went wrong while computing the degeneracies.\"\n\n select_pols = pols[:, mode_mask]\n pol_syms = GetSymmetriesOnModes(symmetries, structure, select_pols, irts, timer, debug)\n\n i_mode += len(deg_space[i_mode])\n\n result_list.append(pol_syms)\n \n return result_list, final_space", "def GetSymmetriesFromSPGLIB(spglib_sym, regolarize = False):\n \n # Check if the type is correct\n if not \"translations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'translations' key.\")\n \n if not \"rotations\" in spglib_sym:\n raise ValueError(\"Error, your symmetry dict has no 'rotations' key.\")\n \n # Get the number of symmetries\n out_sym = []\n n_sym = np.shape(spglib_sym[\"translations\"])[0]\n \n translations = spglib_sym[\"translations\"]\n rotations = spglib_sym[\"rotations\"]\n \n for i in range(n_sym):\n # Create the symmetry\n sym = np.zeros((3,4))\n sym[:,:3] = rotations[i, :, :]\n sym[:, 3] = translations[i,:]\n \n # Edit the translation\n if regolarize:\n sym[:, 3] *= 2\n sym[:, 3] = np.floor(sym[:, 3] + .5)\n sym[:, 3] *= .5\n sym[:, 3] = 
sym[:,3] % 1\n \n out_sym.append(sym)\n \n return out_sym", "def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini", "def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . 
equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)", "def InitFromSymmetries(self, symmetries, q_point = np.array([0,0,0])):\n \n nsym = len(symmetries)\n \n self.QE_nsymq = np.intc(nsym)\n self.QE_nsym = self.QE_nsymq\n \n \n for i, sym in enumerate(symmetries):\n self.QE_s[:,:, i] = np.transpose(sym[:, :3])\n \n # Get the atoms correspondence\n eq_atoms = GetIRT(self.structure, sym)\n \n self.QE_irt[i, :] = eq_atoms + 1\n \n # Get the inverse symmetry\n inv_sym = np.linalg.inv(sym[:, :3])\n for k, other_sym in enumerate(symmetries):\n if np.sum( (inv_sym - other_sym[:, :3])**2) < __EPSILON__:\n break\n \n self.QE_invs[i] = k + 1\n \n # Setup the position after the symmetry application\n for k in range(self.QE_nat):\n self.QE_rtau[:, i, k] = self.structure.coords[eq_atoms[k], :].astype(np.float64)\n \n \n # Get the reciprocal lattice vectors\n b_vectors = self.structure.get_reciprocal_vectors()\n \n # Get the minus_q operation\n self.QE_minusq = False\n\n # NOTE: HERE THERE COULD BE A BUG\n \n # q != -q\n # Get the q vectors in crystal coordinates\n q = Methods.covariant_coordinates(b_vectors, q_point)\n for k, sym in enumerate(self.QE_s):\n new_q = self.QE_s[:,:, k].dot(q)\n if np.sum( (Methods.put_into_cell(b_vectors, -q_point) - new_q)**2) < __EPSILON__:\n self.QE_minus_q = True\n self.QE_irotmq = k + 1\n break", "def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini", "def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs", "def _polyhedrize_constraints_symbolic(self,zeta):\n try:\n H=np.hstack((sym.Jacobian(zeta,self.q),sym.Jacobian(zeta,self.v_o),\n sym.Jacobian(zeta,self.u_torques),sym.Jacobian(zeta,self.u_m),\n sym.Jacobian(zeta,self.u_lambda) ))\n except:\n H=np.hstack((sym.Jacobian(zeta,self.q),sym.Jacobian(zeta,self.v_o),\n sym.Jacobian(zeta,self.u_m), # There is no torque input\n sym.Jacobian(zeta,self.u_lambda) )) \n 
h=np.dot(H,np.hstack((self.x,self.u,self.u_lambda)))-zeta\n return (H,h)", "def getSymmetryPlane(*args, **kwargs):\n \n pass", "def sym_z(self):\n return self._sym_z", "def GetSymmetryMatrix(sym, structure, crystal = False):\n\n # Get the IRT array\n irt = GetIRT(structure, sym)\n\n nat = structure.N_atoms\n sym_mat = np.zeros((3 * nat, 3*nat), dtype = np.double)\n\n # Comvert the symmetry matrix in cartesian\n if not crystal:\n sym_cryst = Methods.convert_matrix_cart_cryst2(sym[:,:3], structure.unit_cell, cryst_to_cart = True)\n else:\n sym_cryst = sym[:,:3]\n\n # Correctly fill the atomic position of sym_mat\n for i in range(nat):\n i_irt = irt[i]\n sym_mat[3 * i_irt : 3*i_irt+3, 3*i : 3*i+ 3] = sym_cryst\n\n return sym_mat", "def GetSymmetries(self, get_irt=False):\n \n syms = []\n for i in range(self.QE_nsym):\n s_rot = np.zeros( (3, 4))\n s_rot[:, :3] = np.transpose(self.QE_s[:, :, i])\n s_rot[:, 3] = self.QE_ft[:, i]\n \n syms.append(s_rot)\n \n if not get_irt:\n return syms\n return syms, self.QE_irt[:self.QE_nsym, :].copy() - 1", "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def get_hardcoded_sym_table() -> dict:\n sym_table = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ay': 5, 'b': 6,\n 'ch': 7, 'd': 8, 'dh': 9, 'eh': 10, 'er': 11, 'ey': 12,\n 'f': 13, 'g': 14, 'hh': 15, 'ih': 16, 'iy': 17, 'jh': 18,\n 'k': 19, 'l': 20, 'm': 21, 'n': 22, 'ng': 23, 'ow': 24,\n 'oy': 25, 'p': 26, 'r': 27, 's': 28, 'sh': 29, 't': 30,\n 'th': 31, 'uh': 32, 'uw': 33, 'v': 34, 'w': 35, 'y': 36,\n 'z': 37, 'zh': 38, 'sil': 39}\n return sym_table", "def singularity_polylines(self):\n\t\treturn [[self.vertex_coordinates(vkey) for vkey in polyedge] for polyedge in self.singularity_polyedges()]", "def get_spin_operators(d):\n eye = np.eye(d, dtype=complex)\n s = (d-1)/2.\n # print(s)\n sx = np.zeros([d, d], dtype=complex)\n sy = np.zeros([d, d], dtype=complex)\n sz = np.zeros([d, d], dtype=complex)\n\n for a in range(d):\n if a != 0:\n sx[a, a - 1] = np.sqrt((s + 1) * (2 * a) - (a + 1) * a) / 2\n sy[a, a - 1] = 1j * np.sqrt((s + 1) * (2 * a) - (a + 1) * a) / 2\n if a != d - 1:\n sx[a, a + 1] = np.sqrt((s + 1) * (2 * a + 2) - (a + 2) * (a + 1)) / 2\n sy[a, a + 1] = -1j * np.sqrt((s + 1) * (2 * a + 2) - (a + 2) * (a + 1)) / 2\n sz[a, a] = s - a\n if d == 2:\n sx *= 2\n sy *= 2\n sz *= 2\n return sx, sy, sz, eye", "def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \\\n supercell_size, crystal = True):\n\n # Check the supercell\n n_cell = np.prod(supercell_size)\n\n nat = unit_cell_structure.N_atoms\n nat_sc = np.shape(pols_sc)[0] / 3\n n_modes = np.shape(pols_sc)[1] \n\n ERR_MSG = \"\"\"\n Error, the supercell {} is not commensurate with the polarization vector given.\n nat = {}, nat_sc = {}\n \"\"\"\n assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc)\n assert nat_sc == supercell_structure.N_atoms\n\n # Get the reciprocal lattice\n bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi)\n\n # Get the possible Q list\n q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size)\n\n # Allocate the output variable\n q_list = np.zeros( (n_modes, 3), dtype = np.double, order = \"C\")\n\n # Get the correspondance between the unit cell and the super cell atoms\n itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py\n\n # Get the translational vectors\n R_vects = np.zeros( (nat_sc, 3), dtype = np.double)\n for i in range(nat_sc):\n R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - 
supercell_structure.coords[i,:]\n \n R_vects = R_vects.ravel()\n __thr__ = 1e-6\n\n for imu in range(n_modes):\n pol_v = pols_sc[:, imu]\n\n nq = 0\n for q in q_grid:\n q_vec = np.tile(q, nat_sc)\n q_cos = np.cos(2*np.pi * q_vec * R_vects)\n q_cos /= np.sqrt(q_cos.dot(q_cos))\n q_sin = np.sin(2*np.pi * q_vec * R_vects)\n q_sin /= np.sqrt(q_cos.dot(q_cos))\n\n cos_proj = q_cos.dot(pol_v)\n sin_proj = q_sin.dot(pol_v)\n # Wrong, this select only a translational mode\n\n if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__:\n new_q = q\n if crystal:\n new_q = Methods.covariant_coordinates(bg, q)\n q_list[imu, :] = new_q\n break\n elif cos_proj**2 + sin_proj**2 > __thr__:\n print (q_cos)\n ERROR_MSG = \"\"\"\n Error, mixing between two |q|.\n Please provide polarization vectors that are well defined in |q|.\n This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell.\n q = {}\n i_mode = {}\n\n cos_proj = {} | sin_proj = {}\n \"\"\"\n raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj))\n else:\n nq += 1\n\n \n # If we are here not q has been found\n if nq == len(q_grid):\n ERROR_MSG = \"\"\"\n Error, the polarization vector {} cannot be identified!\n No q found in this supercell!\n \"\"\"\n raise ValueError(ERROR_MSG.format(imu))\n\n\n return q_list", "def PrintSymmetries(self):\n\n print()\n print(\"Number of symmetries: {}\".format(self.QE_nsym))\n syms = self.GetSymmetries()\n for i in range(self.QE_nsym):\n print(\" Symmetry {}\".format(i+1))\n for j in range(3):\n print(\" {:3.0f}{:3.0f}{:3.0f} | {:6.3f}\".format(*syms[i][j,:]))\n print()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE Q VECTOR ================ For each polarization mode in the supercell, compute the corresponding q vector. Indeed the polarization vector will have components both at q and at -q. If a polarization vector mixes two different q an error will be raised.
def GetQForEachMode(pols_sc, unit_cell_structure, supercell_structure, \ supercell_size, crystal = True): # Check the supercell n_cell = np.prod(supercell_size) nat = unit_cell_structure.N_atoms nat_sc = np.shape(pols_sc)[0] / 3 n_modes = np.shape(pols_sc)[1] ERR_MSG = """ Error, the supercell {} is not commensurate with the polarization vector given. nat = {}, nat_sc = {} """ assert n_cell * nat == nat_sc, ERR_MSG.format(supercell_size, nat, nat_sc) assert nat_sc == supercell_structure.N_atoms # Get the reciprocal lattice bg = Methods.get_reciprocal_vectors(unit_cell_structure.unit_cell) / (2 * np.pi) # Get the possible Q list q_grid = GetQGrid(unit_cell_structure.unit_cell, supercell_size) # Allocate the output variable q_list = np.zeros( (n_modes, 3), dtype = np.double, order = "C") # Get the correspondance between the unit cell and the super cell atoms itau = supercell_structure.get_itau(unit_cell_structure) - 1 #Fort2Py # Get the translational vectors R_vects = np.zeros( (nat_sc, 3), dtype = np.double) for i in range(nat_sc): R_vects[i, :] = unit_cell_structure.coords[itau[i],:] - supercell_structure.coords[i,:] R_vects = R_vects.ravel() __thr__ = 1e-6 for imu in range(n_modes): pol_v = pols_sc[:, imu] nq = 0 for q in q_grid: q_vec = np.tile(q, nat_sc) q_cos = np.cos(2*np.pi * q_vec * R_vects) q_cos /= np.sqrt(q_cos.dot(q_cos)) q_sin = np.sin(2*np.pi * q_vec * R_vects) q_sin /= np.sqrt(q_cos.dot(q_cos)) cos_proj = q_cos.dot(pol_v) sin_proj = q_sin.dot(pol_v) # Wrong, this select only a translational mode if np.abs(cos_proj**2 + sin_proj**2 -1) < __thr__: new_q = q if crystal: new_q = Methods.covariant_coordinates(bg, q) q_list[imu, :] = new_q break elif cos_proj**2 + sin_proj**2 > __thr__: print (q_cos) ERROR_MSG = """ Error, mixing between two |q|. Please provide polarization vectors that are well defined in |q|. This can be reached using the subroutine Phonons.Phonons.DiagonalizeSupercell. q = {} i_mode = {} cos_proj = {} | sin_proj = {} """ raise ValueError(ERROR_MSG.format(q, imu, cos_proj, sin_proj)) else: nq += 1 # If we are here not q has been found if nq == len(q_grid): ERROR_MSG = """ Error, the polarization vector {} cannot be identified! No q found in this supercell! """ raise ValueError(ERROR_MSG.format(imu)) return q_list
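An illustrative toy of the projection test used above to assign a q vector to a mode, here on a 1D chain of 4 cells (the lattice vectors, the q value and the mode are assumptions, not taken from the routine):

import numpy as np

R = np.arange(4, dtype=float)            # lattice translations of a 4-cell chain (units of a)
q = 0.25                                 # a commensurate q point (units of 2*pi/a)

pol = np.cos(2 * np.pi * q * R)          # a real mode that lives on q and -q
pol /= np.linalg.norm(pol)

q_cos = np.cos(2 * np.pi * q * R); q_cos /= np.linalg.norm(q_cos)
q_sin = np.sin(2 * np.pi * q * R); q_sin /= np.linalg.norm(q_sin)

proj = q_cos.dot(pol) ** 2 + q_sin.dot(pol) ** 2
print(np.isclose(proj, 1.0))             # True: the mode is assigned to this q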
[ "def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))", "def GetNewQFromUnitCell(old_cell, new_cell, old_qs):\n \n bg = Methods.get_reciprocal_vectors(old_cell) #/ (2 * np.pi)\n new_bg = Methods.get_reciprocal_vectors(new_cell)# / (2 * np.pi)\n \n new_qs = []\n for iq, q in enumerate(old_qs):\n # Get the q point in crystal coordinates\n new_qprime = Methods.covariant_coordinates(bg, q)\n \n # Convert the crystal coordinates in the new reciprocal lattice vectors\n new_q = np.einsum(\"ji, j\", new_bg, new_qprime)\n new_qs.append(new_q)\n \n return new_qs", "def set_rq(self, r, q):\n self.position = numpy.asarray(r, dtype=numpy.float32)\n self.orientation = numpy.asarray(q, dtype=numpy.float32)\n if len(self.position.shape) == 1:\n self.position.resize((1,3))\n if len(self.position.shape) != 2:\n print('Error: can not make an array of 3D vectors from input position.')\n return None\n if len(self.orientation.shape) == 1:\n self.orientation.resize((1,4))\n if len(self.orientation.shape) != 2:\n print('Error: can not make an array of 4D vectors from input orientation.')\n return None", "def q(self):\n return self.coords.q", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def get_scattered_q_vector(hkl, rot_matrix, ub_matrix):\n matrix = np.dot(rot_matrix, ub_matrix)\n q_vector = np.dot(matrix, hkl)\n return q_vector", "def computeQindices(self):\n\n self.surf_index_Q = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_Q = PUBSlib.computeedgeindicesq(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m, self.surf_c1)\n self.vert_index_Q = PUBSlib.computevertindicesq(self.nsurf, self.nedge, self.nvert, self.surf_vert, self.surf_edge, self.surf_c1, self.edge_c1)\n self.nQ = 0\n self.nQ += max(self.vert_index_Q)\n self.nQ += max(self.edge_index_Q[:,1])\n self.nQ += self.surf_index_Q[-1,1]\n\n self.Q = numpy.zeros((self.nQ,self.nvar),order='F') \n if self.printInfo:\n print '# Degrees of freedom =',self.nQ", "def Q(self):\n return np.vstack((self.data[self.Q_keys[i]].flatten() for i in ['h', 'k', 'l', 'e', 'temp'])).T", "def GetQGrid_old(unit_cell, supercell_size):\n \n q_list = []\n # Get the recirpocal lattice vectors\n bg = Methods.get_reciprocal_vectors(unit_cell)\n \n # Get the supercell\n supercell = np.tile(supercell_size, (3, 1)).transpose() * unit_cell\n \n # Get the lattice vectors of the supercell\n bg_s = Methods.get_reciprocal_vectors(supercell)\n \n #print \"SUPERCELL:\", supercell_size\n \n for ix in range(supercell_size[0]):\n for iy in range(supercell_size[1]):\n for iz in range(supercell_size[2]):\n n_s = np.array( [ix, iy, iz], dtype = np.float64)\n q_vect = 
n_s.dot(bg_s)\n #q_vect = Methods.get_closest_vector(bg, q_vect)\n\n # Check if q is in the listcount = 0\n count = 0\n for q in q_list:\n if Methods.get_min_dist_into_cell(bg, -q_vect, q) < __EPSILON__:\n count += 1\n break\n if count > 0:\n continue\n\n # Add the q point\n q_list.append(q_vect)\n \n # Check if -q and q are different\n if Methods.get_min_dist_into_cell(bg, -q_vect, q_vect) > __EPSILON__:\n q_list.append(-q_vect)\n \n\n \n return q_list", "def getq_python(azimuth, elevation, wl_output, rot_matrix, wl_input=None):\n #The Ewald sphere has 1/wl radius\n inelastic = True\n if wl_input is None:\n inelastic = False\n wl_input = wl_output\n\n #The scattered beam emanates from the centre of this spher.\n #Find the intersection of the scattered beam and the sphere, in XYZ\n beam = column(az_elev_direction(azimuth, elevation)) / wl_output\n\n #And here is the incident beam direction: Along the z-axis, positive\n incident = np.array([0, 0, 1.0]).reshape(3,1) / wl_input\n\n #The wave vector difference between the two is the q vector\n q = 2*pi * (beam - incident)\n\n #Now we switch to the coordinate system of the crystal.\n #The scattered beam direction (the detector location) is rotated relative to the crystal\n # because the sample is rotated.\n #So is the incident beam direction.\n #Therefore, the q-vector measured is simply rotated by the supplied rotation matrix (which has reversed angles)\n\n if inelastic:\n q_unrotated = q\n q = np.dot(rot_matrix, q_unrotated)\n return (q, q_unrotated)\n else:\n q = np.dot(rot_matrix, q)\n return q", "def get_q_v(self,v=None):\r\n# if v is None:\r\n v = self.net.res_bus.at[self.bus, 'vm_pu']\r\n# p = self.net.res_sgen.at[self.gid, 'p_mw']\r\n if abs(v-1) <= self.deadband:\r\n return 0\r\n if v <= 1-self.deadband:\r\n return min(self.qmax, (v-(1-self.deadband)) * self.m_vmin)\r\n else:\r\n return max(self.qmin, (v-(1+self.deadband)) * self.m_vmax)", "def get_q(self,coord='rc',unit='au'):\n if(coord=='rc'):\n return self.param['q_rc'];\n if(coord=='cc' and unit=='au'):\n return self.param['q_cc'];\n if(coord=='cc' and unit=='si'):\n return self.param['q_cc']/0.529177249;", "def test_set_get_Q(self):\n\t\tb = RigidBody()\n\n\t\tQ = [1,0,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,1,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,1,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,0,1]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\n\t\tQ = [0.5,0,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [1,0,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)\n\n\t\tQ = [3,-4,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [3/5,-4/5,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)", "def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])", "def QR_Qvec(QR, tau, v):\n vn = array_typed_copy(v)\n _gslwrap.gsl_linalg_QR_Qvec(QR,tau,vn)\n return vn", "def init_Q(self):\n self.Q = np.matrix(np.tril(self.A))", "def getQ(self):\n return self.qFactor.get()", "def test_get_r(self):\n # Implements 
the equations of Table II of PRE 92, 068809 (2015).\n sb = solver.get_sb(p)\n sg = solver.get_sg(p)\n q_prime = solver.get_q_prime(q, sb, sg)\n r = solver.get_r(q_prime, p)\n assert_almost_equals(r[0], (1 - q[0]) * (p[1] + q_prime[1] * (1 - p[1]) * (p[2] + q_prime[2] * (1 - p[2])))) # noqa\n assert_almost_equals(r[1], (1 - q_prime[1]) * (1 - p[1]) * (p[2] + q_prime[2] * (1 - p[2]))) # noqa\n assert_almost_equals(r[2], (1 - q_prime[2]) * (1 - p[1]) * (1 - p[2]))", "def electric_field(q, r, x, y):\n\n return q * (x - r[0]) / np.hypot(x - r[0], y - r[1]) ** 3, q * (y - r[1]) / np.hypot(x - r[0], y - r[1]) ** 3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Impose the translational symmetry directly on the supercell matrix.
def ApplyTranslationsToSupercell(fc_matrix, super_cell_structure, supercell): natsc = super_cell_structure.N_atoms # Check the consistency of the passed options natsc3, _ = np.shape(fc_matrix) assert natsc == int(natsc3 / 3), "Error, wrong number of atoms in the supercell structure" assert natsc3 == _, "Error, the matrix passed has a wrong shape" assert natsc % np.prod(supercell) == 0, "Error, the given supercell is impossible with the number of atoms" # Fill the auxiliary matrix new_v2 = np.zeros( (3,3, natsc, natsc), dtype = np.double, order ="F") for i in range(natsc): for j in range(natsc): new_v2[:, :, i, j] = fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)] # The number of translations n_trans = np.prod(supercell) trans_irt = np.zeros((natsc, n_trans), dtype = np.double, order = "F") # Setup the translational symmetries for nx in range(supercell[0]): for ny in range(supercell[1]): for nz in range(supercell[2]): # Build the translational symmetry symmat = np.zeros((3,4)) symmat[:3,:3] = np.eye(3) symmat[:, 3] = np.array([nx, ny, nz], dtype = float) / np.array(supercell) nindex = supercell[2] * supercell[1] *nx nindex += supercell[2] * ny nindex += nz # Get the IRT for this symmetry operation in the supercell trans_irt[:, nindex] = GetIRT(super_cell_structure, symmat) + 1 # Apply the translations symph.trans_v2(new_v2, trans_irt) # Return back to the fc_matrix for i in range(natsc): for j in range(natsc): fc_matrix[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]
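A 1D toy of what imposing the translations amounts to: averaging the force constants over all pairs related by a lattice translation (the matrix size and values are assumptions for illustration only, not the Fortran helper used above):

import numpy as np

n = 4                                   # cells in the toy supercell
fc = np.random.rand(n, n)
fc = 0.5 * (fc + fc.T)                  # start from a symmetric but non translational matrix

fc_sym = np.zeros_like(fc)
for i in range(n):
    for j in range(n):
        # average over the pairs (i + t, j + t) obtained by translating both sites
        fc_sym[i, j] = np.mean([fc[(i + t) % n, (j + t) % n] for t in range(n)])

# After the averaging the matrix depends only on (i - j) modulo n
assert np.allclose(fc_sym[0, 1], fc_sym[1, 2])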
[ "def superimpose_apply(atoms, transformation):\n trans1, rot, trans2 = transformation\n s_coord = coord(atoms).copy()\n s_coord += trans1\n s_coord = np.dot(rot, s_coord.T).T\n s_coord += trans2\n\n if isinstance(atoms, np.ndarray):\n return s_coord\n else:\n transformed = atoms.copy()\n transformed.coord = s_coord\n return transformed", "def invert_in_place(self) -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_invert_in_place(self)", "def compute_gram_matrix_inv(self):\n if not hasattr(self, 'gram_matrix'):\n self.compute_gram_matrix()\n self.gram_matrix_inv = spla.inv(self.gram_matrix)", "def inverse(self):\n elements = [random.randint(-9, 9) for x in range(self.elmnt_count)]\n if self.symbols > 0:\n elements = self.insert_symbols(elements)\n A = Matrix(self.rows, self.rows, elements)\n try:\n inv_A = A.inv()\n except ValueError:\n inv_A = 'Singular matrix'\n A = self.printable(A)\n inv_A = self.printable(inv_A)\n return A, inv_A", "def clearaffine(self):\n self.eyeset(affine=np.identity(3))", "def transform_cell(cell):\n cell = np.array(cell)\n transform, upper_tri = np.linalg.qr(cell.T, mode=\"complete\")\n new_cell = np.transpose(upper_tri)\n\n # LAMMPS also requires positive values on the diagonal of the,\n # so invert cell if necessary\n inversion = np.eye(3)\n for i in range(3):\n if new_cell[i][i] < 0.0:\n inversion[i][i] = -1.0\n new_cell = np.dot(inversion, new_cell.T).T\n transform = np.dot(transform, inversion.T).T\n\n return new_cell, transform", "def inverse(self):\r\n\r\n try:\r\n return MMatrix(super().inverse()) # python3\r\n except:\r\n return MMatrix(super(MMatrix, self).inverse()) # python2\r", "def change_cell(nsys0,X0):\n if X0.dtype != int:\n raise TypeError('X0.dtype is wrong.')\n if X0.shape != (3,3):\n raise TypeError('X0 has wrong shape.')\n X = np.array(X0,dtype=float)\n ncp = np.zeros(3,dtype=int)\n ncp[0] = X0[0,:].max()\n ncp[1] = X0[1,:].max()\n ncp[2] = X0[2,:].max()\n\n nsys = replicate(nsys0,ncp[0],ncp[1],ncp[2])\n hmat0 = nsys0.get_hmat()\n hmat = np.dot(hmat0,X0)\n print(nsys)\n sposs = nsys.get_scaled_positions()\n spnews = np.array(sposs)\n nsys.set_hmat(hmat)\n #...Since hmat0 is that of extended system,\n #...X should correspond to it.\n X[0,:] /= ncp[0]\n X[1,:] /= ncp[1]\n X[2,:] /= ncp[2]\n Xi = np.linalg.inv(X)\n for i,p in enumerate(sposs):\n pnew = np.dot(Xi,p)\n for l in range(3):\n pnew[l] = nappy.util.pbc(pnew[l])\n spnews[i,:] = pnew[:]\n nsys.set_scaled_positions(spnews)\n return nsys", "def _inverse_rotation_matrix(self):\n return simplify(self._parent_rotation_matrix**-1)", "def make_symmetric(mat):\n mat = vectorization.expand_dims(mat, to_ndim=3)\n return (mat + np.transpose(mat, axes=(0, 2, 1))) / 2", "def hermitianize(matrix):\n\tcount_calls('hermitianize')\n\tfor i in range(0,len(matrix)):\n\t\tfor j in range(0,i):\n\t\t\tmatrix[i][j]=matrix[j][i].conjugate()\n\treturn matrix", "def inverseDeviceTransform(self, subdev=None):\n invtr = self.__inverseTransform\n if invtr == 0:\n tr = self.__transform * 1 # *1 makes a copy\n if tr is None:\n invtr = None\n else:\n inv, invertible = tr.inverted()\n if not invertible:\n raise Exception(\"Transform is not invertible.\")\n invtr = inv\n self.__inverseTransform = invtr\n tr = Qt.QMatrix4x4(invtr)\n if subdev == 0: ## indicates we should skip any subdevices\n return tr\n ## if a subdevice is specified, multiply by the subdevice's transform before returning\n dev = self.getSubdevice(subdev)\n if dev is None:\n return tr\n else:\n return 
dev.inverseDeviceTransform() * tr", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n #if self.h > 2:\n # raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n \n if self.w == 1:\n return Matrix([[1.0/self.g[0][0]]])\n \n array_row =[]\n for i in range(self.w):\n array_column=[]\n for j in range(self.h):\n array_column.append(cofactor(self,i,j))\n array_row.append(array_column)\n inverse = 1.0/self.determinant()*Matrix(array_row).T()\n\n return inverse", "def getTranslationSpaceMatrix(self, mat: 'SbMatrix', inv: 'SbMatrix') -> \"void\":\n return _coin.SoTransform_getTranslationSpaceMatrix(self, mat, inv)", "def inverse_mass_matrix_multiply(self, minv):\n self.F = np.einsum('ij,i->ij', self.F, minv)", "def ApplySymmetriesToV2(self, v2, apply_translations = True):\n\n # Apply the Permutation symmetry\n v2[:,:] = 0.5 * (v2 + v2.T)\n\n # First lets recall that the fortran subroutines\n # Takes the input as (3,3,nat,nat)\n new_v2 = np.zeros( (3,3, self.QE_nat, self.QE_nat), dtype = np.double, order =\"F\")\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n new_v2[:, :, i, j] = v2[3*i : 3*(i+1), 3*j : 3*(j+1)]\n\n # Apply the translations\n if apply_translations:\n # Check that the translations have been setted up\n assert len(np.shape(self.QE_translations_irt)) == 2, \"Error, symmetries not setted up to work in the supercell\"\n symph.trans_v2(new_v2, self.QE_translations_irt)\n \n # Apply the symmetrization\n symph.sym_v2(new_v2, self.QE_at, self.QE_bg, self.QE_s, self.QE_irt, self.QE_nsym, self.QE_nat)\n\n # Return back\n for i in range(self.QE_nat):\n for j in range(self.QE_nat):\n v2[3*i : 3*(i+1), 3*j : 3*(j+1)] = new_v2[:, :, i, j]", "def translate_to_zero(self):\n self.translate(-1 * self.geometric_center())", "def getInverse(self):\n # transpose inverts rotation but keeps the scale\n # dividing by scale^2 inverts the scale as well\n scale = self.getScale()\n mat = self.getTranspose()\n mat.m11 /= scale.x ** 2\n mat.m12 /= scale.x ** 2\n mat.m13 /= scale.x ** 2\n mat.m21 /= scale.y ** 2\n mat.m22 /= scale.y ** 2\n mat.m23 /= scale.y ** 2\n mat.m31 /= scale.z ** 2\n mat.m32 /= scale.z ** 2\n mat.m33 /= scale.z ** 2", "def normalize_adj(adj: sps.spmatrix) -> sps.spmatrix:\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sps.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET THE SYMMETRY MATRIX ======================= This subroutine converts the 3x4 symmetry matrix to a 3N x 3N matrix. It also transforms the symmetry so that it can be used directly in cartesian space. However, take care: it can be a very big matrix, so it is preferred to work with the small matrix, and maybe use a Fortran wrapper if you want speed.
def GetSymmetryMatrix(sym, structure, crystal = False): # Get the IRT array irt = GetIRT(structure, sym) nat = structure.N_atoms sym_mat = np.zeros((3 * nat, 3*nat), dtype = np.double) # Comvert the symmetry matrix in cartesian if not crystal: sym_cryst = Methods.convert_matrix_cart_cryst2(sym[:,:3], structure.unit_cell, cryst_to_cart = True) else: sym_cryst = sym[:,:3] # Correctly fill the atomic position of sym_mat for i in range(nat): i_irt = irt[i] sym_mat[3 * i_irt : 3*i_irt+3, 3*i : 3*i+ 3] = sym_cryst return sym_mat
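A small sketch of the block layout produced above, for 2 atoms exchanged by a 90 degree rotation about z (the rotation, the irt permutation and the atom count are assumed toy values, not outputs of GetIRT):

import numpy as np

S = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])            # cartesian 3x3 symmetry
irt = [1, 0]                              # the symmetry maps atom 0 onto atom 1 and vice versa
nat = 2

sym_mat = np.zeros((3 * nat, 3 * nat))
for i in range(nat):
    j = irt[i]
    sym_mat[3 * j: 3 * j + 3, 3 * i: 3 * i + 3] = S

u = np.random.rand(3 * nat)               # stacked cartesian displacements
v = sym_mat.dot(u)
assert np.allclose(v[3:6], S.dot(u[0:3])) # atom 0's displacement is rotated and moved onto atom 1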
[ "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def getRawSymmetryMatrix(*args, **kwargs):\n \n pass", "def force_symmetry(matrix, symmetry):\n symmetric_matrix = matrix.copy()\n\n if symmetry is None:\n return symmetric_matrix\n\n for index, x in np.ndenumerate(matrix):\n\n if symmetry == 'upper':\n if index[0] > index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'lower':\n if index[0] < index[1]:\n symmetric_matrix[index] = matrix[tuple(reversed(index))]\n\n if symmetry == 'mean':\n if index[0] != index[1]:\n symmetric_matrix[index] = np.mean((matrix[index], matrix[tuple(reversed(index))]))\n\n return symmetric_matrix", "def make_symmetric(mat):\n mat = vectorization.expand_dims(mat, to_ndim=3)\n return (mat + np.transpose(mat, axes=(0, 2, 1))) / 2", "def symmetric_matrix(elements):\n\n n = len(elements)\n N = int((-1 + np.sqrt(1 + 9 * n)) / 2)\n if n != (N + 1) * N / 2:\n raise Exception()\n\n triangular = np.zeros((N, N))\n triangular[np.triu_indices(N)] = np.array(elements)\n\n return triangular + np.triu(triangular, 1).T", "def unflatten_symmetric(m):\n \n vector_size = m.shape[0]\n matrix_size = int((math.sqrt(1 + 8*vector_size) - 1) / 2)\n M = np.empty((matrix_size, matrix_size), dtype=m.dtype)\n \n k = 0\n for i in range(matrix_size):\n for j in range(i, matrix_size):\n M[i,j] = m[k]\n M[j,i] = m[k]\n k += 1\n \n return M", "def make_symmetrical(matrix):\n size = len(matrix)\n result = np.zeros((size, size))\n for i in range(size):\n for j in range(i, size):\n average = (matrix[i, j] + matrix[j, i]) / 2.0\n result[i, j] = average\n result[j, i] = average\n return result", "def initialize_volume_symmetry_map(self):\n #@type pg PointGroup\n pg = self.crystal.get_point_group()\n if pg is None:\n print \"ERROR!\"\n return\n\n t1 = time.time()\n\n order = len(pg.table)\n #@type inst Instrument\n inst = self.inst\n\n #Initialize the symmetry map. Last dimension = the ORDER equivalent indices\n n = len(inst.qx_list)\n numpix = n**3\n symm = np.zeros( (numpix, order) , dtype=int)\n\n if self.verbose: print \"Starting volume symmetry calculation. Order is %d. Matrix is %d voxels (%d to a side).\" % (order, n**3, n)\n\n #--- From get_hkl_from_q functions: (moved here for speed) --\n #Get the inverse the B matrix to do the reverse conversion\n B = self.crystal.get_B_matrix()\n invB = np.linalg.inv(B)\n\n #Limit +- in q space\n qlim = inst.qlim\n \n if config.cfg.force_pure_python:\n #----------- Pure Python Version --------------\n\n #Go through each pixel\n q_arr = np.zeros( (3, numpix) )\n for (ix, qx) in enumerate(inst.qx_list):\n for (iy, qy) in enumerate(inst.qx_list):\n for (iz, qz) in enumerate(inst.qx_list):\n i = iz + iy*n + ix*n*n\n #Find the (float) HKL of this voxel at qx,qy,qz.\n q_arr[:, i] = (qx,qy,qz)\n\n #Matrix multiply invB.hkl to get all the HKLs as a column array\n hkl = np.dot(invB, q_arr)\n\n #Now get ORDER equivalent HKLs, as a long list.\n #(as equivalent q)\n q_equiv = np.zeros( (3, numpix, order) )\n for ord in xrange(order):\n #Ok, we go TABLE . hkl to get the equivalent hkl\n #Them, B . 
hkl gives you the Q vector\n q_equiv[:,:, ord] = np.dot(B, np.dot(pg.table[ord], hkl) )\n\n #Now we need to find the index into the array.\n #Start by finding the x,y,z, indices\n ix = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[0, :, ord])\n iy = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[1, :, ord])\n iz = numpy_utils.index_array_evenly_spaced(-qlim, n, inst.q_resolution, q_equiv[2, :, ord])\n\n #Now put the index into the symmetry matrix\n index = iz + iy*n + ix*n*n\n index[np.isnan(index)] = -1 #Put -1 where a NAN was found\n symm[:, ord] = index\n\n\n else:\n #--------------- Inline C version (about 17x faster than Python) ---------------\n code = \"\"\"\n\n //-- Calculate the hkl array ---\n int ix, iy, iz;\n int eix, eiy, eiz, eindex;\n int index, ord;\n double qx, qy, qz;\n double eqx, eqy, eqz;\n double h, k, l;\n double eh, ek, el;\n for (ix=0; ix<n; ix++)\n {\n qx = ix*qres - qlim;\n for (iy=0; iy<n; iy++)\n {\n qy = iy*qres - qlim;\n for (iz=0; iz<n; iz++)\n {\n qz = iz*qres - qlim;\n index = iz + iy*n + ix*n*n;\n //Ok, now we matrix multiply invB.hkl to get all the HKLs as a column array\n h = qx * INVB2(0,0) + qy * INVB2(0,1) + qz * INVB2(0,2);\n k = qx * INVB2(1,0) + qy * INVB2(1,1) + qz * INVB2(1,2);\n l = qx * INVB2(2,0) + qy * INVB2(2,1) + qz * INVB2(2,2);\n\n //Now go through each equivalency table.\n for (ord=0; ord<order; ord++)\n {\n //Do TABLE.hkl to find a new equivalent hkl\n eh = h * TABLE3(ord, 0,0) + k * TABLE3(ord, 0,1) + l * TABLE3(ord, 0,2);\n ek = h * TABLE3(ord, 1,0) + k * TABLE3(ord, 1,1) + l * TABLE3(ord, 1,2);\n el = h * TABLE3(ord, 2,0) + k * TABLE3(ord, 2,1) + l * TABLE3(ord, 2,2);\n //Now, matrix mult B . equiv_hkl to get the other q vector\n eqx = eh * B2(0,0) + ek * B2(0,1) + el * B2(0,2);\n eqy = eh * B2(1,0) + ek * B2(1,1) + el * B2(1,2);\n eqz = eh * B2(2,0) + ek * B2(2,1) + el * B2(2,2);\n\n //Ok, now you have to find the index into QSPACE\n eix = round( (eqx+qlim)/qres ); if ((eix >= n) || (eix < 0)) eix = -1; \n eiy = round( (eqy+qlim)/qres ); if ((eiy >= n) || (eiy < 0)) eiy = -1;\n eiz = round( (eqz+qlim)/qres ); if ((eiz >= n) || (eiz < 0)) eiz = -1;\n\n if ((eix < 0) || (eiy < 0) || (eiz < 0))\n {\n //One of the indices was out of bounds.\n //Put this marker to mean NO EQUIVALENT\n SYMM2(index, ord) = -1;\n }\n else\n {\n //No problem!, Now I put it in there\n eindex = eiz + eiy*n + eix*n*n;\n //This pixel (index) has this equivalent pixel index (eindex) for this order transform ord.\n SYMM2(index, ord) = eindex;\n }\n\n }\n \n }\n }\n }\n \"\"\"\n qres = inst.q_resolution\n n = len(self.inst.qx_list)\n table = np.array(pg.table) #Turn the list of 3x3 arrays into a Nx3x3 array\n varlist = ['B', 'invB', 'symm', 'qres', 'qlim', 'n', 'order', 'table']\n weave.inline(code, varlist, compiler='gcc', support_code=\"\")\n\n #Done with either version\n self.volume_symmetry = symm\n\n if self.verbose: print \"Volume symmetry map done in %.3f sec.\" % (time.time()-t1)", "def getSymmetryPlane(*args, **kwargs):\n \n pass", "def flatten_symmetric(M):\n \n matrix_size = M.shape[0]\n vector_size = matrix_size * (1 + matrix_size) / 2\n m = np.empty((vector_size,), dtype=M.dtype)\n \n k = 0\n for i in range(matrix_size):\n for j in range(i, matrix_size):\n m[k] = M[i,j]\n k += 1\n \n return m", "def symCrossMat3x3( v ):\n\n A = matrix(SR,3,3)\n A[0,1] = -1*v[2][0]\n A[0,2] = v[1][0]\n A[1,0] = v[2][0]\n A[1,2] = -1*v[0][0]\n A[2,0] = -1*v[1][0]\n A[2,1] = v[0][0]\n\n return A", "def 
_sym3x3(T):\n T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]", "def switchSymmetry( mlist, upperToLower = True ):\n shape = int( math.sqrt( 2*len(mlist) ) )\n arrays = [[] for i in xrange(shape)]\n matiter = iter(mlist)\n for idx in xrange(shape):\n if upperToLower: lbound,ubound=idx,shape\n else: lbound,ubound=0,idx+1\n for jdx in xrange(lbound,ubound):\n arrays[jdx].append( matiter.next() )\n\n return [val for sublist in arrays for val in sublist]", "def to_symmetric(self,sym):\n # Throw error if tensor is not loaded\n if not self.in_mem: raise ValueError('GEN_TEN not in memory for operation to_symmetric')\n\n # Return a copy of self if already a symtensor\n if self.is_symmetric:\n return self.copy()\n\n # Convert the full dense (sparse in symtensor lang) to symmetric version\n else:\n # Create the new tensor\n newten = self.ten.copy()\n assert(len(sym[0]) == len(newten.shape))\n # Convert the shape\n newshape = []\n for i in range(len(newten.shape)):\n newshape.append(len(sym[1][i]))\n newshape.append(newten.shape[i]/len(sym[1][i]))\n newten = newten.reshape(newshape)\n # Do a transpose on the indices\n order = []\n for i in range(len(sym[1])):\n order.append(2*i)\n for i in range(len(sym[1])):\n order.append(2*i+1)\n newten = newten.transpose(order)\n # Create a random symtensor\n newsymten = rand(newten.shape[len(sym[1]):],\n sym=sym,\n backend=self.backend,\n dtype=self.dtype,\n legs=self.legs,\n in_mem=self.in_mem)\n # Contract with delta to get dense irrep\n delta = newsymten.ten.get_irrep_map()\n einstr = LETTERS[:len(sym[1])].upper() + \\\n LETTERS[:len(sym[1])] + ',' + \\\n LETTERS[:len(sym[1])].upper() + '->' + \\\n LETTERS[:len(sym[1])-1].upper() + \\\n LETTERS[:len(sym[1])]\n newten = newsymten.backend.einsum(einstr,newten,delta)\n # Put the result into a symtensor\n newsymten.ten.array = newten\n # Return result\n return newsymten", "def to_symmetric_function(self):\n m = SymmetricFunctions(self.parent().base_ring()).monomial()\n if self.is_symmetric():\n return m._from_dict({_Partitions(list(I)): coeff\n for I, coeff in self\n if list(I) in _Partitions}, remove_zeros=False)\n else:\n raise ValueError(\"%s is not a symmetric function\"%self)", "def generateStoichiometryMatrix(self):\n cython.declare(rxn=Reaction, spec=Species, i=cython.int, j=cython.int, nu=cython.int)\n from scipy import sparse\n\n # Use dictionary-of-keys format to efficiently assemble stoichiometry matrix\n stoichiometry = sparse.dok_matrix((len(self.species), len(self.reactions)), numpy.float64)\n for rxn in self.reactions:\n j = rxn.index - 1\n # Only need to iterate over the species involved in the reaction,\n # not all species in the reaction model\n for spec in rxn.reactants:\n i = spec.index - 1\n nu = rxn.getStoichiometricCoefficient(spec)\n if nu != 0: stoichiometry[i,j] = nu\n for spec in rxn.products:\n i = spec.index - 1\n nu = rxn.getStoichiometricCoefficient(spec)\n if nu != 0: stoichiometry[i,j] = nu\n\n # Convert to compressed-sparse-row format for efficient use in matrix operations\n stoichiometry.tocsr()\n\n return stoichiometry", "def test_sym_m_product():\n amat = np.array([[1, 2, 3], [3, 4, 6]], float, order='F')\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])\n\n amat = np.array([[1, 2, 3], [3, 4, 6]], float)\n amat = np.asfortranarray(amat.dot(amat.T))\n\n out1 = amat.T.dot(amat)\n out2 = my_dsyrk(amat)\n idx = np.triu_indices(amat.shape[1])\n\n assert np.allclose(out1[idx], out2[idx])", "def 
to_sparse_matrix3(mtx, mtype=\"csr\") -> np.ndarray:\n sparser = {\n \"csr\": csr_matrix,\n \"lil\": lil_matrix,\n }\n if mtx.shape[2] != 3:\n raise ValueError(\"Matrix must have 3 channels\")\n return np.array(\n (\n sparser[mtype](mtx[:, :, 0]),\n sparser[mtype](mtx[:, :, 1]),\n sparser[mtype](mtx[:, :, 2]),\n )\n )", "def smatrix_from_3d_idx(ijk, nn=18):\n G = wgraph_from_3d_grid(ijk, nn)\n return G.to_coo_matrix()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET FORCE CONSTANTS GENERATORS ============================== Compute a minimal basis for the force constants matrix. It is very useful for compressing the force constants matrix and for reducing the number of independent displacements that need to be calculated. If it is for a supercell dynamical matrix, symmetries and structure must match the supercell. Note, the basis of the generators is not orthonormal by default. It returns both the generators and the basis. Each generator contains the atom index, the cartesian direction and the symmetry index. The corresponding row of the basis is the displacement vector transformed according to the symmetry. This allows transforming the force constants from the original basis to a more compact one. This function is not parallelized and runs only on the master core.
def get_force_constants_generators(symmetries, irts, structure, timer=None):
    displacements = []
    generators = []
    list_of_calculations = []
    n_syms = len(symmetries)
    nat3 = structure.N_atoms * 3

    if Settings.am_i_the_master():
        for i in range(structure.N_atoms):
            for j in range(3):
                # Generate the displacement
                disp = np.zeros((structure.N_atoms, 3), dtype=np.double)
                disp[i, j] += 1

                #if debug:
                #    print("Simulating displacement", i, j)

                # Check if the displacement can be decomposed in those already computed
                if timer is not None:
                    coefficients = timer.execute_timed_function(Methods.get_generic_covariant_coefficients, disp.ravel(), displacements)
                else:
                    coefficients = Methods.get_generic_covariant_coefficients(disp.ravel(), displacements)

                #if debug:
                #    print("The decomposition is:", coefficients)

                if coefficients is None:
                    # The displacement needs to be computed
                    list_of_calculations.append((i,j))

                    # Generate the symmetry equivalent displacements
                    if timer is not None:
                        disp_sym = timer.execute_timed_function(ApplySymmetriesToVector, symmetries, disp, structure.unit_cell, irts)
                    else:
                        disp_sym = ApplySymmetriesToVector(symmetries, disp, structure.unit_cell, irts)

                    # Check whether to add or not the newly generated displacements to the space
                    for i_sym in range(n_syms):
                        v = disp_sym[i_sym, :, :]

                        #if debug:
                        #    print("The symmetry {} gives a vector v = {}".format(i_sym, v))

                        if timer is not None:
                            coeffs = timer.execute_timed_function(Methods.get_generic_covariant_coefficients, v.ravel(), displacements)
                        else:
                            coeffs = Methods.get_generic_covariant_coefficients(v.ravel(), displacements)

                        #if debug:
                        #    print("Is new?", coeffs is None)

                        if coeffs is None:
                            displacements.append(v.ravel())
                            generators.append({"sym_index": i_sym, "atom_index": i, "direction": j})

                            assert len(displacements) <= nat3, "The number of displacements is not correct. Something went wrong."

                            if len(displacements) == nat3:
                                break

                # Early exit
                if len(displacements) == nat3:
                    break

            # Early exit
            if len(displacements) == nat3:
                break

    # Broadcast the displacements to all the processes
    displacements = Settings.broadcast(displacements)
    list_of_calculations = Settings.broadcast(list_of_calculations)
    generators = Settings.broadcast(generators)

    return generators, list_of_calculations, displacements
[ "def _build_basis(self, N, ms):\n states_spin = [combinations(range(m), n) for n, m in zip(N, ms)] #generate all possible indices for particles per component, combinations ensures no double occupacies\n Nstates_total = int(np.prod([int(binom(m, n)) for n, m in zip(N, ms)])) #compute total number of states as binomoial coefficient\n m_total = int(np.sum(ms))\n basis = np.zeros((Nstates_total, m_total), dtype=np.uint8) #set up basis array\n st_fermi = product(*states_spin) #cartesian product between all components generates all states, still indices\n basis_l = np.zeros(Nstates_total, dtype=np.int) #angular momentum \"operator\" (diagonal in this basis)/index per component\n self.offs_arr = np.insert(ms[:-1], 0, 0) #compute offsets for each component for indexing\n self.offs_arr_end = np.insert(ms[1:], -1, -1)\n l_diag = np.concatenate([np.arange(m) for m in ms])\n for i, idx in enumerate(st_fermi):\n s = np.zeros(m_total, dtype=np.uint8)\n for j, m in enumerate(ms):\n if idx[j]: #ensure that list of indices is not empty\n np.add.at(s, np.array(idx[j]) + self.offs_arr[j], 1) #create vector of zeros and add ones at particle indices\n basis[i, :] = s #insert into basis array\n basis_l[i] = np.sum(s * l_diag) #compute L for this state\n\n idx = np.argsort(basis_l) #sort all states wiht increasing L\n return basis[idx], basis_l[idx]", "def get_basis_functions(self, reshape=True):\n if self._basis_functions is None:\n from symfem import create_element\n\n self._basis_functions = []\n for coeff_list in self.dual_coefficients:\n v0 = self.reference.origin\n pieces = []\n for coeffs, v1, v2 in zip(\n coeff_list, self.reference.vertices,\n self.reference.vertices[1:] + self.reference.vertices[:1]\n ):\n sub_e = create_element(\"triangle\", self.fine_space, self.order)\n\n sub_basis = sub_e.map_to_cell([v0, v1, v2])\n\n if self.range_dim == 1:\n sub_fun = sym_sum(a * b for a, b in zip(coeffs, sub_basis))\n else:\n sub_fun = tuple(\n sym_sum(a * b[i] for a, b in zip(coeffs, sub_basis))\n for i in range(self.range_dim))\n pieces.append(((v0, v1, v2), sub_fun))\n self._basis_functions.append(PiecewiseFunction(pieces))\n return self._basis_functions", "def getBasis(self):\n\n\t\t# get the basis set order\n\t\torder = self.generateBasisSetOrders()\n\n\t\t# generate the basis set functions for this shell\n\t\t# each basis set function is characterized by three numbers\n\t\tl = len(order)/3\n\t\tbasisSets = [ ]\n\t\ti = 0\n\t\twhile i < l:\n\t\t\tbasisSet = basis.basis(order[3*i],order[3*i+1],order[3*i+2])\n\t\t\tbasisSets.append(basisSet)\n\t\t\ti = i + 1\n\t\treturn basisSets", "def get_unique_magnetic_structures(\n self,\n atoms,\n supercell_dim=[1, 1, 1],\n magnetic_ions=None,\n noferri=True,\n magmom=3.0,\n ):\n if magnetic_ions is None:\n magnetic_ions = set(atoms.elements)\n\n ss = atoms.make_supercell(dim=supercell_dim)\n # spg = Spacegroup3D(atoms)\n spg = Spacegroup3D(atoms) # kfg\n\n # Apply symmetry with various tolerances until we find one that works\n worked = False\n for tol in [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:\n permutations, worked = self.apply_symmetry_operations(\n ss, spg, tol=tol\n )\n if worked:\n print(\"applied sym \", tol)\n break\n if not worked:\n print(\"error in apply_symmetry_operations\")\n\n print(\"number of sym permutations:\", len(permutations))\n nat = ss.num_atoms\n\n magnetic_list = []\n magnetic_count = 0\n mag_dict = {}\n for i, el in enumerate(ss.elements):\n if el in magnetic_ions:\n magnetic_list.append(True)\n mag_dict[magnetic_count] = i\n 
magnetic_count += 1\n\n else:\n magnetic_list.append(False)\n\n print(\"magnetic count: \", magnetic_count)\n if magnetic_count == 0:\n print(\"no magnetic ions, what are you doing????\")\n return\n\n # generate all magnetic configurations\n magnetic_list = []\n for i in range(2 ** (magnetic_count)):\n magnetic_list.append(np.zeros(magnetic_count))\n # magnetic_list[-1][0] = 5.0\n\n tmp = \"00000000000000000000000000000000000000000000000000000000000000\"\n if magnetic_count > 0:\n for i in range(2 ** (magnetic_count)):\n binary_int = bin(i).replace(\"0b\", \"\") # convert to binary\n total_int = tmp + binary_int\n\n for ii, d in enumerate(total_int[-magnetic_count:]):\n if d == \"0\":\n # print(i, ii, d)\n magnetic_list[i][ii] = magmom\n else:\n # print(i, ii, d)\n magnetic_list[i][ii] = -1 * magmom\n\n if (\n noferri\n ): # get rid if ferrimagnetic configurations, only exact AFM and FM\n newlist = []\n for i in range(2 ** (magnetic_count)):\n if (\n np.abs(\n np.abs(np.sum(magnetic_list[i])) / abs(magmom)\n - magnetic_count\n )\n < 1e-5\n or np.abs(np.sum(magnetic_list[i])) < 1e-5\n ):\n newlist.append(magnetic_list[i])\n magnetic_list = newlist\n\n # convert to all atoms in cell\n mag_all = []\n for mag in magnetic_list:\n z = np.zeros(nat)\n for i, m in enumerate(mag):\n z[mag_dict[i]] = m\n mag_all.append(z)\n\n print(\"generated, now apply sym opps to find unique\")\n # apply symmetry ops\n symm_list = []\n for mag in mag_all:\n already_in_list = False\n for p in permutations:\n mag_new = mag[p]\n for s in symm_list:\n if (\n np.sum(np.abs(s - mag_new)) < 1e-5\n or np.sum(np.abs(s + mag_new)) < 1e-5\n ): # either we found the same config, or same config * -1\n already_in_list = True\n break\n if not already_in_list: # then we need it\n symm_list.append(mag)\n\n print(\"number of unique configs: \", len(symm_list))\n return symm_list, ss", "def compute_basis(self, x):\n # For each diagonal element, we have a basis element with one in that\n # position, while for off-diagonal elements we have 1 / sqrt(2) and the\n # corresponding symmetric element.\n # For example, in the case of 2x2 matrices:\n # [[1., 0.] 
[[0, 1/sqrt(2)] [[0., 0.]\n # [0., 0.]] [1/sqrt(2), 0.]] [0., 1.]]\n dim = ps.shape(x)[-1]\n n = tf.cast(dim * (dim + 1) / 2, dtype=np.int32)\n basis_tensor = tf.eye(n, dtype=x.dtype)\n basis_tensor = linalg.fill_triangular(basis_tensor)\n sqrt_2 = dtype_util.as_numpy_dtype(x.dtype)(np.sqrt(2.))\n basis_tensor = (\n basis_tensor + tf.linalg.matrix_transpose(basis_tensor)) / sqrt_2\n basis_tensor = tf.linalg.set_diag(\n basis_tensor, tf.linalg.diag_part(basis_tensor) / sqrt_2)\n return spaces.DenseBasis(basis_tensor)", "def get_conformers_distmatrix (self) :\n ncomformers = len(self.geometries)\n for i,(en,b) in enumerate(zip(self.energies,self.bconsts)) :\n for j in range(i+1,len(self.geometries)) :\n dist = self.get_overlap_with_conformer(j,en,b)\n dist_matrix[i,j] = dist\n # Symmetrize\n dist_matrix = dist_matrix + dist_matrix.transpose()\n return dist_matrix", "def basis(self):\n d = self.get_dimension()\n basis = [LieAlgebra(self) for _ in range(d)]\n z = np.zeros(d)\n for ii in range(d):\n z[ii] = 1\n basis[ii].set_vector(z)\n z[ii] = 0\n return basis", "def generate_jacobians(self):\n logger.debug(f'- Generating Jacobians for {self.class_name}')\n\n # clear storage\n self.df_syms, self.dg_syms = Matrix([]), Matrix([])\n self.calls.clear_ijv()\n\n # NOTE: SymPy does not allow getting the derivative of an empty array\n if len(self.g_matrix) > 0:\n self.dg_syms = self.g_matrix.jacobian(self.vars_list)\n\n if len(self.f_matrix) > 0:\n self.df_syms = self.f_matrix.jacobian(self.vars_list)\n\n self.df_sparse = SparseMatrix(self.df_syms)\n self.dg_sparse = SparseMatrix(self.dg_syms)\n\n vars_syms_list = list(self.vars_dict)\n algebs_and_ext_list = list(self.cache.algebs_and_ext)\n states_and_ext_list = list(self.cache.states_and_ext)\n\n fg_sparse = [self.df_sparse, self.dg_sparse]\n j_args = defaultdict(list) # argument list for each jacobian call\n j_calls = defaultdict(list) # jacobian functions (one for each type)\n\n for idx, eq_sparse in enumerate(fg_sparse):\n for item in eq_sparse.row_list():\n e_idx, v_idx, e_symbolic = item\n if idx == 0:\n eq_name = states_and_ext_list[e_idx]\n else:\n eq_name = algebs_and_ext_list[e_idx]\n\n var_name = vars_syms_list[v_idx]\n eqn = self.cache.all_vars[eq_name] # `BaseVar` that corr. to the equation\n var = self.cache.all_vars[var_name] # `BaseVar` that corr. 
to the variable\n jname = f'{eqn.e_code}{var.v_code}'\n\n # jac calls with all arguments and stored individually\n self.calls.append_ijv(jname, e_idx, v_idx, 0)\n\n # collect unique arguments for jac calls\n free_syms = self._check_expr_symbols(e_symbolic)\n for fs in free_syms:\n if fs not in j_args[jname]:\n j_args[jname].append(fs)\n j_calls[jname].append(e_symbolic)\n\n for jname in j_calls:\n self.calls.j_args[jname] = [str(i) for i in j_args[jname]]\n self.calls.j[jname] = lambdify(j_args[jname], tuple(j_calls[jname]), modules=self.lambdify_func)\n\n self.calls.j_names = list(j_calls.keys())\n\n # The for-loop below is intended to add an epsilon small value to the diagonal of `gy`.\n # The user should take care of the algebraic equations by using `diag_eps` in `Algeb` definition\n\n for var in self.parent.cache.vars_int.values():\n if var.diag_eps == 0.0:\n continue\n elif var.diag_eps is True:\n eps = self.parent.system.config.diag_eps\n else:\n eps = var.diag_eps\n\n if var.e_code == 'g':\n eq_list = algebs_and_ext_list\n else:\n eq_list = states_and_ext_list\n\n e_idx = eq_list.index(var.name)\n v_idx = vars_syms_list.index(var.name)\n\n self.calls.append_ijv(f'{var.e_code}{var.v_code}c', e_idx, v_idx, eps)", "def get_basis_functions(self, reshape=True, symbolic=True):\n raise NotImplementedError()", "def generate_1D_supercell(atoms,positions,basis,a): \n split_atoms = []\n new_atoms = {}\n for atom in atoms:\n index = atom\n letter_and_number = re.split('(\\d+)',index)\n del letter_and_number[-1]\n split_atoms.append(letter_and_number)\n for atom in range(len(atoms)):\n element = split_atoms[atom][0]\n number = int(split_atoms[atom][1])\n split_atoms.append([element,number+len(atoms)])\n for atom in range(len(split_atoms)):\n new_label = str(str(split_atoms[atom][0])+str(split_atoms[atom][1]))\n new_atoms[new_label] = atom\n\n\n \"\"\"\n Generate new list of positions\n Loop through previous list, adding new coordinates with the lattice\n parameter added on\n For 1D case, assume periodicity along z\n \"\"\"\n new_positions = positions\n for atom in range(len(positions)):\n new_positions = np.append(new_positions,[[positions[atom,0],positions[atom,1],positions[atom,2]+a]],axis=0)\n\n \n \"\"\"\n Generate new basis\n Double original basis set, then loop through the new basis modifying the\n basis function numbers and atom labels\n \"\"\"\n new_basis = np.append(basis,basis,axis=0)\n atom_list = []\n for atom in new_atoms:\n atom_list.append(atom)\n for entry in range(len(new_basis)):\n new_basis[entry][0] = int(int(entry) + 1)\n new_basis[entry][1] = atom_list[entry]\n \n return new_atoms,new_positions,new_basis", "def _forces(self):\n # Loop through force in all links (tree and twigs)\n tree_uids = [self.tree_id] + self.twigIds\n force_trees = sum(\n [normal_force_between_bodies(self.source_model.uid, i) for i in tree_uids]\n )\n\n # Loop through force in all links\n force_fruit = sum(\n [\n normal_force_between_bodies(self.source_model.uid, i)\n for i in self.fruitIds\n ]\n )\n\n # Get force on stems\n return dict(\n trees=force_trees,\n fruit=force_fruit,\n )", "def Generate(self, dyn, qe_sym = None):\n \n # Check if the symmetries must be initialize\n if qe_sym is None:\n qe_sym = CC.symmetries.QE_Symmetry(dyn.structure)\n \n \n # Get the number of irreducible q points from the matrix\n self.nq = dyn.nqirr\n self.nat = dyn.structure.N_atoms\n \n # Initialize the symmetries at q = 0\n qe_sym.SetupQPoint()\n \n # Prepare the wyckoff basis\n tmp_wyck_gen = np.zeros((3 * self.nat, 
self.nat, 3), dtype = np.float64)\n \n for i in range( 3 * self.nat):\n x = i % 3\n n = i / 3\n tmp_wyck_gen[i, n, x] = 1\n \n # Symmetrize the vector\n qe_sym.SymmetrizeVector(tmp_wyck_gen[i, :, :])\n \n # Apply the gram-schmidt\n new_gen = tmp_wyck_gen.reshape((3 * self.nat, 3 * self.nat)).transpose()\n new_gen = scipy.linalg.orth(new_gen).transpose()\n \n # Get the number of wyckoff coefficients\n self.wyck_ncoeff = new_gen.shape()[0]\n \n # Reshape the array and get the coefficients\n self.wyck_gen = new_gen.reshape((self.wyck_ncoeff, self.nat, 3))\n \n r = np.arange(3 * self.nat)\n \n self.dyn_ncoeff = np.zeros(self.nq, dtype = int)\n self.dyn_gen = []\n \n # Cycle for each irreducible q point of the matrix\n for iq in range(self.nq):\n q = dyn.q_stars[iq][0]\n # Setup the symmetries for this q point\n qe_sym.SetupQPoint(q)\n \n gh = []\n \n for i in range(self.nat * 3):\n for j in range(i, self.nat * 3):\n # Take the generator\n fc = np.zeros((3 * self.nat, 3 * self.nat), dtype = np.complex128)\n fc[i, j] = 1\n \n # Apply the symmetry\n qe_sym.SymmetrizeDynQ(q, fc)\n \n # Check if the generator has already be generated\n is_new = True\n for k in range(i+1):\n mask = fc[k, :] != 0\n first_value = r[mask]\n if len(first_value):\n if k == i:\n if first_value[0] < j:\n is_new = False\n break\n else:\n is_new = False\n break\n \n # If the generator is new\n if is_new:\n qe_sym.ImposeSumRule(fc, \"simple\")\n \n # Check if the sum rule makes this generator desappearing\n if np.sum ((fc != 0).as_type(int)) != 0:\n gh.append(fc / np.sqrt(np.trace(fc.dot(fc))))\n \n dim = len(gh)\n \n # Prepare the gram-shmidt\n gh = np.array(gh, dtype = np.complex128)\n \n gh_new = np.reshape((dim, 9 * self.nat**2)).transpose()\n gh_new = scipy.linalg.orth(gh_new).transpose()\n \n self.dyn_ncoeff = np.shape(gh_new)[0]\n \n self.dyn_gen.append(np.reshape(gh_new, (self.dyn_ncoeff, 3*self.nat, 3*self.nat)))", "def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini", "def basis_mvs(self) -> tf.Tensor:\n return self._basis_mvs", "def _generate_known_charged_molecules():\n from openforcefield.topology import Molecule\n from simtk import unit as simtk_unit\n\n sodium = Molecule.from_smiles('[Na+]')\n sodium.partial_charges = np.array([1.0]) * simtk_unit.elementary_charge\n\n potassium = Molecule.from_smiles('[K+]')\n potassium.partial_charges = np.array([1.0]) * simtk_unit.elementary_charge\n\n calcium = Molecule.from_smiles('[Ca+2]')\n calcium.partial_charges = np.array([2.0]) * simtk_unit.elementary_charge\n\n chlorine = Molecule.from_smiles('[Cl-]')\n chlorine.partial_charges = np.array([-1.0]) * simtk_unit.elementary_charge\n\n water = Molecule.from_smiles('O')\n water.partial_charges = np.array([-0.834, 0.417, 0.417]) * simtk_unit.elementary_charge\n\n return [sodium, potassium, calcium, chlorine, water]", "def gen_regular_junctions(self):\n yield from self.get_junctions_info().keys()", "def form_mat(basis, atomlist):\n mat = np.zeros((len(atomlist), len(atomlist)))\n for i, mol in enumerate(basis):\n mat[i] = get_stoich(ob.get_formula(ob.get_mol(mol)), atomlist)\n mat = mat.T\n\n return mat", "def test_generate_constants( self ) :\n print( \"test_generate_constants\" )\n\n entropy_bits = \\\n 0xd262fbc7cbc7e757d16234bd7e88f12cc5dfef7c2ee82c9a4e289113d83d8724\n n_prngs = 19\n for integer_width in [ 64, 128, 256 ] :\n\n for n_prngs in [ 7, 19, 31 ] :\n constant_generator = generate_constants( integer_width, n_prngs,\n 
entropy_bits )\n\n for _ in range( n_prngs ) :\n multiplier, addition, lag, delta = next( constant_generator)\n print( multiplier, addition, lag, delta )\n\n try :\n multiplier, addition, lag, delta = next( constant_generator)\n\n except StopIteration :\n print( \"StopIteration -- Proper result\" )\n\n print( \"success test_generate_constants\" )", "def get_Bconst (self, coords, units='MHz') :\n coords = self._translate_to_center_of_mass(coords)\n masses = numpy.array([atom.mass for atom in self.molecule])\n nats = len(masses)\n\n tensor_comps = numpy.zeros((nats,3,3))\n for k in range(3):\n for l in range(3):\n if k == l:\n p = (k+1) %3 \n q = (k+2) %3 \n tensor_comps[:,k,k] += masses * (coords[:,p]**2 + coords[:,q]**2)\n else: \n tensor_comps[:,k,l] += -1 * masses * coords[:,k] * coords[:,l]\n i_tens = tensor_comps.sum(axis=0)\n\n eigvals, eigvecs = scipy.linalg.eig(i_tens)\n eigvals = eigvals.real\n\n # Convert from (g/mol)*A2 to kg*m*s-1\n eigvals *= 1.e-23/sconst.N_A\n\n # Compute rotational constant in s-1 (correct for linear molecules)\n eigvals_tmp = eigvals.copy()\n eigvals_tmp[eigvals==0.] = 1.\n bconsts = sconst.h / (8*sconst.pi**2 * eigvals_tmp)\n bconsts[eigvals==0.] = 0.\n\n if units=='cm-1' :\n # Convert to from s-1 to cm-1\n bconsts = bconsts*1.e-2 / sconst.c\n elif units=='MHz' :\n # Convert from s-1 to MHz\n bconsts *= 1e-6\n else :\n raise Exception('The unit %s is not recognized for rotational constants '%(units))\n bconsts = numpy.sort(bconsts)[::-1]\n \n return bconsts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels the currently running training process. If training is not running, do nothing.
def cancel_training(self):
    raise NotImplementedError
[ "def training_rejected(self):\n # immediately stop writing to shared stoage\n self.logger.info(\"training_rejected: for AI:{}\".format(self.ai_id))\n self.controller.save_controller.forget_ai(self.ai_id)\n\n # queue a new coroutine to release the status lock\n asyncio.create_task(self.stop_training())", "def stop_call(self):\n print(\"Stopping Training..\")\n self.epoch.emit(0)\n self.finished.emit()", "def end_training(self):\n save_model = True\n if self.scheduler.num_bad_epochs >= self.scheduler.patience:\n self.num_bad_epochs += 1\n save_model = False\n if self.num_bad_epochs >= self.early_stopping_criteria:\n print (\"\\nEnding training early!\")\n return True\n else:\n if save_model:\n self.save(self.model_filepath)\n return False", "def terminate(self):\n self._logger.info(\n \"Terminate signaled to trainer. Training will stop after current epoch is finished\")\n self.should_terminate = True", "def cancel(self):\n assert self.running\n\n self._cancelled = True\n\n # in this section we callback on processes's deferreds, it's\n # callbacks need to know that conversion is cancelled\n self.stop_running_processes()\n self.reset_tasks_queue()\n\n self.stop_scheduler()", "def rm_trainer(self):\n self.trainers[-1].exit_flag.value = 1\n self.trainers[-1].join()\n self.trainers.pop()", "def cancel():\n\t\traise NotImplementedError()", "def cancelDonwload(self):\n if self.thread3.isRunning():\n try:\n print(\"Hilo activado y listo para detener\")\n self.ui.downModel.setEnabled(1)\n self.ui.progressBar.setValue(0)\n\n modelsDir = str(os.path.join(os.getcwd(), \"models\")) # se guarda en carpeta models\n filename = os.path.join(modelsDir, os.path.basename(self.url))\n os.remove(filename)\n self.thread3.terminate()\n self.ui.downModel.setEnabled(1)\n\n except Exception as ex:\n print(ex)\n print('!error descargar modelo')\n else:\n print(\"Hilo inactivo\")", "def cancel(self):\n with self._lock:\n self._stepsToPerform = 0\n self._lock.notifyAll()", "def cancel(self):\n query = f\"qdel {self.jobid}\"\n logger.debug(f\"Cancelling job {self.jobid} by running: {query}\")\n cmd = BuildTestCommand(query)\n cmd.execute()", "def delete_training_run(self, user, name, wait_for_completion=True):\n data = super().get_run(\"training\", user, name, fields=\"*\")\n uuid = data[\"job\"][\"parameters\"][\"generated\"][\"uuid\"]\n ret = super().delete_run(\"training\", user, name)\n if wait_for_completion:\n self._wait_for_rundelete_completion(uuid, \"training\", name)\n return ret", "def cancel_command(self):\n self._network.controller.cancel_command()\n return True", "def continue_training(self, trial: Trial) -> None:\n self._train(trial)", "def on_epoch_end(self, epoch: int, logs: Any = None) -> None:\n if os.path.exists(\"./.stop\"):\n os.remove(\"./.stop\")\n print(\"\")\n print(\"Interrupting training after epoch {0}!\".format(epoch))\n self.model.stop_training = True", "def cancel_job(self, command):\n pass", "def stop(self):\n sdk.AbortAcquisition()\n sdk.CancelWait() # I hope this doesn't throw an error", "def cancel(self):\n if self._jobid == -1:\n return\n\n os_ext.run_command('scancel %s' % self._jobid,\n check=True, timeout=settings.job_submit_timeout)\n self._is_cancelling = True\n self.wait()", "def cancel(self):\n self.cancelled = True\n cb, self._cancel_cb = self._cancel_cb, None\n if cb: cb()", "def cancelCurrent():\n if not isActive():\n return True\n \n canceled = False\n for nzb in Hellanzb.queue.currentNZBs():\n # FIXME: should GC here\n canceled = True\n nzb.cancel()\n 
os.remove(nzb.nzbFileName)\n info('Canceling download: ' + nzb.archiveName)\n Hellanzb.queue.cancel()\n try:\n hellaRename(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))\n move(Hellanzb.WORKING_DIR, os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))\n os.mkdir(Hellanzb.WORKING_DIR)\n rmtree(os.path.join(Hellanzb.TEMP_DIR, 'canceled_WORKING_DIR'))\n except Exception, e:\n error('Problem while canceling WORKING_DIR', e)\n\n if not canceled:\n debug('ERROR: isActive was True but canceled nothing (no active nzbs!??)')\n\n for nsf in Hellanzb.nsfs:\n clients = nsf.activeClients.copy()\n for client in clients:\n client.transport.loseConnection()\n \n # NOTE: WEIRD: after pool-coop branch, I have to force this to prevent\n # fetchNextNZBSegment from re-calling the fetch loop (it gets called\n # twice. the parseNZB->beginDownload->fetchNext call is made before the client\n # gets to call connectionLost). or has this problem always existed??? See r403\n client.isLoggedIn = False\n \n client.deactivate()\n \n writeStateXML()\n reactor.callLater(0, scanQueueDir)\n \n return canceled" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns new workspace names with number prefix switched.
def _swap_numbers(ws1, ws2, all_workspaces):
    new_ws1_name = re.sub('^[0-9]+', str(ws2['num']), ws1['name'])
    new_ws2_name = re.sub('^[0-9]+', str(ws1['num']), ws2['name'])
    used_names = frozenset(ws['name'] for ws in all_workspaces)
    def _avoid_used(new_name):
        while new_name in used_names:
            new_name = '{}__{}'.format(new_name,
                                       random.choice(random_workspace.WORDS))
        return new_name
    return _avoid_used(new_ws1_name), _avoid_used(new_ws2_name)
[ "def _add_number_to_workspace(workspace, all_workspaces):\n max_num = i3.max_workspace_number(all_workspaces)\n # If there are no numbered workspaces, start at 1.\n target_num = 1 if max_num is None else 1 + max_num\n i3.command('rename', 'workspace', workspace['name'], 'to',\n '{}:{}'.format(target_num, workspace['name']))", "def _update_project_numbering(self):\n new_project_numbering = [0]\n for path in self.projects_dict:\n project = self.projects_dict[path]\n\n if self.new_project_name_template[:-3] in project.name:\n try:\n number = int(project.name.split(' ')[-1])\n except Exception:\n number = 0\n new_project_numbering.append(number)\n\n self.project_counter = max(new_project_numbering) + 1", "def _reorder_workspaces(prev, debug=False):\n all_ws = i3.get_workspaces()\n output_ws = i3.focused_output_workspaces(all_ws)\n\n focused = i3.focused_workspace(output_ws)\n if focused['num'] == -1:\n _add_number_to_workspace(focused, output_ws)\n return\n\n numbered_ws = [ws for ws in output_ws if ws['num'] != -1]\n if debug: print('numbered workspaces:', numbered_ws)\n # Add buffer for wrapping.\n ws = list(itertools.chain([numbered_ws[-1]], numbered_ws, [numbered_ws[0]]))\n if prev:\n ws = list(reversed(ws))\n workspace_pairs = list(itertools.izip(ws, ws[1:]))\n\n for (ws1, ws2) in workspace_pairs:\n if debug: print('checking <{}> vs <{}>'.format(ws1['name'], ws2['name']))\n if ws1['focused']:\n new_ws1_name, new_ws2_name = _swap_numbers(ws1, ws2, all_ws)\n # TODO: sending 2 renames in 1 command causes weird inconsistencies. is\n # that expected?\n i3.command('rename', 'workspace', ws1['name'], 'to', new_ws1_name)\n i3.command('rename', 'workspace', ws2['name'], 'to', new_ws2_name)\n break\n else:\n raise RuntimeError(\"this shouldn't happen\")", "def get_workspace_names():\n workspaces = i3.get_workspaces()\n return [x.name for x in workspaces]", "def workspaces_sorted_by_number(workspaces):\n return sorted(workspaces, key=lambda workspace: int(workspace.num))", "def _generate_names(self):\n\n return [f\"Player {n + 1}\" for n in range(self._nplayers)]", "def lb_workspace_target(name):\n return 'ws-' + name.replace(':', '-').replace('/', '-')", "def rename_unique(cls):\n import re\n\n [node.rename(re.sub(r\"[\\d]+\", \"#\", node.name())) for node in pm.selected()]", "def reprefix(self, old, new):\n\n rv = FileList()\n\n for f in self:\n rv.append(f.reprefix(old, new))\n\n return rv", "def _getPrefix(self) -> str:\n return 'CHAPTER' + ('0' if int(self.number) < 10 else '') + str(self.number)", "def generate_output_workspace_name(self, event_file_name):\n out_ws_name = os.path.basename(event_file_name).split(\n '.')[0] + '_{0}banks'.format(self._number_banks)\n ref_id = out_ws_name\n\n return out_ws_name, ref_id", "def _kaloom_nw_name(prefix, network_id):\n return prefix + network_id", "def set_workspaces(self):\n for fn in os.listdir(paths.wkps):\n fn_observed, ext_observed = os.path.splitext(fn)\n if ext_observed.lower() == sppasWorkspaces.ext:\n # remove path and extension to set the name of the workspace\n wkp_name = os.path.basename(fn_observed)\n # append in the list\n self.__wkps.append(wkp_name)\n logging.debug('Founded workspace {:s}'.format(wkp_name))", "def rename(self, index, new_name):\n if index == 0:\n raise IndexError('It is not allowed to rename the Blank workspace.')\n\n su = sppasUnicode(new_name)\n u_name = su.to_strip()\n\n if u_name in self:\n raise ValueError('A workspace with name {:s} is already existing.'\n ''.format(u_name))\n\n cur_name = self[index]\n if 
cur_name == new_name:\n return\n\n src = self.check_filename(index)\n dest = os.path.join(paths.wkps, u_name) + sppasWorkspaces.ext\n shutil.move(src, dest)\n self.__wkps[index] = u_name\n\n return u_name", "def _shard_name(self, n):\n return self.output_prefix + '.' + str(n)", "def ResourceNameProjectNumberToId(name):\n template = 'projects/{}/locations/'\n project_id = properties.VALUES.core.project.GetOrFail()\n project_num = util.GetProjectNumber(project_id)\n project_id_str = template.format(project_id)\n project_num_str = template.format(project_num)\n return name.replace(project_num_str, project_id_str)", "def renameReplicaList(repls):\n import collections\n solvs = collections.Counter()\n for r in repls:\n solvs[r.solvent] += 1\n n = '%s_%i'%(r.solvent, solvs[r.solvent])\n r.setName(n)", "def new_local_orderid(self) -> str:\n self.order_count += 1\n local_orderid = self.order_prefix + str(self.order_count).rjust(self.order_rjust, \"0\")\n return local_orderid", "def component_id_creation(self, comp_no):\n comp_list = []\n for i in range(1, comp_no + 1):\n comp_list.append(component_prefix + `i`)\n return comp_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a number prefix to the workspace name.
def _add_number_to_workspace(workspace, all_workspaces):
    max_num = i3.max_workspace_number(all_workspaces)
    # If there are no numbered workspaces, start at 1.
    target_num = 1 if max_num is None else 1 + max_num
    i3.command('rename', 'workspace', workspace['name'], 'to',
               '{}:{}'.format(target_num, workspace['name']))
[ "def __prefixNumber(num, leading):\n length = int(leading)+1\n num = str(num)\n while len(num) < length:\n num = '0' + num\n return num", "def _getPrefix(self) -> str:\n return 'CHAPTER' + ('0' if int(self.number) < 10 else '') + str(self.number)", "def prefixed(filename, i, digits):\n s = str(i)\n prefix = \"0\"*(max(0,digits-len(s))) + s + \"_\"\n return prefix + filename", "def addPrefix(self, prefix):\n \n pass", "def AddPrefix(patch, text):\n return '%s%s' % (site_config.params.CHANGE_PREFIX[patch.remote], text)", "def _kaloom_nw_name(prefix, network_id):\n return prefix + network_id", "def prefix_name(name: str, prefix: str) -> str:\n if ':' in name:\n variant, c_name = name.split(':')\n s = f\"{variant}:{prefix}{c_name}\"\n else:\n s = f\"{prefix}{name}\"\n return s", "def _increment_counter(self, item, prefix):\n ref = str(item[0:ITEM_PREF]) + '_' + str(prefix) + str(self._counter)\n self._counter += 1\n return ref", "def add_prefix(name, prefix):\n index = name.find('\"')\n if index == 0:\n return name[:1] + prefix + '_' + name[1:]\n else:\n return '_'.join((prefix, name))", "def lookup_prefix(digits: str) -> int:\n if digits.startswith('977'):\n return 3\n raise ValueError(\"ISSN prefix must be '977'.\")", "def _shard_name(self, n):\n return self.output_prefix + '.' + str(n)", "def makeNumber(cls, doc):\n result = \"\"\n if not doc or not doc.teilenummer:\n result = \"D%06d\" % (util.nextval(\"DOK_NR_SEQ\"))\n else:\n doc._check_partno()\n prefSet = sqlapi.RecordSet2(\"prefixes\",\n \"prefix='%s'\" % doc.teilenummer,\n updatable=1)\n if not prefSet:\n curSeq = 1\n sqlapi.SQLinsert(\"into prefixes (prefix,seq) values ('%s',%s)\"\n % (doc.teilenummer, 2))\n else:\n curSeq = prefSet[0].seq\n prefSet[0].update(seq=(curSeq + 1))\n result = \"%s-%d\" % (doc.teilenummer, curSeq)\n return result", "def get_name(self, number: str) -> str:\n number = re.sub(REGEX_NUMBER, \"\", str(number))\n\n with suppress(KeyError):\n return self.number_dict[number]\n\n if not self.prefixes:\n return UNKNOWN_NAME\n\n for prefix in self.prefixes:\n with suppress(KeyError):\n return self.number_dict[prefix + number]\n with suppress(KeyError):\n return self.number_dict[prefix + number.lstrip(\"0\")]\n\n return UNKNOWN_NAME", "def _gen_prefix():\n timestamp = str(int(time.time() * 1000))[-5:-1]\n code = ''.join([str(random.randint(0, 9)) for x in xrange(0, 4)])\n prefix = '%s%s' % (timestamp, code)\n return prefix", "def numbered_name_of(code):\n name = code_names.get(code, '(Unnamed)')\n return '{0} {1}'.format(code, name)", "def fix_ssm_variable_prefix(param_name: str) -> str:\n prefix = get_ssm_variable_prefix()\n\n # Strip trailing slash\n param_name = param_name.rstrip(\"/\")\n\n return f\"{prefix}/\" + param_name.replace(prefix, \"\", 1).lstrip(\"/\")", "def addprefixed(unitname, range='full'):\n if range == 'engineering':\n _prefixes = _engineering_prefixes\n else:\n _prefixes = _full_prefixes\n unit = unit_table[unitname]\n for prefix in _prefixes:\n prefixedname = prefix[0] + unitname\n if prefixedname not in unit_table:\n addunit(prefixedname, prefix[1] * unit, prefixed=True, baseunit=unit, verbosename=unit.verbosename,\n url=unit.url)", "def KHR_prefix(self):\n return self.api_prefix + 'KHR_'", "def release_prefix(prefix, chart):\n return f'{prefix}-{chart[\"chart\"][\"release_name\"]}'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reorders adjacent workspaces by renaming and swapping their numbers.
def _reorder_workspaces(prev, debug=False):
    all_ws = i3.get_workspaces()
    output_ws = i3.focused_output_workspaces(all_ws)

    focused = i3.focused_workspace(output_ws)
    if focused['num'] == -1:
        _add_number_to_workspace(focused, output_ws)
        return

    numbered_ws = [ws for ws in output_ws if ws['num'] != -1]
    if debug: print('numbered workspaces:', numbered_ws)
    # Add buffer for wrapping.
    ws = list(itertools.chain([numbered_ws[-1]], numbered_ws, [numbered_ws[0]]))
    if prev:
        ws = list(reversed(ws))
    workspace_pairs = list(itertools.izip(ws, ws[1:]))

    for (ws1, ws2) in workspace_pairs:
        if debug: print('checking <{}> vs <{}>'.format(ws1['name'], ws2['name']))
        if ws1['focused']:
            new_ws1_name, new_ws2_name = _swap_numbers(ws1, ws2, all_ws)
            # TODO: sending 2 renames in 1 command causes weird inconsistencies. is
            # that expected?
            i3.command('rename', 'workspace', ws1['name'], 'to', new_ws1_name)
            i3.command('rename', 'workspace', ws2['name'], 'to', new_ws2_name)
            break
    else:
        raise RuntimeError("this shouldn't happen")
[ "def _swap_numbers(ws1, ws2, all_workspaces):\n new_ws1_name = re.sub('^[0-9]+', str(ws2['num']), ws1['name'])\n new_ws2_name = re.sub('^[0-9]+', str(ws1['num']), ws2['name'])\n used_names = frozenset(ws['name'] for ws in all_workspaces)\n def _avoid_used(new_name):\n while new_name in used_names:\n new_name = '{}__{}'.format(new_name,\n random.choice(random_workspace.WORDS))\n return new_name\n return _avoid_used(new_ws1_name), _avoid_used(new_ws2_name)", "def _add_number_to_workspace(workspace, all_workspaces):\n max_num = i3.max_workspace_number(all_workspaces)\n # If there are no numbered workspaces, start at 1.\n target_num = 1 if max_num is None else 1 + max_num\n i3.command('rename', 'workspace', workspace['name'], 'to',\n '{}:{}'.format(target_num, workspace['name']))", "def reflow_from_left_to_right():\n\n # get all the things\n workspaces = i3.get_workspaces()\n outputs = [output for output in i3.get_outputs() if output.active]\n focused_workspace = get_focused_workspace(workspaces)\n non_empty_workspace_names = get_non_empty_workspaces()\n\n # sort and extract the the ids (names)\n workspace_names = get_names(workspaces_sorted_by_number(workspaces))\n output_names = get_names(outputs_from_left_to_right(outputs))\n focused_workspace_name = focused_workspace.name if focused_workspace else None\n sorted_non_empty_workspace_names = [\n w for w in workspace_names if w in non_empty_workspace_names\n ]\n\n print((\"workspaces found: %s\" % \", \".join(workspace_names)))\n print((\"non empty workspaces: %s\" % \", \".join(non_empty_workspace_names)))\n print((\"outputs found: %s\" % \", \".join(output_names)))\n print((\"focused workspace: %s\" % focused_workspace_name))\n\n # move all workspaces to first output before reflowing\n for w in workspace_names:\n move_workspace(w, output_names[0])\n\n # reflow workspaces\n _reflow(sorted_non_empty_workspace_names, output_names)\n\n # focus back the workspace that was focused before reflowing\n if focused_workspace_name in non_empty_workspace_names:\n i3.command(\"workspace %s\" % focused_workspace_name)", "def renumber_window(sess_name, win_id_from, win_id_to):\n p = (sess_name + ':' + str(win_id_from), \\\n sess_name + ':' + str(win_id_to))\n\n cmd = (CMD_MOVE_WINDOW % p).split(config.CMD_SEP)\n util.exec_cmd(cmd)", "def move_con_to_workspace_by_name(window, workspace):\n logger.debug(\"Moving window to workspace: {}\".format(workspace))\n window.command(\"move container to workspace {}\".format(workspace))", "def swap(i3, _):\n\n # For each sticky group, try swapping the sticky container into this\n # workspace.\n for group in get_groups(i3):\n # TODO XXX For the (technically invalid) case of the placeholder being\n # on the same workspace as the sticky container, perhaps we should\n # first look up the sticky container by mark, check that it's on a\n # different workspace and then execute the command.\n i3.command('[workspace=\"__focused__\" con_mark=\"^_sticky_%s_\"] swap container with mark \"_sticky_%s\"' % (group, group))", "def _reflow(workspaces, outputs):\n\n if len(workspaces) < 2:\n # if there's 1 or 0 workspaces, apply initial configuration\n _reflow_initial_configuration(outputs)\n return\n\n i = 0\n j = 1\n workspaces_per_output = math.ceil(float(len(workspaces)) / len(outputs))\n while j < len(workspaces) + 1:\n workspace = workspaces[j - 1]\n output = outputs[i]\n print((\"%s goes on %s\" % (workspace, output)))\n move_workspace(workspace, output)\n if math.fmod(j, workspaces_per_output) == 0 and i < len(outputs) - 1:\n i 
+= 1\n j += 1", "def moveNamespace(*args, **kwargs):\n \n pass", "def workspaces_sorted_by_number(workspaces):\n return sorted(workspaces, key=lambda workspace: int(workspace.num))", "def test_reassignNames(self):\n t = self.TreeRoot\n mapping = dict([(x, str(i)) for i,x in enumerate('abfg')])\n exp_names = ['0','1','2','3','c','d','e','h']\n t.reassignNames(mapping)\n obs_names = sorted(t.getNodeNames())\n self.assertEqual(obs_names, exp_names)", "def reorder(objects, relative=int, back=bool, front=bool):\n pass", "def cmd_switch_groups(self, name):\r\n self.qtile.cmd_switch_groups(self.name, name)", "def move_game(self, current_list, game_name):\n # print('current_list, game_name: ', current_list, game_name)\n folder_name = game_name\n source_list = self.games_list if current_list == 'games' else self.ranked_games\n dest_list = self.ranked_games if current_list == 'games' else self.games_list\n # game_idx = source_list.index(game_name)\n source_list.remove(game_name)\n dest_list.insert(0, game_name)\n dest_list_name = 'rank' if current_list == 'games' else 'games'\n\n self.game_moved.emit(dest_list_name, folder_name)", "def module_order_move(self, idx_old, idx_new):\n with self.order_lock:\n self.module_order.move(idx_old, idx_new)\n \n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")", "def test_reorder(self):\n infile = StringIO(\"((a,b),(c,d));\")\n tree = read_tree(infile)\n\n infile = StringIO(\"((d,c),(b,a));\")\n tree2 = read_tree(infile)\n\n hashtree1 = tree.get_one_line_newick()\n hashtree2 = tree2.get_one_line_newick()\n self.assertTrue(hashtree1 != hashtree2)\n\n reorder_tree(tree, tree2)\n hashtree1 = tree.get_one_line_newick()\n hashtree2 = tree2.get_one_line_newick()\n self.assertEqual(hashtree1, hashtree2)", "def name_mapping_swap(self, position, direction, with_position):\n return self.request( \"name-mapping-swap\", {\n 'position': [ position, 'position', [ int, 'None' ], False ],\n 'direction': [ direction, 'direction', [ basestring, 'None' ], False ],\n 'with_position': [ with_position, 'with-position', [ int, 'None' ], False ],\n }, {\n } )", "def _create_swap_layer(qc, pattern, starting_point):\n num_qubits = len(pattern)\n for j in range(starting_point, num_qubits - 1, 2):\n if pattern[j] > pattern[j + 1]:\n qc.swap(j, j + 1)\n pattern[j], pattern[j + 1] = pattern[j + 1], pattern[j]", "def reorderContainer(relative=int, back=bool, front=bool):\n pass", "def swap(self, n1, n2):\n c1 = self.cluster_of(n1)\n c2 = self.cluster_of(n2)\n\n self.move(n1, c2, c1)\n self.move(n2, c1, c2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies the current block chain and returns True if it's valid, False otherwise.
def verify_chain(cls, block_chain):
    for (index, block) in enumerate(block_chain):
        if index == 0:
            continue

        if block.previous_hash != Hasher.hash_block(block_chain[index - 1]):
            ConsoleLogger.write_log(
                'warn',
                __name__,
                'verify_chain',
                'Block chain is invalid.'
            )
            return False

        if not cls.valid_proof(
            block.transactions[:-1],
            block.previous_hash,
            block.proof
        ):
            ConsoleLogger.write_log(
                'warn',
                __name__,
                'verify_chain',
                'Proof of work is invalid.'
            )
            return False

    return True
[ "def valid_chain(self, chain):\n previous_block = chain[0]\n index = 1\n while index < len(chain):\n block = chain[index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n if not self.valid_proof(block['proof'], previous_block['proof']):\n return False\n index += 1\n previous_block = block\n return True", "def is_chain_valid(self, chain):\n \n # Init. variables for while loop\n previous_block = chain[0]\n block_index = 1\n \n # Check that each block in the chain is valid\n while block_index < len(chain):\n # Get corresponding block\n block = chain[block_index]\n \n # Check previous_hash\n if block['previous_hash'] != self.hash(previous_block): return False\n \n # Check proof\n # Get proofs\n previous_proof = previous_block['proof']\n proof = block['proof']\n \n # Find hash\n hash_operation = hashlib.sha256(\n str(\n proof**2 - previous_proof**2).encode()).hexdigest()\n \n # Invalidate if leading values are not '0000'\n if hash_operation[:4] != '0000': return False\n \n # Move along chain if everything is okay\n previous_block=block\n block_index+=1\n \n return True", "def validate_chain(self):\n if not self.validate(self.chain[0], None):\n # genesis block\n return False\n for parent_idx, block in enumerate(self.chain[1:]):\n # remainder of chain\n if not self.validate(block, self.chain[parent_idx]):\n return False\n\n return True", "def verify_blockchain(self):\n for i in range(1, len(self.chain)):\n current_block = self.chain[i]\n previous_block = self.chain[i - 1]\n \n if current_block.previous_hash != previous_block.hash:\n return False\n\n return True", "def verify_proof_of_work(self) -> bool:\n block_dict = copy.deepcopy(self.__dict__)\n block_dict['transactions'] = [str(tx) for tx in block_dict['transactions']]\n incoming_hash = block_dict.pop('hash') # remove hash from object to verify the rest of the contents\n verify_hash = hashlib.sha256(json.dumps(block_dict).encode()).hexdigest() # recompute hash value of contents\n return verify_hash == incoming_hash", "def validate(self, block, parent):\n if not self.check_hash(block) == block.hash_val:\n # block's stored hash matches\n return False\n\n if (block.hash_val[:self.difficulty] !=\n \"\".join([\"0\" for _ in range(self.difficulty)])):\n # block's hash has the required number of zerores\n return False\n\n if parent is not None:\n # checks for non-genesis blocks (parent required)\n if block.timestamp < parent.timestamp:\n # block must have been created after its parent\n return False\n\n if parent.hash_val != block.parent_hash:\n # block's stored hash of its parent should match the parent\n # block's hash\n # n.b. 
the parent's hash is verified to be valid of its stored\n # hash since it is part of the chain, thus `validate` approved\n # it before\n return False\n\n if block.index != parent.index+1:\n # block should immediately follow its parent in the chain\n return False\n\n return True", "def replace_chain(self, chain: List[Block]) -> bool:\n return len(chain) > len(self.chain) and BlockchainVerifier.is_verified(chain)", "def verify_block(self, block):\n\t\tsha = hasher.sha256('a')\n\t\tsha.update(\n\t\t\t\tstr(block.block_id) +\n\t\t\t\tstr(block.miner_id) + \n\t\t\t\tstr(block.timestamp) + \n\t\t\t\tstr(block.data) + \n\t\t\t\tstr(block.previous_hash))\n\t\tverify_hashed = sha.hexdigest()\n\t\tif verify_hashed != block.hash:\n\t\t\tprint(\"Miner ({}) could not verify the previous generated block.\", self.mid)\n\t\t\treturn 0.\n\t\treturn 1.", "def is_valid():\n \n # Get validity of blockchain\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n \n if is_valid: response = {'message': 'The blockchain is valid!'}\n else: response = {'message': 'Error, the blockchain is invalid!'}\n\n return jsonify(response), 200", "def verify_block(self, block, data):\n existing_data = self.read_block(block)\n print(\"Verifying...\", end='')\n if existing_data != data:\n print(\" VERIFICATION ERROR!\", end='\\n\\n')\n raise VerificationError\n print(\" VERIFICATION OK\", end='\\n\\n')", "def IsBlock(self) -> bool:", "def validate_merkle_root(self):\n # reverse all the transaction hashes (self.tx_hashes)\n hashes = [h[::-1] for h in self.tx_hashes]\n # get the Merkle Root\n root = merkle_root(hashes)\n # reverse the Merkle Root\n # return whether self.merkle root is the same as\n # the reverse of the calculated merkle root\n return root[::-1] == self.merkle_root", "def validate(self):\n\t\treturn self.checksum == self.create_checksum()", "def verify_transaction(self, transaction):\n\t\tsender = Bee(transaction.sender, 0)\n\t\tsender.calculate_balance(self.chain, self.last_block().index + 1)\n\n\t\treturn sender.honeycomb >= int(transaction.amount)", "def validate_pow(self, block):\n compareStr='0'\n for idx in range(self.difficulty - 1):\n compareStr += '0'\n return block.getHeaderHash()[:self.difficulty] == compareStr and block.previousBlockHash == self.blockchain[-1].hash", "def validate_cert_chain(self):\r\n\r\n\t\tchain = self.trusting_chain\r\n\t\tif len(self.trusting_chain) <= 1:\r\n\t\t\treturn False \r\n\t\tfor i in range(0, len(chain) - 1):\r\n\r\n\t\t\tif not self.validate_certificate(chain[i]):\r\n\t\t\t\treturn False\r\n\r\n\t\t\t#verifies if the signatures are valid \r\n\t\t\tif not self.validate_signature(chain[i+1], chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\t# verifies if the certificate is not on a CRL \r\n\t\t\tif not self.crl_validation(chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\treturn True", "def valid_chain(chain):\n\n if chain['length'] < MIN_NT_DISCREPANCY:\n return False\n\n if chain['method'] != 'SOLUTION NMR':\n return chain['resolution'] is not None and \\\n chain['resolution'] <= MAX_RESOLUTION_DISCREPANCY\n return True", "def verify(self):\n ret = _core.LLVMVerifyModule(self.ptr)\n if ret != \"\":\n raise llvm.LLVMException, ret", "def validate_block_to_invoke(self, block: 'Block'):\n if self._root.block.height < 0:\n # Exception handling for genesis block\n return\n\n parent: 'PrecommitDataManager.Node' = self._precommit_data_mapper.get(block.prev_hash)\n if parent:\n if block.prev_hash == parent.block.hash and block.height == parent.block.height + 1:\n 
return\n\n raise InvalidParamsException(\n f'Failed to invoke a block: '\n f'prev_block({parent.block if parent else None}) '\n f'block_to_invoke({block})')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies the signature of a transaction.
def verify_tx_signature(tx):
    public_key = RSA.importKey(
        binascii.unhexlify(tx.sender)
    )
    verifier = PKCS1_v1_5.new(public_key)
    data_hash = Hasher.create_data_hash_256(
        tx.sender,
        tx.recipient,
        tx.amount
    )

    return verifier.verify(
        data_hash,
        binascii.unhexlify(tx.signature)
    )
[ "def __verifySignature(self, transaction: Transaction) -> bool:\n senderPublicKey = self.getSenderAccount(transaction.getSender()).get('publicKey')\n publicKey = RSA.importKey(binascii.unhexlify(senderPublicKey))\n verifier = PKCS1_v1_5.new(publicKey)\n txString = str(transaction.getOrderedDict())\n h = TLCUtilities.getDoubleHash256(txString)\n result = verifier.verify(h, binascii.unhexlify(transaction.getSignature()))\n\n if result:\n return True\n else:\n return False", "def test_signature():\n blockchain = Blockchain()\n blockchain.read_metadata()\n blockchain.read_address_pool_data()\n blockchain.read_genesis_data()\n block = blockchain._blocks[0]\n blockchain.verify_transaction('Eric Chen', block.transactions[0])", "def verify_signature(request_body, signature, hmac_key):\n computed = hmac.new(hmac_key, request_body, hashlib.sha1)\n if not hmac.compare_digest(computed.hexdigest(), signature.encode('ascii', 'ignore')):\n raise SignatureError('Computed signature does not match request signature.')", "def _verify_transaction(self, source, sign_data):\n # Process the signed transaction\n vk = self._wallet_pool.get_wallet_verifying_key(source)\n tx_data, signature = sign_data.split('|')\n tx_data = tx_data.encode()\n signature = base58.b58decode(signature.encode())\n\n # Verify the signature\n return vk.verify(signature, tx_data)", "def verify(txid, signature, public_key=None):\n if not isinstance(signature, Signature):\n if not public_key:\n raise BKeyError(\"No public key provided, cannot verify\")\n signature = Signature.from_str(signature, public_key=public_key)\n return signature.verify(txid, public_key)", "def verify_receipt_signature(self, receipt_update_retrieve_res):\n pass", "def verify(self, public_key, message, signature):", "def verify(self, key, signature, data):\n # type: (Any, bytes, bytes) -> None\n # narrow down the key type\n # https://github.com/aws/aws-dynamodb-encryption-python/issues/66\n if hasattr(key, \"private_bytes\"):\n _key = key.public_key()\n else:\n _key = key\n try:\n _key.verify(signature, data, self.padding_type(), self.hash_type())\n except Exception:\n message = \"Unable to verify signature\"\n _LOGGER.exception(message)\n raise SignatureVerificationError(message)", "def signature_checking(self,meta):\n if self.vertification(meta):\n pass\n else:\n raise Exception('Incorrect Signature')", "def test_signature_validity(curve, generator, Msg, Qx, Qy, R, S, expectedVerification):\n pubk = Public_key(generator, ellipticcurve.Point(curve, Qx, Qy))\n verificationRes = pubk.verifies(digest_integer(Msg), Signature(R, S))\n assert verificationRes == expectedVerification, \"Signature verification failed\"", "def verify(self,doc, signature):\n\n\t\tif self.pubKey:\n\t\t\tm = hashlib.sha256()\n\t\t\tm.update(doc.encode())\n\t\t\th = m.digest()\n\n\t\t\treturn self.pubKey.verify(h,signature)\n\n\t\treturn False", "def _check_signature(self, signature, data):\n\n hashed = hmac.new(self.secret_token, data, sha1)\n sig_check = f\"sha1={hashed.hexdigest()}\"\n\n return hmac.compare_digest(signature, sig_check)", "def verify_data_and_signature(signed_transaction: SignedTransaction, transaction: Transaction) -> None:\n\n assert isinstance(signed_transaction, SignedTransaction)\n assert isinstance(transaction, Transaction)\n\n if not is_signed_transaction_data_equal_to_transaction_data(signed_transaction, transaction):\n raise SCICallbackPayloadError('Received SignedTransaction does not match TransactionSigningRequest.')\n\n # If the Ethereum signature in 
SignedTransaction is not a valid signature or\n # or the transaction in SignedTransaction is not signed with the right Ethereum private key.\n message_hash = sha3(encode(transaction, UnsignedTransaction))\n\n if not ecdsa_raw_verify(\n message_hash,\n (\n signed_transaction.v,\n signed_transaction.r,\n signed_transaction.s,\n ),\n settings.CONCENT_ETHEREUM_PUBLIC_KEY,\n ):\n raise SCICallbackTransactionSignatureError(\n 'Received SignedTransaction signature data is not signed by right Ethereum private key.'\n )", "def verifies( self, hash, signature ):\n\n # From X9.62 J.3.1.\n\n G = self.generator\n n = G.order()\n r = signature.r\n s = signature.s\n if r < 1 or r > n-1: return False\n if s < 1 or s > n-1: return False\n c = numbertheory.inverse_mod( s, n )\n u1 = ( hash * c ) % n\n u2 = ( r * c ) % n\n xy = u1 * G + u2 * self.point\n v = xy.x() % n\n return v == r", "def verify(self, txid=None, public_key=None):\n if txid is not None:\n self.txid = to_hexstring(txid)\n if public_key is not None:\n self.public_key = public_key\n\n if not self.txid or not self.public_key:\n raise BKeyError(\"Please provide txid and public_key to verify signature\")\n\n if USE_FASTECDSA:\n return _ecdsa.verify(\n str(self.r),\n str(self.s),\n self.txid,\n str(self.x),\n str(self.y),\n str(secp256k1_p),\n str(secp256k1_a),\n str(secp256k1_b),\n str(secp256k1_n),\n str(secp256k1_Gx),\n str(secp256k1_Gy)\n )\n else:\n transaction_to_sign = to_bytes(self.txid)\n signature = self.bytes()\n if len(transaction_to_sign) != 32:\n transaction_to_sign = double_sha256(transaction_to_sign)\n ver_key = ecdsa.VerifyingKey.from_string(self.public_key.public_uncompressed_byte[1:],\n curve=ecdsa.SECP256k1)\n try:\n if len(signature) > 64 and signature.startswith(b'\\x30'):\n try:\n signature = convert_der_sig(signature[:-1], as_hex=False)\n except Exception:\n pass\n ver_key.verify_digest(signature, transaction_to_sign)\n except ecdsa.keys.BadSignatureError:\n return False\n except ecdsa.keys.BadDigestError as e:\n _logger.info(\"Bad Digest %s (error %s)\" % (signature.hex(), e))\n return False\n return True", "def verify(self, key, signature, data):\n # type: (Any, bytes, bytes) -> None", "def verify_certificate(self, message, signature):\n\n # detach the signature from the message\n message_without_sign = message.split(\"&sign=\")[0]\n # decode base64 the signature\n binary_signature = base64.b64decode(signature)\n # create a pubkey object\n if self.production:\n key = RSA.importKey(\n settings.PAYBOX_PUBLIC_KEY\n )\n else:\n key = RSA.importKey(\n settings.PAYBOX_TEST_PUBLIC_KEY\n )\n # digest the message\n h = SHA.new(bytes(message_without_sign, encoding=\"utf8\"))\n # and verify the signature\n verifier = PKCS1_v1_5.new(key)\n assert verifier.verify(h, binary_signature), \"Signature Verification Failed\"\n\n return True", "def verify(\n pk: PublicKey,\n signature: Signature,\n msgs: List[bytes]\n ) -> bool:\n\n #recreatign exposant part of the signature\n product = jsonpickle.decode(pk[len(msgs) + 3 - 1])\n for i, y in enumerate(pk[len(msgs) + 4 - 1: 2*len(msgs) + 4 -1]):\n product = product * (jsonpickle.decode(y) ** Bn.from_binary(msgs[i].encode()))\n\n #checking that the signaure is correct using the bilinear function and that sigma1 is not the neutral element\n if (jsonpickle.decode(signature[0]).pair(product) == jsonpickle.decode(signature[1]).pair(jsonpickle.decode(pk[len(msgs) + 2 -1])) \n and not jsonpickle.decode(signature[0]).is_neutral_element()):\n return True\n else :\n return False", "def verify(hash, 
signature, key_path=\"~/.ssh/ida_rsa\"):\n key = open(expanduser(key_path), \"r\").read()\n rsakey = RSA.importKey(key) \n pubkey = key.publickey()\n return pubkey.verify(hash, b64decode(signature)) == True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a File vertex to the graph.
def _addFileNode(self, f: File):
    # Add a vertex for the file.
    self.vertices[str(f.inode)] = "file"
[ "def add_vertex(self, vertex):\r\n self.vertices.append(vertex)", "def add_vertex(self, v):\n pass", "def add_vertex(self, vertex):\n\n\t\tself.vertices.append(vertex)", "def add_vertex(self, vertex: str):\n Logger.log(Logger.LogLevel.VERBOSE,\n f\"Adding vertex {self.vertex_count}: {vertex}\")\n self.vertices[self.vertex_count] = vertex\n self.vertex_count += 1", "def add_vertex(self, v):\n self.v_sources.add(v)", "def add_file(self, file_path):\n self._repo.index.add([str(file_path)])", "def append_vertex(self, vertex):", "def add(self, filename):\n self.index.add_new_file(filename)", "def _add_file(file_path):\n _db_content[\"files\"].append(file_path)", "def add_vertex(self, vertex):\n if vertex.label not in self.vertices():\n self.__graph_dict[vertex.label] = vertex", "def add_vertex(self, item: Any, kind: str) -> None:\r\n if item not in self.vertices:\r\n self.vertices[item] = WeightedVertex(item, kind)", "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = acc.actor.desktopid\n dest = str(f.inode)\n\n # Add the edge.\n self.edges.add((source, dest))\n\n # Calculate the number of individual instances who accessed the file.\n insts = self.instancesPerFile.get(source+dest) or set()\n insts.add(acc.actor.uid())\n self.instancesPerFile[source+dest] = insts\n self.weights[(source, dest)] = len(insts)", "def add_vertex(self):\n u = self.g.add_vertex()\n return u", "def add_file(self, fpath):\n if not os.path.isfile(fpath):\n print(\"cloudtalker: cannot find file\", fpath)\n return None\n #try to parse filename\n parsed = self.parse_filename(fpath)\n print(\"after parsing:\", parsed)\n if parsed is not None:\n fdata = {\n \"path\": fpath,\n \"type\": parsed[0],\n \"ts\": parsed[1],\n \"segno\": parsed[2],\n }\n self.inq.put(fdata)\n print(\"upload module accepted file\", fpath)", "def addVertex(self, label):\n index = len(self.index)\n self.index[label] = index\n self.vertex[index] = label", "def add_vertex(self, vertex):\n if isinstance(vertex, Vertex) and vertex.name not in self.vertices:\n self.vertices[vertex.name] = vertex\n return True\n else:\n return False", "def add_file(self, root, filename):\n if filename in self.ignored_filenames:\n return\n\n item = File(os.path.join(root, filename, configuration=self.configuration))\n if root in self.__directory_index__:\n item.parent = self.__directory_index__[root]\n self.files.append(item)", "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = acc.actor.uid()\n dest = str(f.inode)\n\n # Add the edge, and count a single access (unweighted clustering).\n self.edges.add((source, dest))\n self.weights[(source, dest)] = 1", "def add_file(self, filename, UUID):\n self.files[UUID] = Data(filename=filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the clusters for this graph.
def computeClusters(self):
    comm = self.g.community_fastgreedy(weights=self.g.es["weight"])
    self.clusters = comm.as_clustering()
[ "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(1)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def get_cluster_centers(self):\n pass", "def generate_clusters(self):\n\n self.cluster_labels = None", "def __compute_cluster_centers(self):\n center = dict()\n for index,class_key in enumerate(self.classes):\n membership_list = np.array([mb[index] for mb in self.df.membership])\n membership_list = membership_list**self.m\n num = np.dot(membership_list, self.X)\n den = np.sum(membership_list)\n center[class_key] = num/den\n return center", "def cluster_nodes(self):\n clusters = []\n explored = set()\n nodes_iter = iter(self._data)\n while len(explored) < len(self._data):\n node1 = next(nodes_iter)\n if node1 not in explored:\n this_cluster = set()\n new_neighbors = self._data[node1]\n while new_neighbors:\n this_cluster.update(new_neighbors)\n explored.update(new_neighbors)\n just_added = new_neighbors\n new_neighbors = set(i for node in just_added for i in self._data[node] if i not in explored)\n clusters.append(this_cluster)\n return clusters", "def get_cluster_centers(self):\n return None", "def __update_clusters(self):\n \n clusters = [[] for i in range(len(self.__centers))];\n for index_point in range(len(self.__pointer_data)):\n index_optim = -1;\n dist_optim = 0.0;\n \n for index in range(len(self.__centers)):\n # dist = euclidean_distance(data[index_point], centers[index]); # Slow solution\n dist = euclidean_distance_sqrt(self.__pointer_data[index_point], self.__centers[index]); # Fast solution\n \n if ( (dist < dist_optim) or (index is 0)):\n index_optim = index;\n dist_optim = dist;\n \n clusters[index_optim].append(index_point);\n \n # If cluster is not able to capture object it should be removed\n clusters = [cluster for cluster in clusters if len(cluster) > 0];\n \n return clusters;", "def to_clusters(self):\n ltrue, lpred = self.to_labels()\n return labels_to_clusters(ltrue, lpred)", "def get_clusters(self):\n return set(host.cluster for host in self.hosts)", "def cluster_nodes(self) -> pulumi.Output[Sequence['outputs.ClusterClusterNode']]:\n return pulumi.get(self, \"cluster_nodes\")", "def __update_centers(self):\n \n centers = [[] for i in range(len(self.__clusters))];\n \n for index in range(len(self.__clusters)):\n point_sum = [0] * len(self.__pointer_data[0]);\n \n for index_point in self.__clusters[index]:\n point_sum = list_math_addition(point_sum, self.__pointer_data[index_point]);\n \n centers[index] = list_math_division_number(point_sum, len(self.__clusters[index]));\n \n return centers;", "def compute_centers(self):\n for cluster_ in range(self.number_clusters): # type: ignore\n center = np.mean(self.data[self.model.labels_ == cluster_], axis=0) # type: ignore\n if center.isnull().values.any(): # type: ignore\n self.centers[cluster_] = center.fillna(0) # type: ignore\n else:\n self.centers[cluster_] = center", "def get_cluster_list(self):\n LOG.info(\"Getting clusters\")\n return self.client.request(constants.GET,\n constants.GET_CLUSTER.format\n (self.server_ip), payload=None,\n querystring=constants.\n SELECT_ID_AND_NAME)", "def clusters(self):\n return (self.input_array[lower:upper]\n for lower, upper in self.slices)", "def centers(self, sort=True):\n if sort:\n centers = 
sorted(map(list, self.clusters))\n else:\n centers = list(map(list, self.clusters))\n return centers", "def _initialize_clusters(self):\n max_cap = self.config.capacity_cst\n total_demand = self.manager_stops.demand\n list_number_cluster = [int(total_demand/(i * max_cap)) for i in [0.75,1,1.25]]\n # list_number_cluster = [int(total_demand/(k * max_cap)) for k in [0.4]]\n\n Kmean_basic = basic_K_mean.basicKMeans(manager_cluster=self.manager_cluster,manager_stops=self.manager_stops)\n for k in list_number_cluster:\n Kmean_basic.run_K_mean(list(self.manager_stops.keys()),k)", "def cluster_characters(self):\n cooccurences = np.zeros((len(self.characters), len(self.characters)))\n for scene in self:\n for character_i in scene.characters:\n for character_j in scene.characters:\n cooccurences[character_i.id, character_j.id] += 1.0\n cooccurences[character_j.id, character_i.id] = cooccurences[\n character_i.id, character_j.id]\n cooccurences = cooccurences / cooccurences.sum()\n clusterer = DBSCAN(eps=cooccurences.mean(), min_samples=1)\n clustering = clusterer.fit_predict(cooccurences)\n for character in self.characters:\n # check if this propagates\n character.cluster = clustering[character.id]", "def centroids(self):\n return [r2.centroid(n) for i, n in self.to_vertices.items()]", "def generate_clusters(self,D):\n\n condensed = squareform(D.dist_frame)\n linkage = hcl.average(condensed)\n self.clusters = hcl.fcluster(linkage,self.factor,criterion=self.criterion)\n\n self.num_clusters = n_clusters = len(np.unique(self.clusters)) - (1 if -1 in clusters else 0)\n self.cluster_labels = pd.DataFrame({'sequences' : D.dist_frame.index, \n 'cluster' : self.clusters})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an Application vertex to the graph.
def _addAppNode(self, app: Application):
    # Add a vertex for the app.
    self.vertices[app.uid()] = "app"

    # Remember instances of an app so we can connect them.
    inst = self.instances.get(app.desktopid) or []
    inst.append(app.uid())
    self.instances[app.desktopid] = inst

    # Ensure there is a node modelling the app's state.
    self.vertices[app.desktopid] = "appstate"
    self.edges.add((app.desktopid, app.uid()))
    self.weights[(app.desktopid, app.uid())] = 1
[ "def _addAppNode(self, app: Application):\n # Add a vertex for the app.\n self.vertices[app.uid()] = \"app\"\n\n # Remember instances of an app so we can connect them.\n inst = self.instances.get(app.desktopid) or []\n inst.append(app.uid())\n self.instances[app.desktopid] = inst\n\n # Ensure there is a node modelling the app's state.\n self.vertices[app.desktopid] = \"appstate\"\n self.edges.add((app.desktopid, app.uid()))\n self.weights[(app.desktopid, app.uid())] = 0.0000000001", "def add_vertex(self, vertex):\r\n self.vertices.append(vertex)", "def add_vertex(self):\n u = self.g.add_vertex()\n return u", "def add_vertex(self, vertex):\n\n\t\tself.vertices.append(vertex)", "def add_vertex(self, v):\n pass", "def add_vertex(self, vertex: str):\n Logger.log(Logger.LogLevel.VERBOSE,\n f\"Adding vertex {self.vertex_count}: {vertex}\")\n self.vertices[self.vertex_count] = vertex\n self.vertex_count += 1", "def append_vertex(self, vertex):", "def add_vertex(self, vertex):\n if vertex.label not in self.vertices():\n self.__graph_dict[vertex.label] = vertex", "def add_vertex(self, item: Any, kind: str) -> None:\r\n if item not in self.vertices:\r\n self.vertices[item] = WeightedVertex(item, kind)", "def add_vertex(self, vertex):\n if isinstance(vertex, Vertex) and vertex.name not in self.vertices:\n self.vertices[vertex.name] = vertex\n return True\n else:\n return False", "def addVertex(self, label):\n index = len(self.index)\n self.index[label] = index\n self.vertex[index] = label", "def add_bgp_vertices(self, router):\n self.add_vertex(self.bgp_name(router), subgraph=self._bgp_sub)", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n self.__directed_dict[vertex] = []", "def add_program(self, program):\n ...", "def add_vertex(self, v):\n self.v_sources.add(v)", "def add_vertex(self, v):\n if v not in self.vertices.keys(): \n self.vertices[v] = [False,[],0]", "def add_vertex(self, id, vertex):\n \n # Check if vertex with given id already exists.\n if id in self.vertices:\n return\n \n # Check if each vertex in adjacent_to exists.\n for i in vertex.adjacent_to:\n if not i in self.vertices:\n return\n \n # Add given vertex at given id.\n self.vertices[id] = vertex\n \n # Add id to adjacent_to of each vertex in vertex's adjacent_to.\n for i in vertex.adjacent_to:\n self.vertices[i].add_edge(id)", "def addEdge(self, u, v):\r\n self.graph[u].append(v)", "def add_atom(self, atom):\n return self.add_vertex(atom)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a FileAccess edge to the graph.
def _addAccess(self, f: File, acc: FileAccess):
    # Get the source and destination vertex ids.
    source = acc.actor.uid()
    dest = str(f.inode)

    # Add the edge, and count a single access (unweighted clustering).
    self.edges.add((source, dest))
    self.weights[(source, dest)] = 1
[ "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = acc.actor.desktopid\n dest = str(f.inode)\n\n # Add the edge.\n self.edges.add((source, dest))\n\n # Calculate the number of individual instances who accessed the file.\n insts = self.instancesPerFile.get(source+dest) or set()\n insts.add(acc.actor.uid())\n self.instancesPerFile[source+dest] = insts\n self.weights[(source, dest)] = len(insts)", "def add_edge(self, edge, edgetype=1):\n self.add_edges([edge], edgetype)", "def addEdge(self,edge):\r\n self.adj.append(edge)", "def add_edge(self, ed):\n self.edge.append(ed)\n\n\t# This one creates a new edge and adds it to the tree.", "def add_edge(self, edge):\n edge = set(edge)\n (label1, label2) = tuple(edge)\n if label1 in self.vertices() and label2 in self.vertices():\n vertex1 = self[label1]\n vertex2 = self[label2]\n vertex1.add_edge(vertex2)\n vertex2.add_edge(vertex1) # assume undirected", "def add_edge(self, label, node):\n\n\t\tif not isinstance(node, DFANode):\n\t\t\traise DFAException(\"Cannot add an NFA node edge to a non-NFA node.\")\n\n\t\tif label in self._edges:\n\t\t\traise DFAException(\"Non-deterministic DFA node (duplicate edge '%s').\" % label)\n\n\t\tself._edges[label] = node", "def _addFileNode(self, f: File):\n # Add a vertex for the file.\n self.vertices[str(f.inode)] = \"file\"", "def add_edge(self, destination):\r\n self.edges.append(Graph.Edge(self, destination))", "def append_edge(self, edge):", "def add_edge(self, edge):\n\t\tedge = set(edge)\n\t\t(vertex, neighbor) = tuple(edge)\n\t\tif vertex not in self.g:\n\t\t\tself.g[vertex] = [neighbor]\n\t\telse:\n\t\t\tself.g[vertex].append(neighbor)\n\t\tprint \"Added Edge : {}\".format(edge)", "def add_edge(self, v1, v2):\n self.__graph[v1].append(v2)", "def add_edge(self, edge):\n self.edges.append(edge)\n scene = self.scene()\n if edge not in scene.items():\n scene.addItem(edge)", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph", "def add_edge(self, edge_key, edge_value):\n self.edge_list.append(edge_value)\n self.edge_dict[edge_key] = (edge_value.__len__(), self.edge_list.__len__() - 1)\n self.connection.append(edge_key)", "def add_edge(self, node1, node2):\n raise NotImplementedError", "def add_edge(self, edge: Edge):\n if edge.node_to not in self.nodes:\n raise NodeNotInGraphError(edge.node_to, edge)\n elif edge.node_from not in self.nodes:\n raise NodeNotInGraphError(edge.node_from, edge)\n self.edges.add(edge)", "def AddEdge(self, *args):\n return _snap.TNGraph_AddEdge(self, *args)", "def add_edge(self, key, edge, weight):\n target = self.get_node(key)\n if target:\n # add edge from Node(key) to Node(edge)\n target.add_nbr(edge, weight)\n else:\n return 'Cannot add edge. Selected node does not exist'", "def append(self, edge):\n self.agenda.append(edge)\n self.total += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Link application instance vertices together.
def _linkInstances(self):
    for (app, insts) in self.instances.items():
        edges = list(itertools.combinations(insts, 2))
        for edge in edges:
            self.edges.add(edge)
            self.weights[edge] = 1
[ "def _addAppNode(self, app: Application):\n # Add a vertex for the app.\n self.vertices[app.uid()] = \"app\"\n\n # Remember instances of an app so we can connect them.\n inst = self.instances.get(app.desktopid) or []\n inst.append(app.uid())\n self.instances[app.desktopid] = inst\n\n # Ensure there is a node modelling the app's state.\n self.vertices[app.desktopid] = \"appstate\"\n self.edges.add((app.desktopid, app.uid()))\n self.weights[(app.desktopid, app.uid())] = 1", "def _addAppNode(self, app: Application):\n # Add a vertex for the app.\n self.vertices[app.uid()] = \"app\"\n\n # Remember instances of an app so we can connect them.\n inst = self.instances.get(app.desktopid) or []\n inst.append(app.uid())\n self.instances[app.desktopid] = inst\n\n # Ensure there is a node modelling the app's state.\n self.vertices[app.desktopid] = \"appstate\"\n self.edges.add((app.desktopid, app.uid()))\n self.weights[(app.desktopid, app.uid())] = 0.0000000001", "def link_program_to_vbo(self):\n glBindVertexArray(self._vao)\n glBindBuffer(GL_ARRAY_BUFFER, self._vbo[0])\n glVertexAttribPointer(self.program.attributes['vertex_position'], 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n glBindBuffer(GL_ARRAY_BUFFER, self._vbo[1])\n glVertexAttribPointer(self.program.attributes['text_coord'], 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n glBindVertexArray(0)", "def append_vertex(self, vertex):", "def _connect(self, v1, v2):\n v1.neighbours.append(v2)\n v2.neighbours.append(v1)", "def add_vertex(self, v):\n pass", "def add_bgp_vertices(self, router):\n self.add_vertex(self.bgp_name(router), subgraph=self._bgp_sub)", "def add_vertex(self):\n u = self.g.add_vertex()\n return u", "def add_vertices(self, amount):\n raise NotImplementedError(\"Not implemented on backend \" + type(self).backend)", "def add_vertex(self, vertex):\r\n self.vertices.append(vertex)", "def add_vertices(self, vertices):\n if not vertices:\n return\n\n for v in vertices:\n self.add_vertex(v)", "def addVertexKey(self):\n self.outMesh.addKey(VertexKey())", "def join_two_vertices(context):\n\n scene = context.scene\n pg = scene.pdt_pg\n obj = context.view_layer.objects.active\n if all([bool(obj), obj.type == \"MESH\", obj.mode == \"EDIT\"]):\n bm = bmesh.from_edit_mesh(obj.data)\n verts = [v for v in bm.verts if v.select]\n if len(verts) == 2:\n try:\n bm.edges.new([verts[-1], verts[-2]])\n bmesh.update_edit_mesh(obj.data)\n bm.select_history.clear()\n return\n except ValueError:\n pg.error = PDT_ERR_CONNECTED\n context.window_manager.popup_menu(oops, title=\"Error\", icon=\"ERROR\")\n raise PDT_VerticesConnected\n else:\n pg.error = f\"{PDT_ERR_SEL_2_VERTS} {len(verts)})\"\n context.window_manager.popup_menu(oops, title=\"Error\", icon=\"ERROR\")\n raise PDT_SelectionError\n else:\n pg.error = f\"{PDT_ERR_EDOB_MODE},{obj.mode})\"\n context.window_manager.popup_menu(oops, title=\"Error\", icon=\"ERROR\")\n raise PDT_ObjectModeError", "def add_vertex(self, v):\n self.v_sources.add(v)", "def add_edge(self, v1, v2, weight):", "def addVertex(self, label):\n index = len(self.index)\n self.index[label] = index\n self.vertex[index] = label", "def add_edge(self, v1, v2):\n self.__graph[v1].append(v2)", "def add_vertex(self, id, vertex):\n \n # Check if vertex with given id already exists.\n if id in self.vertices:\n return\n \n # Check if each vertex in adjacent_to exists.\n for i in vertex.adjacent_to:\n if not i in self.vertices:\n return\n \n # Add given vertex at given 
id.\n self.vertices[id] = vertex\n \n # Add id to adjacent_to of each vertex in vertex's adjacent_to.\n for i in vertex.adjacent_to:\n self.vertices[i].add_edge(id)", "def insertVertex(self, index, v):\n self.vertexList.insert(index, v)\n \n if self.augVertexList is None:\n self.augVertexList = {generator: \\\n [StackingVertex(vertex, [], [], [], []) for vertex in self.vertexList]\\\n for generator in self.complex.oneCells}\n \n else:\n for generator in self.augVertexList.keys():\n self.augVertexList[generator].insert( \\\n index, StackingVertex(v, [], [], [], []))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a FileAccess edge to the graph.
def _addAccess(self, f: File, acc: FileAccess):
    # Get the source and destination vertex ids.
    source = acc.actor.desktopid
    dest = str(f.inode)

    # Add the edge.
    self.edges.add((source, dest))

    # Calculate the number of individual instances who accessed the file.
    insts = self.instancesPerFile.get(source+dest) or set()
    insts.add(acc.actor.uid())
    self.instancesPerFile[source+dest] = insts
    self.weights[(source, dest)] = len(insts)
[ "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = acc.actor.uid()\n dest = str(f.inode)\n\n # Add the edge, and count a single access (unweighted clustering).\n self.edges.add((source, dest))\n self.weights[(source, dest)] = 1", "def add_edge(self, edge, edgetype=1):\n self.add_edges([edge], edgetype)", "def addEdge(self,edge):\r\n self.adj.append(edge)", "def add_edge(self, ed):\n self.edge.append(ed)\n\n\t# This one creates a new edge and adds it to the tree.", "def add_edge(self, edge):\n edge = set(edge)\n (label1, label2) = tuple(edge)\n if label1 in self.vertices() and label2 in self.vertices():\n vertex1 = self[label1]\n vertex2 = self[label2]\n vertex1.add_edge(vertex2)\n vertex2.add_edge(vertex1) # assume undirected", "def add_edge(self, label, node):\n\n\t\tif not isinstance(node, DFANode):\n\t\t\traise DFAException(\"Cannot add an NFA node edge to a non-NFA node.\")\n\n\t\tif label in self._edges:\n\t\t\traise DFAException(\"Non-deterministic DFA node (duplicate edge '%s').\" % label)\n\n\t\tself._edges[label] = node", "def _addFileNode(self, f: File):\n # Add a vertex for the file.\n self.vertices[str(f.inode)] = \"file\"", "def add_edge(self, destination):\r\n self.edges.append(Graph.Edge(self, destination))", "def append_edge(self, edge):", "def add_edge(self, edge):\n\t\tedge = set(edge)\n\t\t(vertex, neighbor) = tuple(edge)\n\t\tif vertex not in self.g:\n\t\t\tself.g[vertex] = [neighbor]\n\t\telse:\n\t\t\tself.g[vertex].append(neighbor)\n\t\tprint \"Added Edge : {}\".format(edge)", "def add_edge(self, v1, v2):\n self.__graph[v1].append(v2)", "def add_edge(self, edge):\n self.edges.append(edge)\n scene = self.scene()\n if edge not in scene.items():\n scene.addItem(edge)", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph", "def add_edge(self, edge_key, edge_value):\n self.edge_list.append(edge_value)\n self.edge_dict[edge_key] = (edge_value.__len__(), self.edge_list.__len__() - 1)\n self.connection.append(edge_key)", "def add_edge(self, node1, node2):\n raise NotImplementedError", "def add_edge(self, edge: Edge):\n if edge.node_to not in self.nodes:\n raise NodeNotInGraphError(edge.node_to, edge)\n elif edge.node_from not in self.nodes:\n raise NodeNotInGraphError(edge.node_from, edge)\n self.edges.add(edge)", "def AddEdge(self, *args):\n return _snap.TNGraph_AddEdge(self, *args)", "def add_edge(self, key, edge, weight):\n target = self.get_node(key)\n if target:\n # add edge from Node(key) to Node(edge)\n target.add_nbr(edge, weight)\n else:\n return 'Cannot add edge. Selected node does not exist'", "def append(self, edge):\n self.agenda.append(edge)\n self.total += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an Application vertex to the graph.
def _addAppNode(self, app: Application):
    # Add a vertex for the app.
    self.vertices[app.uid()] = "app"

    # Remember instances of an app so we can connect them.
    inst = self.instances.get(app.desktopid) or []
    inst.append(app.uid())
    self.instances[app.desktopid] = inst

    # Ensure there is a node modelling the app's state.
    self.vertices[app.desktopid] = "appstate"
    self.edges.add((app.desktopid, app.uid()))
    self.weights[(app.desktopid, app.uid())] = 0.0000000001
[ "def _addAppNode(self, app: Application):\n # Add a vertex for the app.\n self.vertices[app.uid()] = \"app\"\n\n # Remember instances of an app so we can connect them.\n inst = self.instances.get(app.desktopid) or []\n inst.append(app.uid())\n self.instances[app.desktopid] = inst\n\n # Ensure there is a node modelling the app's state.\n self.vertices[app.desktopid] = \"appstate\"\n self.edges.add((app.desktopid, app.uid()))\n self.weights[(app.desktopid, app.uid())] = 1", "def add_vertex(self, vertex):\r\n self.vertices.append(vertex)", "def add_vertex(self):\n u = self.g.add_vertex()\n return u", "def add_vertex(self, vertex):\n\n\t\tself.vertices.append(vertex)", "def add_vertex(self, v):\n pass", "def add_vertex(self, vertex: str):\n Logger.log(Logger.LogLevel.VERBOSE,\n f\"Adding vertex {self.vertex_count}: {vertex}\")\n self.vertices[self.vertex_count] = vertex\n self.vertex_count += 1", "def append_vertex(self, vertex):", "def add_vertex(self, vertex):\n if vertex.label not in self.vertices():\n self.__graph_dict[vertex.label] = vertex", "def add_vertex(self, item: Any, kind: str) -> None:\r\n if item not in self.vertices:\r\n self.vertices[item] = WeightedVertex(item, kind)", "def add_vertex(self, vertex):\n if isinstance(vertex, Vertex) and vertex.name not in self.vertices:\n self.vertices[vertex.name] = vertex\n return True\n else:\n return False", "def addVertex(self, label):\n index = len(self.index)\n self.index[label] = index\n self.vertex[index] = label", "def add_bgp_vertices(self, router):\n self.add_vertex(self.bgp_name(router), subgraph=self._bgp_sub)", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n self.__directed_dict[vertex] = []", "def add_program(self, program):\n ...", "def add_vertex(self, v):\n self.v_sources.add(v)", "def add_vertex(self, v):\n if v not in self.vertices.keys(): \n self.vertices[v] = [False,[],0]", "def add_vertex(self, id, vertex):\n \n # Check if vertex with given id already exists.\n if id in self.vertices:\n return\n \n # Check if each vertex in adjacent_to exists.\n for i in vertex.adjacent_to:\n if not i in self.vertices:\n return\n \n # Add given vertex at given id.\n self.vertices[id] = vertex\n \n # Add id to adjacent_to of each vertex in vertex's adjacent_to.\n for i in vertex.adjacent_to:\n self.vertices[i].add_edge(id)", "def addEdge(self, u, v):\r\n self.graph[u].append(v)", "def add_atom(self, atom):\n return self.add_vertex(atom)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Link file vertices of an instance together.
def _linkInstances(self):
    filePairs = dict()
    for (source, files) in self.filesPerInstance.items():
        # We'll have duplicate edges in the edges set (e.g. 6->4 and 4->6)
        # if we don't sort inodes prior to listing inode pairs.
        edges = list(itertools.combinations(sorted(files), 2))
        for edge in edges:
            cnt = filePairs.get(edge) or 0
            filePairs[edge] = cnt+1

    for (pair, count) in filePairs.items():
        self.edges.add(pair)
        self.weights[pair] = count
[ "def _addFileNode(self, f: File):\n # Add a vertex for the file.\n self.vertices[str(f.inode)] = \"file\"", "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = acc.actor.desktopid\n dest = str(f.inode)\n\n # Add the edge.\n self.edges.add((source, dest))\n\n # Calculate the number of individual instances who accessed the file.\n insts = self.instancesPerFile.get(source+dest) or set()\n insts.add(acc.actor.uid())\n self.instancesPerFile[source+dest] = insts\n self.weights[(source, dest)] = len(insts)", "def __init__(self, gfile):\n # open the file\n f = open(gfile, \"r\")\n # read the file\n file = f.readlines()\n\n line_count = 0\n for line in file:\n if line_count == 0:\n # initialise all vertices in graph\n num_vertices = int(line.strip())\n self.vertices = []\n for i in range(num_vertices):\n self.vertices.append(Vertex(i))\n else:\n # add edges\n edge = line.split()\n # convert to integers\n for i in range(len(edge)):\n edge[i] = int(edge[i])\n self.add_directed_edge(edge[0], edge[1], edge[2])\n self.add_directed_edge(edge[1], edge[0], edge[2])\n line_count += 1\n\n # close the file\n f.close()", "def add_edges(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"%%%%%%%%%% ADDING EDGES %%%%%%%%%%%%%\\n\\n\")\n\t\t\tfor v in self.G.nodes:\t\t\t\n\t\t\t\tfor w in self.G.nodes:\n\t\t\t\t\tif (v, w) in self.G.edges:\n\t\t\t\t\t\tf.write('\\t\\\\Edge({})({})\\n'.format(self.vtoid[v], self.vtoid[w]))", "def append_vertex(self, vertex):", "def create_vertices(vertex_type, file, g):\n lines = rawcount(filename=file)\n collection_name = vertex_type.__name__\n with open(file, newline='') as csvfile:\n reader = csv.DictReader(csvfile, dialect=csv.excel_tab, fieldnames=vertex_type._csv_headers)\n next(reader, None) # skip the headers\n # label = click.style(f\"Processing {collection_name}\", fg='green')\n with progressbar.ProgressBar(max_value=lines, redirect_stdout=True) as bar:\n try:\n for row in reader:\n bar.update(reader.line_num)\n # Only keep attributes defined in the Class's _fields\n attribs = {k: row[k] for k in vertex_type._fields.keys()}\n try:\n vertex = g.createVertex(collection_name, attribs)\n except CreationError as e:\n print(e)\n pass # Ignore duplicate info FIXME this should be configurable\n try:\n for e in vertex_type._edges:\n values = row[e['key']].split(',')\n if 'to' in e:\n for v in values:\n try:\n graph.createEdge(e['collection'],\n _fromId=f\"{collection_name}/{vertex._key}\",\n _toId=f\"{e['to']}/{v}\",\n edgeAttributes=dict())\n except CreationError:\n pass # Ignore duplicate info FIXME this should be configurable\n elif 'from' in e:\n for v in values:\n try:\n graph.createEdge(e['collection'],\n _toId=f\"{collection_name}/{vertex._key}\",\n _fromId=f\"{e['from']}/{v}\",\n edgeAttributes=dict())\n except CreationError:\n pass # Ignore duplicate info FIXME this should be configurable\n except AttributeError:\n pass\n except Exception as e:\n print(\"Bad line is: \", reader.line_num, e)", "def add_vertex(self, v):\n self.v_sources.add(v)", "def add_nodes(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"\\n%%%%%%%%%% ADDING NODES %%%%%%%%%%%%%\\n\\n\")\n\t\t\ti = 0\n\t\t\tfor v in self.G.nodes:\n\t\t\t\tf.write('\\t\\\\Vertex[x={}, y={}]{{{}}}\\n'.format(round(self.factor*v.x, 3), round(self.factor*v.y, 3), i))\n\t\t\t\t\n\t\t\t\tself.vtoid[v] = i\t\t\t\t\n\t\t\t\t\n\t\t\t\ti += 1", "def _addAccess(self, f: File, acc: FileAccess):\n # Get the source and destination vertex ids.\n source = 
acc.actor.uid()\n dest = str(f.inode)\n\n # Add the edge, and count a single access (unweighted clustering).\n self.edges.add((source, dest))\n self.weights[(source, dest)] = 1", "def relink_all(cls, old_file, new_file):\n assert old_file.checksum == new_file.checksum\n assert old_file.id\n assert new_file.id\n\n with db.session.begin_nested():\n ObjectVersion.query.filter_by(file_id=str(old_file.id)).update(\n {ObjectVersion.file_id: str(new_file.id)}\n )", "def _add_links(self, cmdline):\n # need to add the current file to the DB so that we have the filefilelink and filecodelink info\n current_file = os.path.join(self.dbu.getIncomingPath(), self.filename)\n df = self.pq.figureProduct(current_file) # uses all the inspectors to see what product a file is\n if df is None:\n DBlogging.dblogger.error(\"{0} did not have a product\".format(current_file))\n self.moveToError(current_file)\n return\n df.params['verbose_provenance'] = ' '.join(cmdline)\n f_id = self.pq.diskfileToDB(df)\n ## here the file is in the DB so we can add the filefilelink an filecodelinks\n if f_id is not None: # None comes back if the file goes to error\n self.dbu.addFilecodelink(f_id, self.code_id)\n for val in self.input_files: # add a link for each input file\n self.dbu.addFilefilelink(f_id, val)", "def add_vertex(self, v):\n pass", "def __init__(self, file_name=None):\n \n # Initialize instance variables\n self.vertices = {}\n \n # Read graph\n if file_name != None:\n self.read_file(file_name)", "def add_single_edge_from_pickle(self, filename):\r\n self.edges.append(s3d.RoundedEdge.from_pickle(filename))", "def add_file_to_instance(self, *, agent_name: str, instance_name: str, file_id: str, file_path: str) -> None:", "def _linkInstances(self):\n for (app, insts) in self.instances.items():\n edges = list(itertools.combinations(insts, 2))\n for edge in edges:\n self.edges.add(edge)\n self.weights[edge] = 1", "def add_multi_link_attributes(self): \n for (u, v) in self.G.edges(): \n self.G.add_edge(u, v, w = self.weight_edge_list[(u,v)])", "def add_edge(self, v1, v2, weight):", "def link_file_ann(conn, object_type, object_id, file_ann_id):\n file_ann = conn.getObject(\"Annotation\", file_ann_id)\n if file_ann is None:\n sys.stderr.write(\"Error: File Annotation not found: %s.\\n\"\n % file_ann_id)\n sys.exit(1)\n omero_object = get_object(conn, object_type, object_id)\n # Check for existing links\n links = list(conn.getAnnotationLinks(object_type, parent_ids=[object_id],\n ann_ids=[file_ann_id]))\n if len(links) == 0:\n omero_object.linkAnnotation(file_ann)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the GraphEngine for the entire application.
def get():
    if GraphEngine.__engine is None:
        GraphEngine.__engine = GraphEngine()

    return GraphEngine.__engine
[ "def _get_engine(self, context=None):\n context = context or self.context\n engine = self.engine_class(context=context, stores=self.stores)\n return engine", "def get_engine():\n from nam.core import application\n return application.database_engine", "def get_engine() -> Engine:\n global SESSION_FACTORY\n if SESSION_FACTORY is None:\n raise ValueError(\"Engine must be initialized first.\") # pragma: no cover\n # pyre-fixme[16]: `Optional` has no attribute `bind`.\n return SESSION_FACTORY.bind", "def get_engine(config: AppConfig) -> VizierEngine:\n # Get backend identifier. Raise ValueError if value does not identify\n # a valid backend.\n backend_id = config.engine.backend.identifier\n if backend_id not in base.BACKENDS:\n raise ValueError('unknown backend \\'' + str(backend_id) + '\\'')\n # Get the identifier factory for the viztrails repository and create\n # the object store. At this point we use the default object store only.\n # We could add another environment variable to use different object\n # stores (once implemented).\n if config.engine.use_short_ids:\n id_factory = get_short_identifier\n else:\n id_factory = get_unique_identifier\n object_store = DefaultObjectStore(\n identifier_factory=id_factory\n )\n # Create index of supported packages\n packages = load_packages(config.engine.package_path)\n # By default the vizier engine uses the objectstore implementation for\n # the viztrails repository. The datastore and filestore factories depend\n # on the values of engine identifier (DEV or MIMIR).\n base_dir = config.engine.data_dir\n # Create the local viztrails repository\n viztrails = OSViztrailRepository(\n base_path=os.path.join(base_dir, app.DEFAULT_VIZTRAILS_DIR),\n object_store=object_store\n )\n filestores_dir = os.path.join(base_dir, app.DEFAULT_FILESTORES_DIR)\n datastores_dir = os.path.join(base_dir, app.DEFAULT_DATASTORES_DIR)\n if config.engine.identifier in [base.DEV_ENGINE, base.MIMIR_ENGINE]:\n filestore_factory=FileSystemFilestoreFactory(filestores_dir)\n datastore_factory: DatastoreFactory\n if config.engine.identifier == base.DEV_ENGINE:\n datastore_factory = FileSystemDatastoreFactory(datastores_dir)\n elif config.engine.identifier == base.HISTORE_ENGINE:\n import vizier.datastore.histore.factory as histore\n datastore_factory = histore.HistoreDatastoreFactory(datastores_dir)\n else:\n datastore_factory = MimirDatastoreFactory(datastores_dir)\n # The default engine uses a common project cache.\n projects: ProjectCache = CommonProjectCache(\n datastores=datastore_factory,\n filestores=filestore_factory,\n viztrails=viztrails\n )\n # Get set of task processors for supported packages\n processors = load_processors(config.engine.processor_path)\n # Create an optional task processor for synchronous tasks if given\n sync_commands_list = config.engine.sync_commands\n if not sync_commands_list is None:\n commands:Dict[str,Dict[str,TaskProcessor]] = dict()\n for el in sync_commands_list.split(':'):\n package_id, command_id = el.split('.')\n if package_id not in commands:\n commands[package_id] = dict()\n commands[package_id][command_id] = processors[package_id]\n synchronous: TaskExecEngine = SynchronousTaskEngine(\n commands=commands,\n projects=projects\n )\n else:\n synchronous = NonSynchronousEngine()\n # Create the backend\n backend: VizierBackend\n if backend_id == base.BACKEND_MULTIPROCESS:\n backend = MultiProcessBackend(\n processors=processors,\n projects=projects,\n synchronous=synchronous\n )\n elif backend_id == base.BACKEND_CELERY:\n # Create and 
configure routing information (if given)\n backend = CeleryBackend(\n routes=config_routes(config),\n synchronous=synchronous\n )\n else:\n # Not all combinations of engine identifier and backend identifier\n # are valid.\n raise ValueError('invalid backend \\'' + str(backend_id) + '\\'')\n elif config.engine.identifier == base.CONTAINER_ENGINE:\n if backend_id == base.BACKEND_CONTAINER:\n projects = ContainerProjectCache(\n viztrails=viztrails,\n container_file=os.path.join(base_dir, app.DEFAULT_CONTAINER_FILE),\n config=config,\n datastores=MimirDatastoreFactory(datastores_dir),\n filestores=FileSystemFilestoreFactory(filestores_dir)\n )\n backend = ContainerBackend(projects=projects)\n else:\n # The container engine only supports a single backend type.\n raise ValueError('invalid backend \\'' + str(backend_id) + '\\'')\n else:\n raise ValueError('unknown vizier engine \\'' + str(config.engine.identifier) + '\\'')\n return VizierEngine(\n name=config.engine.identifier + ' (' + backend_id + ')',\n projects=projects,\n backend=backend,\n packages=packages\n )", "def get_backend_engine(self, name, **kwargs):\n if name not in self._engines:\n msg = \"Given settings backend is unknowed: {}\"\n raise SettingsBackendError(msg.format(name))\n\n return self._engines[name](**kwargs)", "def engine(self) -> \"DatabaseInstanceEngine\":\n return self._values.get('engine')", "def execution_engine(self):\n return self._execution_engine", "def get_backend(self):\n raise NotImplementedError('Please implement me')", "def active_graph(self):\n return self._neural_graph_manager.active_graph", "def get_db_engine(self):\n connection_str = self.get_db_connection_str()\n return create_engine(connection_str)", "def get_instance() -> 'RenderEngine':\n return _SINGLETON", "def rdflib_graph(self) -> Graph:\n return self.g", "def graph(self):\n return self.service_root.graph", "def db(self) -> Engine:\n return self.request.app['db']", "def getApplication(self):\r\n return self.app", "def _get_graph_encoder(self, triples, num_entity, num_relation, embedding_dim):\n assert self.graph_encoder_name in self.configs.all_graph_encoder\n\n if self.graph_encoder_name == 'GAKE':\n from gcake.model.graph_encoder.Transformer import GAKEGraphEncoder\n return GAKEGraphEncoder(triples, num_entity, num_relation, embedding_dim)", "def get_default_graph():\t\n\treturn _default_graph", "def get_engine(\n onnx_file_path,\n engine_file_path,\n convert_mode,\n dynamic_shapes=False,\n max_batch_size=1,\n calibrator=None,\n):\n\n if os.path.exists(engine_file_path):\n # If a serialized engine exists, use it instead of building an engine.\n console.print(f\"Reading engine from file {engine_file_path}\", style='info')\n with open(engine_file_path, \"rb\") as f, trt.Runtime(TRT_LOGGER) as runtime:\n return runtime.deserialize_cuda_engine(f.read())\n else:\n return build_engine(\n onnx_file_path,\n engine_file_path,\n convert_mode,\n dynamic_shapes,\n max_batch_size,\n calibrator,\n )", "def new_graph(self):\n graph = GraphWrapper.blank_graph()\n graph.graph['graph_builder'] = self\n return graph" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the value by returning it, instead of storing in a buffer.
def write(self, value):
    return value
[ "def write(self, value, destination=None):\n pass", "def _write_value(self, value):\n writer = self._writer\n writer.dataElement(u'value', value)\n writer.newline()", "def _write_ret(self, val):\n self._log.debug('writing deferred return value %d', val)\n ovpn.write_deferred_ret_file(self._ret_f, val)\n self._wrote_ret = True", "def char_write(self, handle, value, wait_for_response=False):\r\n pass", "def _write_result(self, result_value, store_addr=None):\n if store_addr == None:\n result_addr = self._opdecoder.get_store_address()\n else:\n result_addr = store_addr\n\n if result_addr != None:\n if result_addr == 0x0:\n log(\"Push %d to stack\" % result_value)\n self._stackmanager.push_stack(result_value)\n elif 0x0 < result_addr < 0x10:\n log(\"Local variable %d = %d\" % (\n result_addr - 1, result_value))\n self._stackmanager.set_local_variable(result_addr - 1,\n result_value)\n else:\n log(\"Global variable %d = %d\" % (result_addr,\n result_value))\n self._memory.write_global(result_addr, result_value)", "def write(self, value):\n if self.register_type == Register.HOLDING_REGISTER or self.register_type == Register.INPUT_REGISTER:\n return self.modbus_client.write_single_register(self.register_address, value)\n\n else:\n raise Exception(\"invalid register type. Only use Register class static variables (i.e. Register.foo)\")", "def write(self, key, value):\n super(_WriterBase, self).write(key, value)", "def writeBlobValue(self, value):\n raise NotImplementedError(\"TODO. Need type check.\")", "def encode(self, value):\n return value", "def writeTagValue(self, value):\n raise NotImplementedError(\"TODO. Need type check.\")", "def write_return(self):\n\n # Temporary variables\n frame = 'R13'\n ret = 'R14'\n return self.result_return.format(frame, ret)", "def _encode_(self, val):\n return pickle.dumps(val, protocol=-1)", "def write_value(interface, value):\n newline = \"\\r\"\n\n if not isinstance(value, int):\n raise TypeError(\"Given value is not integer\")\n if 0 <= value <= 2000:\n command = str(value) + newline\n interface.write(command)\n\n print(\"WRITE: \" + str(command))\n \n else:\n raise ValueError(\"Given value is out of range\")", "def write(self, *args, **kwargs):\n return self.stream.write(ending=\"\", *args, **kwargs)", "def gattc_write(\n self,\n conn_handle: memoryview,\n value_handle: memoryview,\n data: bytes,\n mode: int = 0,\n /,\n ) -> None:", "def write_string(self, value):\n\n encoded_string = value.encode('utf8')\n self.write_2_byte_int(len(encoded_string))\n self.fp.write(encoded_string)", "def __write_function(\n self, writable_file, function, return_value_name, \n return_operator=\"=\", use_cse=True\n ):\n if use_cse:\n func_cse = sympy.cse(function)\n for i in range(len(func_cse[0])):\n cse_exp, cse_rhs = func_cse[0][i]\n writable_file.write(\n ' double '+sympy.ccode(cse_exp)\n +' = '+sympy.ccode(cse_rhs)+';\\n'\n )\n for i in range(len(func_cse[1])):\n writable_file.write(\n ' '+return_value_name+'[%d] '%i+return_operator+' '\n +sympy.ccode(func_cse[1][i])+';\\n'\n )\n else:\n writable_file.writelines(\n [' '+return_value_name+'[%d] '%i+return_operator+' '\n +sympy.ccode(function[i])+';\\n' for i in range(len(function))]\n )", "def gatts_write(\n self, value_handle: memoryview, data: bytes, send_update: bool = False, /\n ) -> None:", "def write_byte(self, byte_value):\n pass", "def write(self, value_: typing.Any, **kwargs: typing.Any) -> Result:\n\n new = self.format(**kwargs)\n new.value = value_\n assert new.location is not None\n\n relevant_context_keys = 
sorted(\n {\n 'today',\n 'yesterday',\n 'tomorrow',\n 'flow_name',\n 'task_name',\n 'map_index',\n 'task_full_name',\n 'task_slug',\n 'task_tags',\n 'task_run_name',\n 'flow_id',\n 'flow_run_id',\n 'flow_run_version',\n 'flow_run_name',\n 'task_id',\n 'task_run_id',\n 'task_run_version',\n }\n )\n additional_metadata = {key: kwargs.get(key, '') for key in relevant_context_keys}\n\n self.logger.debug('Starting to upload result to {}...'.format(new.location))\n self.cache_store.put(\n key=new.location,\n value=new.value,\n serializer=self.serializer,\n dump_kwargs=new.serializer_dump_kwargs,\n additional_metadata=additional_metadata,\n )\n self.logger.debug('Finished uploading result to {}.'.format(new.location))\n return new" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path to the plugin_description.xml associated with the server.
def plugin_description(self):
    return str(self._plugin_description[0])
[ "def getPluginDescription(self, pkg):\n import rospkg\n rp = rospkg.RosPack()\n man = rp.get_manifest(pkg)\n return man.get_export(pkg, 'plugin')", "def get_plugin_path(self):\n\t\tpath = os.path.join(self.get_path(), 'plugins')\n\t\tif os.path.isdir(path):\n\t\t\treturn path\n\t\telse:\n\t\t\treturn None", "def plugin_dir(self):\n return \"\"\"--plugin-dir=path\"\"\"", "def plugin_info():\n\n return {\n 'name': 'Wind Sensors Poll Plugin',\n 'version': '1.0',\n 'mode': 'poll',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }", "def get_description(self, *path):\n return get_description(self, *path)", "def badreadme_plugin_url(self):\r\n return TestRemotePluginInstaller.plugin_url('badreadme_medfilter_plugin.zip')", "def get_plugin_documentation(self, plugin_name):\n\t\ttry:\n\t\t\tplugin_class = self._unit_manager.plugins_repository.load_plugin(\n\t\t\t\tplugin_name\n\t\t\t)\n\t\texcept ImportError:\n\t\t\treturn \"\"\n\t\treturn plugin_class.__doc__", "def good_plugin_url(self):\r\n return TestRemotePluginInstaller.plugin_url('good_medfilter_plugin.zip')", "def installable_description(self):", "def device_description(self):\n return self.call_action(\"DeviceInfo1\", \"GetInfo\")[\"NewDescription\"]", "def badreadme_plugin_loc(self):\r\n return TestPluginInstaller.local_plugin('badreadme_medfilter_plugin.zip')", "def FrameworkDescription(self) -> str:", "def get_description(cls):\n if cls.__doc__ is None:\n return \"\"\n return cls.__doc__.strip().split(\"\\n\", 1)[0]", "def _description_string(self) -> str:", "def default_plugin_path(self):\n return self._default_plugin_path", "def local_plugin_url(cls, plugin_name):\r\n return urllib.pathname2url(TestRemotePluginInstaller.local_plugin(plugin_name))", "def SystemConfigurationFile(self) -> str:", "def good_plugin_loc(self):\r\n return TestPluginInstaller.local_plugin('good_medfilter_plugin.zip')", "def make_plugin_release_filesystem_location():\n return os.path.join(public_filesystem_location(), make_plugin_release_filename())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the server's ZMQ ports and ROS subscribers.
def configureServer(self):
    # TODO: add dynamic reconfigure to change subscriber topic
    # configure ROS subscriber for bootstrapping templates
    sub = rospy.Subscriber("/foo", Marker, self.markerSub)

    # init zmq to port 6789
    context = zmq.Context()
    self.socket = context.socket(zmq.REP)
    self.socket.bind("tcp://*:6789")
    self.poller = zmq.Poller()
    self.poller.register(self.socket, zmq.POLLIN)
    print "Afford Template Server started on port 6789"
[ "def setup(self):\n self.set_stream_listener()\n self.setup_mq()\n self.start_listener()", "def init_zmq_socket(self):\n # Socket to talk to server\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.SUB)\n self.logs_filter = ZMQ_FILTER\n\n self.socket.connect(ZMQ_CONNECT_ADDRESS)\n\n self.socket.setsockopt(zmq.SUBSCRIBE, self.logs_filter)", "def setup_zmq(self):\n self.context = zmq.Context()\n self.push = self.context.socket(zmq.PUSH)\n self.push_port = self.push.bind_to_random_port(\"tcp://%s\" % self.host)\n # start a listener for the pull socket\n eventlet.spawn(self.zmq_pull)\n eventlet.sleep(0)", "def main():\n\n (options, port) = parse_args()\n\n EchoServerProtocol.MESSAGE_PREFIX = options.msg_prefix\n EchoServerProtocol.REPETITIONS = options.repetitions\n\n factory = EchoServerFactory()\n reactor.listenTCP(port, factory)\n\n reactor.run()", "def __init__(self, address=DEFAULT_CLIENT_ADDRESS, port=DEFAULT_TCP_PORT):\n binding = 'tcp://*:{}'.format(port)\n super(TCPServer, self).__init__(zmq.Context(), binding)", "def _setup_subscribers(self):\n rospy.Subscriber(\n '/mavros/state', State, callback=self._state_cb)\n rospy.Subscriber(\n '/mavros/local_position/pose',\n PoseStamped,\n callback=self._pose_cb)\n rospy.Subscriber(\n '/mavros/local_position/velocity',\n TwistStamped,\n callback=self._velocity_cb)\n rospy.Subscriber(\n '/mavros/global_position/raw/fix',\n NavSatFix,\n callback=self._gps_cb)\n rospy.Subscriber(\n '/mavros/estimator_status',\n EstimatorStatus,\n callback=self._est_status_cb)", "def _setup_publishers(self):\n # mavros publishers\n self._local_vel_pub = \\\n rospy.Publisher(\n 'mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=1)", "def __create_ZMQ_publisher(self):\n\n success, self.port, self.ip = self.__network_selection()\n if success: \n # Create a new ZeroMQ context and a publisher socket\n try:\n context = zmq.Context()\n # Define the socket using the \"Context\"\n self.sock = context.socket(zmq.PUB)\n #Set the topic of the publisher and the end_point\n self.__connect_ZMQ_socket()\n self.connected = True\n except:\n print (\"NEP ERROR: socket already in use\")\n \n time.sleep(1)\n #This delay in important, whithout them the comunication is not effective\n \n # ZeroMQ note:\n # There is one more important thing to know about PUB-SUB sockets: \n # you do not know precisely when a subscriber starts to get messages.\n # Even if you start a subscriber, wait a while, and then start the publisher, \n # the subscriber will always miss the first messages that the publisher sends. \n\n\n # In Chapter 2 - Sockets and Patterns we'll explain how to synchronize a \n # publisher and subscribers so that you don't start to publish data until \n # the subscribers really are connected and ready. There is a simple and \n # stupid way to delay the publisher, which is to sleep. Don't do this in a\n # real application, though, because it is extremely fragile as well as\n # inelegant and slow. 
Use sleeps to prove to yourself what's happening, \n # and then wait for \n # Chapter 2 - Sockets and Patterns to see how to do this right", "def setUp(self):\n self.realm = TestRealm()\n self.portal = portal.Portal(self.realm)\n self.factory = ConnectionNotifyServerFactory(self.portal)\n self.port = reactor.listenTCP(0, self.factory, interface=\"127.0.0.1\")\n self.portno = self.port.getHost().port", "def create_servers(self):\n port = tempesta.upstream_port_start_from()\n self.servers = [deproxy.Server(port=port)]", "def run_server(self):\n self.protocol = MeaseWebSocketServerProtocol\n\n reactor.listenTCP(port=self.port, factory=self, interface=self.host)\n\n logger.info(\"Websocket server listening on {address}\".format(\n address=self.address))\n\n reactor.run()", "def __init__(self, broker_address: str, broker_xsub_port: int, broker_xpub_port: int, logger: Logger = Logger()):\n\n super().__init__(logger)\n self.__logger = logger\n self.__isRunning = True\n self.__context = zmq.Context().instance()\n\n url = f'{broker_address}:{broker_xpub_port}'\n self.__xpub_socket = self.__context.socket(zmq.XPUB)\n self.__xpub_socket.bind(f'tcp://{url}')\n self.__logger.log(f'Started XPUB socket on {url}')\n\n url = f'{broker_address}:{broker_xsub_port}'\n self.__xsub_socket = self.__context.socket(zmq.XSUB)\n self.__xsub_socket.bind(f'tcp://{url}')\n self.__logger.log(f'Started XSUB socket on {url}')\n\n self.__proxy: zmq.proxy = None", "def __init__(self):\n rospy.init_node('route_network')\n self.config = None\n\n # advertise visualization marker topic\n self.pub = rospy.Publisher('route_network', RouteNetwork,\n latch=True, queue_size=10)\n self.graph = None\n rospy.wait_for_service('get_geographic_map')\n self.get_map = rospy.ServiceProxy('get_geographic_map',\n GetGeographicMap)\n\n # register dynamic reconfigure callback, which runs immediately\n self.reconf_server = ReconfigureServer(Config, self.reconfigure)", "def main():\n factory = protocol.ServerFactory()\n factory.protocol = Echo\n reactor.listenTCP(80,factory)\n reactor.run()", "def setup(self):\n self.videoRtp = VideoServerRtp(self.addr)\n self.videoRtp.setClientInfo(self.clientAddr, self.clientVideoRtpPort)\n self.videoRtp.setSsrc(self.ssrc)\n self.videoRtp.setCapture(self.cap)\n\n fs = self.info['video']['framerate']\n self.audioRtp = AudioServerRtp(self.addr)\n self.audioRtp.setClientInfo(self.clientAddr, self.clientVideoRtpPort + 2)\n self.audioRtp.setSsrc(self.ssrc)\n self.audioRtp.setAudio(self.audioClip, self.info['video']['length'] / fs, fs)", "def __connect_ZMQ_socket(self):\n endpoint = \"tcp://\" + self.ip + \":\" + str(self.port)\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n self.sock.bind(endpoint)\n if self.debug or self.network == \"direct\":\n if not self.topic == \"/nep_node\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + endpoint + \" bind\")\n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n self.sock.connect(endpoint)\n if self.debug or self.network == \"direct\":\n if not self.topic == \"/nep_node\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + endpoint + \" connect\")\n elif self.mode == \"many2many\":\n self.sock.connect(endpoint)\n if self.debug or self.network == \"direct\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + endpoint + \" connect\")", "def connect_zmq(self):\n # Bind our ZeroMQ\n # FIXME: Add a setting for this\n self.capture_port = get_capture_port(self.port_id)\n 
self.zmq_context = zmq.Context()\n self.zmq_socket = self.zmq_context.socket(zmq.PAIR)\n self.zmq_socket.bind(self.capture_port)\n self.logger.debug(\"ZeroMQ Socket connected to Capture Port\")", "def configure_vpn_server(self):\n epiper = self.sysconfig.epiper_path()\n connect = '%s vpnauth-notif --vpncfg --event connected' % epiper\n disconnect = '%s vpnauth-notif --vpncfg --event disconnected' % epiper\n up = '%s vpnauth-notif --vpncfg --event up' % epiper\n down = '%s vpnauth-notif --vpncfg --event down' % epiper\n self.ovpn.configure_server_scripts(connect=connect, disconnect=disconnect, up=up, down=down)", "def main():\n factory = protocol.ServerFactory()\n factory.protocol = Echo\n reactor.listenTCP(4002,factory)\n reactor.run()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop a template process and remove it from the server's map. Parameters: class_type (string): the class type, e.g. "Wheel", "Car", etc.; instance_id (int): the ID of this instance. Returns: bool, True if the process was stopped/removed.
def removeTemplate(self, class_type, instance_id): if class_type in self.class_map and instance_id in self.class_map[class_type]: self.class_map[class_type][instance_id].terminate() del self.class_map[class_type][instance_id]
[ "def stop_process(self, name_or_id):\n\n with self._lock:\n # stop all processes of the template name\n if isinstance(name_or_id, six.string_types):\n self._stop_processes(name_or_id)\n else:\n # stop a process by its internal pid\n self._stop_process(name_or_id)", "def stop(self, instance: RuntimeInstance.Params, env: RuntimeEnvironment.Params, **kwargs):", "def stop_instance(self, instance_id):\n result = self.post('instance/{}/command'.format(instance_id),\n {'type': 'stop'})\n\n return result['success']", "def _stop_processes(self, name):\n if name not in self.processes:\n return\n\n # get the template\n state = self.processes[name]\n if state.stopped:\n return\n\n state.stopped = True\n\n # notify others that all processes of the templates are beeing\n # stopped.\n self._publish(\"stop\", name=name)\n self._publish(\"proc.%s.stop\" % name, name=name)\n\n # stop the flapping detection.\n if state.flapping_timer is not None:\n state.flapping_timer.stop()\n\n # iterrate over queued processes.\n while True:\n try:\n p = state.dequeue()\n except IndexError:\n break\n\n # notify other that the process is beeing stopped\n self._publish(\"stop_pid\", name=p.name, pid=p.id, os_pid=p.pid)\n self._publish(\"proc.%s.stop_pid\" % p.name, name=p.name,\n pid=p.id, os_pid=p.pid)\n\n # remove the pid from the running processes\n if p.id in self.running:\n self.running.pop(p.id)\n\n # stop the process\n p.stop()\n\n # track this process to make sure it's killed after the\n # graceful time\n self._tracker.check(p, state.graceful_timeout)", "def stop(self):\n c = Controller()\n instance_id = c.instance.id\n c.terminate_instance()\n\n print('Successfully shut down instance: ' + instance_id)", "def StopInstance(*, session, instanceid):\n ec2conn = session.connect_to(\"ec2\")\n ret = ec2.stop_instances(instance_ids=[instanceid,])\n return True", "def stop() -> None:\n config = load_config_file()\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. 
Exiting.')\n try:\n execute_commands_on_linux_instances(\n config,\n [\n COMMAND_KILL\n ],\n instance_ips\n )\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n logging.info('Done!')", "def stop_transport_process(self, process_num):", "def stop():\n local('aws ec2 stop-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()", "def stop(self):\n if self.send('/stop', 'post') is None:\n self.delete()", "def stop_instance(stackName, instanceName=None):\n control_instance(stackName=stackName, action='stop', instanceName=instanceName)", "def stopInstances(self, api_client):\n\n cmd = {'group': self.id}\n return api_client.stopVirtualMachine(**cmd)", "def close(self):\n self.log.debug('template_igt - in template_igt close()')\n\n subprocess.check_output([\"docker\", \"stop\", self.template_igt_container_id])\n self.log.info(\"containers.cisco.com/skyukdeliverypdl/template_igt container id {} stopped\".format(self.template_igt_container_id.decode()))\n subprocess.check_output([\"docker\", \"rm\", self.template_igt_container_id])\n self.log.info(\"containers.cisco.com/skyukdeliverypdl/template_igt container id {} removed\".format(self.template_igt_container_id.decode()))", "def remove_stopped(self, instance):\n if self.host.container_exists(instance.name):\n if self.host.container_running(instance.name):\n raise DockerRuntimeError(\"The container {} is already running.\".format(instance.container.name))\n else:\n self.host.client.remove_container(instance.name)", "def stop_process(self) -> None:\n if self.sp:\n self.sp.send_signal(signal.SIGINT)\n self.sp = None", "def terminate(self):\n ips_to_remove = self.floating_ips[:]\n for ip in ips_to_remove:\n self.remove_floating_ip(ip)\n _id = self.id\n self._instance.delete()\n self.log.debug('Waiting for instance (%s) to be terminated.' % _id)\n\n try:\n while self.instance:\n self.log.debug('Nova instance %s has status %s...' % (_id, self.status))\n sleep(5)\n except:\n self.log.debug('Nova instance %s deleted.' % _id)\n\n if self.key_pair:\n self.log.debug('Removing key pair: %s' % self.key_pair.name)\n\n key_dir = os.path.expanduser('~/') + '.ssh/'\n try:\n self.stack_env.nova.keypairs.delete(self.key_pair)\n os.remove(key_dir + self.key_pair.name)\n os.remove(key_dir + self.key_pair.name + '.pub')\n except:\n self.log.exception('Unable to remove key pair %s%s' % (key_dir, self.key_pair.name))", "def undeploy_system_instance(id=None):\n pass", "def server_stop():\n # since dev_appserver2, we need to kill 2 processes..\n run = \"\"\"\n psgrep dev_appserver.py | awk '{print $2}' | xargs kill -9\n psgrep _python_runtime.py | awk '{print $2}' | xargs kill -9\n \"\"\"\n\n # std: kill pid file..\n daemon.kill(opts.proj.dirs.gae.dev_appserver_pid)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start a template process using subprocess.Popen. Parameters: class_type (string): the class type, e.g. "Wheel", "Car", etc.; instance_id (int): the ID of this instance. Returns (int): the Popen object started by the server.
def addTemplate(self, class_type, instance_id): if class_type in self.class_map: filename = os.path.join(self.template_path, ''.join([class_type, '.py'])) if self.topic_arg is None: args = [filename, str(instance_id), "True"] else: args = [filename, str(instance_id)] print("templateServer.addTemplate: adding template " + str(class_type)) return subprocess.Popen(args)
[ "def spawn_process(self, proc):\n\n ofunc = None\n ifunc = None\n\n # def send_info(ref, cmdline, env, childpid):\n # info = {\n # \"host\": HOSTNAME.split(\".\")[0],\n # \"pid\": os.getpid(),\n # \"cmdline\": cmdline,\n # \"env\": env,\n # \"childpid\": childpid,\n # }\n # self.log.info(\"Process spawned: %s\", cmdline, taskid=ref, childpid=childpid)\n # batch.batch_info(ref, info)\n\n # if \"info\" in self.request.kwargs:\n # id = self.request.kwargs[\"info\"]\n # ifunc = lambda *args: send_info(id, *args)\n\n # if \"output\" in self.request.kwargs:\n # id = self.request.kwargs[\"output\"]\n # ofunc = lambda line: batch.batch_log(id, line)\n\n p = ProcessRunner(self, proc)\n return p.run(output=ofunc, info=ifunc)", "def spawn_subprocess(cls, **Popen_args):\n args = [sys.executable, '-m', cls.__module__]\n conn, proc = ipc.spawn_subprocess(args, **Popen_args)\n return cls(conn), proc", "def instanciate_template(id_template, id, env=\"default\"):\n if id is None:\n id = \"root\"\n config = ConfigParser.RawConfigParser()\n config.read(script_dir + \"/properties/nifi.properties\")\n nifi_connection = NifiConnect()\n nifi_connection.load_properties(env, config)\n nifi_connection.connect()\n results = nifi_connection.instanciate_template(id, id_template)\n print(\"Process group is created : \" + results[\"flow\"][\"processGroups\"][0][\"status\"][\"name\"] + \" - \" +\n results[\"flow\"][\"processGroups\"][0][\"status\"][\"id\"])", "def launch(self):\n self.processdev.start()\n pid = self.processdev.pid\n p = psutil.Process(self.processdev.pid)\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print(str(pid) + \"est le pid\")", "def _spawn(self, spawnargs):\n log.debug(\"run cmd: %s\", \" \".join(spawnargs))\n return subprocess.Popen(spawnargs, close_fds=True)", "def elt_process(self) -> Popen:\n\n # Create a Meltano ELT process if it does not already exists\n if not self._elt_process:\n self._elt_process = Popen(\n self.elt_command,\n stdout=PIPE,\n stderr=STDOUT,\n cwd=os.getenv(\n \"MELTANO_PROJECT_ROOT\"\n ), # Start the command in the root of the Meltano project\n env={\n **os.environ, # Pass all environment variables from the Dagster environment\n **self._env_vars,\n },\n start_new_session=True,\n )\n\n return self._elt_process", "def start_program(self):\n if self.__proc__ is None: \n args = self.gen_args()\n self.__proc__ = subprocess.Popen(self.gen_args(),\n stdout=PIPE,\n stderr=PIPE)\n else:\n logger.warn('Process exists already. 
Doing nothing.')", "def test_create_process(self):\n self.assertIsNotNone(self.pid)", "def create_instance_launcher(project):\n create_launcher(project, name=\"Plone local server\")", "def spawn(self, type_and_target, *args, **kwargs):\n if isinstance(type_and_target, tuple):\n proc_type, proc_target = type_and_target\n proc_callable = self.type_callables[proc_type]\n proc = proc_callable(proc_target, *args, **kwargs)\n elif issubclass(type_and_target, PyonProcess) and hasattr(type_and_target, 'target'):\n proc = type_and_target(*args, **kwargs)\n else:\n raise PyonProcessError('Invalid proc_type (must be tuple or PyonProcess subclass with a target method)')\n\n proc.supervisor = self\n\n proc.start()\n self.children.add(proc)\n\n # install failure monitor\n proc.proc.link_exception(self._child_failed)\n\n return proc", "def spawn(self, type_and_target, *args, **kwargs):\n if isinstance(type_and_target, tuple):\n proc_type, proc_target = type_and_target\n proc_callable = self.type_callables[proc_type]\n proc = proc_callable(proc_target, *args, **kwargs)\n elif isinstance(type_and_target, PyonProcess) and hasattr(type_and_target, 'target'):\n proc = type_and_target(*args, **kwargs)\n else:\n raise PyonProcessError('Invalid proc_type (must be tuple or PyonProcess subclass with a target method)')\n\n proc.supervisor = self\n\n proc.start()\n self.children.add(proc)\n return proc", "def _spawn_process(self, state):\n # get internal process id\n pid = self.get_process_id()\n\n # start process\n p = state.make_process(self.loop, pid, self._on_process_exit)\n p.spawn()\n\n # add the process to the running state\n state.queue(p)\n\n # we keep a list of all running process by id here\n self.running[pid] = p\n\n self._publish(\"spawn\", name=p.name, pid=pid,\n detached=p.detach, os_pid=p.pid)\n self._publish(\"proc.%s.spawn\" % p.name, name=p.name, pid=pid,\n detached=p.detach, os_pid=p.pid)", "def _spawn_subprocess(self, cmd, shell=False, **env):\n environ = os.environ.copy()\n environ.update(env)\n try:\n return subprocess.Popen(cmd, env=environ, shell=shell,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n except OSError as err: # pragma: no cover\n raise RuntimeError('Could not start runtime with command %r:\\n%s' %\n (cmd[0], str(err)))", "def setUp(self):\n with self.slap.instance_supervisor_rpc as supervisor:\n all_process_info = supervisor.getAllProcessInfo()\n process_info, = [p for p in all_process_info if 'proftpd' in p['name']]\n process = psutil.Process(process_info['pid'])\n self.assertEqual('proftpd', process.name()) # sanity check\n self.proftpdProcess = process", "def _as_process(self):\n pid = self.pid\n if not pid:\n raise self.NotStarted()\n return psutil.Process(pid)", "def create_and_run_exp_process(meta_info, expconfig):\n process = Process(target=run_exp, args=(meta_info, expconfig, ))\n process.daemon = True\n process.start()\n return process", "def make_process(self, loop, id, on_exit):\r\n return self.config.make_process(loop, id, self.name, env=self.env,\r\n on_exit=on_exit)", "def _Popen(self, dummy_command_line):\n object_type, args, kwargs = self._popen_results.pop(0)\n return object_type(*args, **kwargs)", "def startinstance(imagename, instance_type='m1.large'):\n if not settings.get_image(imagename):\n raise SystemExit(\"Invalid imagename '%s'\" % imagename)\n\n username, conn = _getbotoconn(auth_user)\n\n print \"starting an instance from the %s image under the %s account of \" \\\n \"type %s\" % \\\n (imagename, username, instance_type)\n\n username, 
accesskey, secretkey, pkname = settings.get_user(username)\n imagename, imageid = settings.get_image(imagename)\n\n image = conn.get_image(imageid)\n reservation = None\n if pkname:\n reservation = image.run(instance_type=instance_type, key_name=pkname)\n else:\n reservation = image.run(instance_type=instance_type)\n\n instance = reservation.instances[0]\n\n # The image has been started in the pending state, wait for it to transition\n # into the running state\n while True:\n if instance.update() == u'running':\n # [AN] it would be nice if the user knew it was still working\n break\n time.sleep(1)\n\n print \"\"\n print \"Instance started\"\n print \"DNS name: %s\" % instance.dns_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the plugin_description.xml for a ROS package.
def getPluginDescription(self, pkg): import rospkg rp = rospkg.RosPack() man = rp.get_manifest(pkg) return man.get_export(pkg, 'plugin')
[ "def plugin_description(self):\n return str(self._plugin_description[0])", "def installable_description(self):", "def format_desc(self):\n return '\\nDescription:\\n{}\\n'.format(\n C(\n FormatBlock(get_pkg_description(self.package)).format(\n width=76,\n newlines=True,\n prepend=' '\n ),\n fore='green'\n )\n )", "def plugin_info():\n\n return {\n 'name': 'Wind Sensors Poll Plugin',\n 'version': '1.0',\n 'mode': 'poll',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }", "def get_plugin_documentation(self, plugin_name):\n\t\ttry:\n\t\t\tplugin_class = self._unit_manager.plugins_repository.load_plugin(\n\t\t\t\tplugin_name\n\t\t\t)\n\t\texcept ImportError:\n\t\t\treturn \"\"\n\t\treturn plugin_class.__doc__", "def _create ( self ):\n mref = nodes.MetadataRoot()\n data = self._package_info ['desc_data']\n\n max_textline_width = roverlay.config.get ( 'METADATA.linewidth', 65 )\n\n description = None\n\n if USE_FULL_DESCRIPTION and 'Title' in data and 'Description' in data:\n description = data ['Title'] + ' // ' + data ['Description']\n\n elif 'Description' in data:\n description = data ['Description']\n\n elif 'Title' in data:\n description = data ['Title']\n\n #if description:\n if description is not None:\n mref.add (\n nodes.DescriptionNode ( description, linewidth=max_textline_width )\n )\n\n # these USE flags are described in profiles/use.desc,\n # no need to include them here\n #mref.add_useflag ( 'byte-compile', 'enable byte-compiling' )\n #\n #if package_info ['has_suggests']:\n # mref.add_useflag ( 'R_suggests', 'install optional dependencies' )\n\n return mref", "def get_pkg_description(pkg):\n\n if hasattr(pkg, 'description'):\n return pkg.description or ''\n if hasattr(pkg, 'installed'):\n installedpkg = pkg.installed\n if installedpkg:\n # Use installed version description\n return installedpkg.description or ''\n\n # Get first description found in all versions.\n desc = ''\n for ver in pkg.versions:\n if ver.description:\n desc = ver.description\n break\n return desc\n\n return ''", "def get_description(cls):\n if cls.__doc__ is None:\n return \"\"\n return cls.__doc__.strip().split(\"\\n\", 1)[0]", "def get_template_description(name, module: ModuleType) -> str:\n return \"{}: {}\".format(name, (module.__doc__ or \"\").strip())", "def XMLDesc(self, flags=0):\n ret = libvirtmod.virSecretGetXMLDesc(self._o, flags)\n if ret is None: raise libvirtError ('virSecretGetXMLDesc() failed')\n return ret", "def XMLDesc(self, flags=0):\n ret = libvirtmod.virDomainGetXMLDesc(self._o, flags)\n if ret is None: raise libvirtError ('virDomainGetXMLDesc() failed', dom=self)\n return ret", "def _description_string(self) -> str:", "def get_package_info(package_name):\n log_helper = logging_helper.logging_helper.Logger()\n log_helper.logger.debug(\"Getting additional package info for %s\" % package_name)\n command = \"smart info \" + package_name\n output = shell_ops.run_command(command)\n description = ''\n version = ''\n if output.count('Name:') > 1:\n # Multiple versions available. 
Narrow down smart info scope to get accurate info for the current version\n response = shell_ops.run_command(\"smart query --installed \" + package_name + \" --show-format=$version\")\n version = response[response.index('[100%]') + 6:response.index('@')].replace('\\n', '')\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n\n output = output[output.rindex(version):]\n\n if 'Name' in output:\n if output.index('Name') > output.index('Description'):\n # Additional entry after description\n description = output[output.rindex(\"Description:\") + 14: output.index(\"Name\")].replace('\\n', '').strip()\n else:\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n else:\n version = output[output.index(\"Version:\") + 9: output.index(\"Priority:\")].replace('\\n', '')\n version = version[:version.index('@')]\n if 'not' in version: # Workaround for \"(not installed)\" case\n version = 'Unknown'\n description = output[output.rindex(\"Description:\") + 14:].replace('\\n', '').strip()\n\n url = output[output.index(\"Reference URLs:\") + 16: output.index(\"Flags:\")].replace('\\n', '')\n my_license = output[output.index(\"License:\") + 9: output.index(\"Installed Size:\")].replace('\\n', '')\n size = output[output.index(\"Installed Size:\") + 16: output.index(\"Reference URLs:\")].replace('\\n', '')\n group = output[output.index(\"Group:\") + 7: output.index(\"License:\")].replace('\\n', '')\n summary = output[output.index(\"Summary:\") + 9: output.index(\"Description:\")].replace('\\​r\\n', '')\n\n # escape special JSON charater (\") if any in description and summary\n summary = summary.replace('\"', '\\\\\"')\n description = description.replace('\"', '\\\\\"')\n\n package = {\n 'url': url,\n 'license': my_license,\n 'size': size,\n 'description': description,\n 'summary': summary,\n 'group': group,\n 'version': version\n }\n log_helper.logger.debug(\"Returning package info: \" + str(package))\n return json.dumps(package)", "def XMLDesc(self, flags=0):\n ret = libvirtmod.virStoragePoolGetXMLDesc(self._o, flags)\n if ret is None: raise libvirtError ('virStoragePoolGetXMLDesc() failed', pool=self)\n return ret", "def getDescription(self):\n\n prod = self.productClass()\n\n if prod: result = prod.description\n else : result = None\n\n return result", "def pkg_info():\n try:\n doc = __doc__.decode(\"UTF-8\")\n except (AttributeError, UnicodeError):\n doc = __doc__ # Python3, or some strangeness\n\n return dict(\n # project data & layout\n name = __name__.split('.')[0],\n ## TODO: version = re.search(r\"(?<=\\()[^)]+(?=\\))\", changelog).group(),\n package_dir = {\"\": \"src\"},\n ## TODO: packages = find_packages(projectdir / \"src\", exclude=[\"tests\"]),\n test_suite = \"nose.collector\",\n zip_safe = True,\n include_package_data = True,\n data_files = [\n (\"EGG-INFO\", [\n \"README.md\", \"LICENSE\", \"debian/changelog\",\n ]),\n ],\n entry_points = {\n \"console_scripts\": [\n \"wand = neutrino_wand.cli:run\",\n ],\n },\n\n # dependency management\n install_requires = [\n ],\n setup_requires = [\n \"docutils\",\n \"Sphinx\",\n ],\n extras_require = {\n },\n\n # PyPI\n url = \"https://github.com/jhermann/neutrino-wand\",\n license = \"Apache License Version 2.0\",\n keywords = \"python tool monitoring influxdb devops reporting visualops\",\n author = u\"Jürgen Hermann\",\n author_email = \"jh@web.de\",\n description = doc.split('.')[0].strip(),\n long_description = doc.split('.', 1)[1].strip(),\n classifiers = [\n # values at 
http://pypi.python.org/pypi?:action=list_classifiers\n \"Development Status :: 3 - Alpha\",\n #\"Development Status :: 4 - Beta\",\n #\"Development Status :: 5 - Production/Stable\",\n \"Operating System :: OS Independent\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Documentation\",\n \"Topic :: Utilities\",\n ],\n )", "def getDescription(self) -> \"ScXMLDocument const *\":\n return _coin.ScXMLStateMachine_getDescription(self)", "def extractMetadata(filename):\n zf = ZipFile(filename)\n metadataFile = filter(lambda x: x.endswith('metadata.txt'), zf.namelist())[0]\n metadata = zf.open(metadataFile)\n\n config = ConfigParser.ConfigParser()\n config.readfp(metadata)\n\n root = etree.Element('pyqgis_plugin',\n version = config.get('general', 'version'),\n name = config.get('general', 'name'))\n\n \n values = [ ('description', 'description'),\n ('version', 'version'),\n ('qgisMinimumVersion', 'qgis_minimum_version'),\n ('qgisMaximumVersion', 'qgis_maximum_version'),\n ('author', 'author_name'),\n ('homepage', 'homepage')]\n\n for (mtd, xml) in values:\n attribute = etree.SubElement(root, xml)\n if config.has_option('general', mtd):\n attribute.text = config.get('general', mtd).decode('utf-8')\n\n download = etree.SubElement(root, 'download_url')\n download.text = os.path.join(repoURL, 'plugins', os.path.basename(filename))\n \n md5_sum = etree.SubElement(root, 'md5_sum')\n md5_sum.text = md5(filename)\n\n file_name = etree.SubElement(root, 'file_name')\n file_name.text = os.path.basename(filename)\n\n return root", "def FrameworkDescription(self) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse affordance_templates manifest for available classes.
def getAvailableTemplates(self, templates): # if os.path.exists(manifest): from xml.etree.ElementTree import ElementTree class_map = {} # parse manifest.xml tree = ElementTree() tree.parse(templates) # get all <class> tags for c in tree.findall('class'): # starting with groovy, 'name' tag is optional if 'name' in c: class_map[self.getRawName(c.get('name'))] = {} else: class_map[self.getRawName(c.get('type'))] = {} return class_map # else: # print manifest, 'does not exist!' # return {}
[ "def read_template (self):\r\n log.info(\"reading manifest template '%s'\", self.template)\r\n template = TextFile(self.template,\r\n strip_comments=1,\r\n skip_blanks=1,\r\n join_lines=1,\r\n lstrip_ws=1,\r\n rstrip_ws=1,\r\n collapse_join=1)\r\n\r\n while 1:\r\n line = template.readline()\r\n if line is None: # end of file\r\n break\r\n\r\n try:\r\n self.filelist.process_template_line(line)\r\n except DistutilsTemplateError, msg:\r\n self.warn(\"%s, line %d: %s\" % (template.filename,\r\n template.current_line,\r\n msg))", "def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)", "def all_matchers(manifest):\n match_array = []\n # V group one is the type, group two is the stuff between {}\n find_declerations = re.compile(\"(\\S*)\\s{(.*?)}\")\n # V group one is name, group two is the ensure value\n resources_info = re.compile(\n \"'(.*)'\\:.*\\sensure\\s=>[\\s|\\']*(.*?)[\\']*,|\\Z\")\n\n manifest = TestRunner.collapse_manifest(manifest)\n\n for (type, content) in re.findall(find_declerations, manifest):\n\n s = 'Notice: /Stage[main]/Main/' + string.capwords(type) + '['\n\n (name, value) = re.findall(resources_info, content)[0]\n if name is not None and value is not None:\n s += name + ']/ensure: '\n s += 'created' if value == 'present' else 'removed'\n\n match_array.append(s)\n\n return match_array", "def __init__(self, dirname):\n\n self.templateDir = dirname\n print(\"Template directory: \" + self.templateDir)\n self.templateList = list()\n\n dirList = os.listdir(dirname)\n for dir in dirList:\n try:\n reader = TemplateReader(self.templateDir + os.path.sep + dir, TemplateReader.TemplateXMLFilename)\n self.templateList.append(reader)\n except TemplateError:\n print(\"Error reading: \" + dir + \". 
Not Adding\")", "def parse_manifest(self) -> Dict[str, Any]:\n required_docs: Set[str] = {'epiphany-cluster', 'configuration/feature-mappings'}\n parse_doc: Dict[str, Callable] = {\n 'epiphany-cluster': self.__parse_cluster_doc,\n 'configuration/feature-mappings': self.__parse_feature_mappings_doc\n }\n\n parsed_docs: Set[str] = set()\n for manifest_doc in load_yaml_file_all(self.__dest_manifest):\n try:\n kind: str = manifest_doc['kind']\n parse_doc[kind](manifest_doc)\n parsed_docs.add(kind)\n except KeyError:\n pass\n\n if len(parsed_docs) < len(required_docs):\n raise CriticalError(f'ManifestReader - could not find document(s): {parsed_docs ^ required_docs}')\n\n return {'requested-components': sorted(list(self.__requested_components)),\n 'requested-features': sorted(list(self.__requested_features))}", "def process_terms(self):\n template_data = []\n in_class = \"Record-level\"\n # sequence matters in config and it starts with Record-level which we populate here ad-hoc\n class_group = {}\n class_group[\"label\"] = \"Record-level\"\n class_group[\"iri\"] = None\n class_group[\"class\"] = None\n class_group[\"definition\"] = None\n class_group[\"comments\"] = None\n class_group[\"rdf_type\"] = None\n class_group[\"terms\"] = []\n class_group[\"namespace\"] = None\n\n addedUseWithIRI = False\n for term in self.versions(): # sequence of the terms file used as order\n term_data = self.get_term_definition(term['term_iri'])\n test = term['term_iri']\n if term_data[\"rdf_type\"] == \"http://www.w3.org/2000/01/rdf-schema#Class\":\n # new class encountered\n # store previous section in template_data\n template_data.append(class_group)\n #start new class group\n class_group = term_data\n class_group[\"terms\"] = []\n in_class = term_data[\"label\"] # check on the class working in\n elif term['term_iri']=='http://purl.org/dc/terms/language':\n # Vulnerable to ordering terms in term_versions.csv, but...\n # This is the first row of dwciri terms\n # store previous section in template_data\n template_data.append(class_group)\n #start a class group for UseWithIRI\n class_group = {\"label\":\"UseWithIRI\"}\n class_group[\"terms\"] = []\n in_class = \"UseWithIRI\" # check on the class working in\n addedUseWithIRI = True\n class_group['terms'].append(term_data)\n else:\n class_group['terms'].append(term_data)\n # save the last class to template_data\n template_data.append(class_group)\n return template_data", "def _load_attr_archetype(self, archetypes):\n for target, arches in archetypes.items():\n if self._args.verbose:\n print('processing archetype \"{}\"'.format(target))\n for name, attrs in arches['attributes'].items():\n self._session.add(AttrArchetype(target=target, name=name, **attrs))\n self._session.commit()", "def _discover_templates():\n vms = []\n for file in os.listdir(paths.packer_templates):\n json = os.path.join(paths.packer_templates,\n file, file + '.json')\n if os.path.exists(json):\n vms.append(file)\n return vms", "def parse_accidents(self, accidents_url):\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--window-size=600x400\")\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n driver = webdriver.Chrome(executable_path=os.getcwd() + \"/chromedriver\", chrome_options=chrome_options)\n driver.get(accidents_url)\n html = driver.page_source\n accidents_soup = bs(html, 'html.parser')\n accidents_table = accidents_soup.find(\"table\")\n links = accidents_table.find_all(\"a\")\n hrefs = 
[]\n\n for link in links:\n hrefs.append(\"https://aviation-safety.net\" + link.get(\"href\"))\n hrefs = hrefs[1:]\n for href in hrefs:\n driver.get(href)\n html = driver.page_source\n driver.back()\n\n soup = bs(html, 'html.parser')\n table = soup.find(\"table\")\n if not table:\n continue\n type_caption = table.find(\"td\", \"caption\", text=\"Type:\")\n date_caption = table.find(\"td\", \"caption\", text=\"Date:\")\n if type_caption:\n type_desc = type_caption.parent.find(\"td\", \"desc\").find(\"a\").text\n else:\n type_desc = None\n if date_caption:\n date_desc = date_caption.nextSibling.text\n else:\n date_desc = None\n data = [type_desc, date_desc]\n\n for caption_text in [\"First flight:\", \"Total airframe hrs:\",\n \"Total:\", \"Aircraft damage:\", \"Phase:\"]:\n td = table.find(\"td\", \"caption\", text=caption_text)\n if not td:\n desc = None\n else:\n desc = td.parent.find(\"td\", \"desc\").text.strip()\n data.append(desc)\n if not data[2] or data[6] == \"()\":\n continue\n self.accidents.add(Accident(data))", "def _parse_ucf_filename_classes(classlist_file):\n filename_classes = {}\n with open(classlist_file, \"rb\") as csv_file:\n split_reader = csv.reader(csv_file, delimiter=\",\")\n for row in split_reader:\n filename_classes[row[0]] = row[1]\n\n return filename_classes", "def word_types(template_filepath):\n with open(template_filepath, 'r') as fh:\n prompts = []\n content = fh.read().split()\n for word in content:\n if word[0] == '{' and word[-1] == '}':\n prompts.append(word[1:-1])\n return prompts\n\n # return fh.read()\n # return fh.readlines()\n # try/except FileNotFoundError:\n # sys.exit('File Not Found. Try again.')", "def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class", "def _get_instance_templates(self):\r\n CFN_TO_HOT_ATTRS = {'Type': 'type',\r\n 'Properties': 'properties',\r\n 'Metadata': 'metadata',\r\n 'DependsOn': 'depends_on',\r\n 'DeletionPolicy': 'deletion_policy',\r\n 'UpdatePolicy': 'update_policy'}\r\n\r\n def to_hot(template):\r\n hot_template = {}\r\n\r\n for attr, attr_value in template.iteritems():\r\n hot_attr = CFN_TO_HOT_ATTRS.get(attr, attr)\r\n hot_template[hot_attr] = attr_value\r\n\r\n return hot_template\r\n\r\n return [(instance.name, to_hot(instance.t))\r\n for instance in self.get_instances()]", "def _all_templates(self):\n for startmodel in self._all_starting_models():\n for template in startmodel.templates:\n yield template", "def _load_templates():\n mod = import_module(settings.TCMS_PAGES)\n\n entries, dir_name = {}, dirname(mod.__file__)\n for path, subdirs, files in walk(dir_name):\n name = path.replace(dir_name, '').strip(sep).replace(sep, '.')\n\n for file in filter(lambda f: f.endswith('.py'), files):\n fname = file.replace('.py', '')\n import_name = filter(None, (settings.TCMS_PAGES, name, fname))\n\n try:\n mod = import_module('.'.join(import_name))\n if hasattr(mod, 'PAGE'):\n entries[name or fname] = mod.PAGE\n except (ImportError, AttributeError):\n pass\n return entries", "def initialize(context):\n\n setDefaultRoles('jcu.booking: Manage bookings', ())\n setDefaultRoles('jcu.booking: Book another user in', ())\n setDefaultRoles('jcu.booking: View all bookings', ())\n\n # Retrieve the content types that have been registered with Archetypes\n # This happens when the content type is imported and the 
registerType()\n # call in the content type's module is invoked. Actually, this happens\n # during ZCML processing, but we do it here again to be explicit. Of\n # course, even if we import the module several times, it is only run\n # once.\n\n content_types, constructors, ftis = atapi.process_types(\n atapi.listTypes(config.PROJECTNAME),\n config.PROJECTNAME)\n\n # Now initialize all these content types. The initialization process takes\n # care of registering low-level Zope 2 factories, including the relevant\n # add-permission. These are listed in config.py. We use different\n # permissions for each content type to allow maximum flexibility of who\n # can add which content types, where. The roles are set up in rolemap.xml\n # in the GenericSetup profile.\n\n for atype, constructor in zip(content_types, constructors):\n utils.ContentInit('%s: %s' % (config.PROJECTNAME, atype.portal_type),\n content_types = (atype,),\n permission = config.ADD_PERMISSIONS[atype.portal_type],\n extra_constructors = (constructor,),\n ).initialize(context)", "def _parse_albumentations(self, albumentations):\n\n compose_init_params = albumentations.pop('compose_init_params', {})\n sample_keys = albumentations['sample_keys']\n\n albumentations_fns = []\n it = albumentations['albumentations']\n for albumentation in albumentations['albumentations']:\n assert len(albumentation) == 1\n albumentation_importpath = list(albumentation.keys())[0]\n albumentation_init_params = list(albumentation.values())[0]\n\n Albumentation = import_object(albumentation_importpath)\n albumentation_fn = Albumentation(**albumentation_init_params)\n albumentations_fns.append(albumentation_fn)\n\n albumentation_composition = Compose(\n albumentations_fns, **compose_init_params\n )\n processed_albumentations = [\n (albumentation_composition, {}, sample_keys)\n ]\n\n return processed_albumentations", "def instantiate_extensions(self, template):\n return [ ext_cls(template) for ext_cls in self.extensions ]", "def load_initial_transaction_types(apps, schema_editor):\n transaction_types = [\n {\n \"pk\": TransactionTypeConstants.CashWithdraw.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"cash\",\n \"description\": \"Cash Withdraw\",\n \"name\": \"withdraw\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.CashDeposit.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"cash\",\n \"description\": \"Cash Deposit\",\n \"name\": \"deposit\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.AdminCancelWithdraw.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"cash\",\n \"description\": \"Cancel Cash Withdraw\",\n \"name\": \"cancel withdraw\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.TicketConsume.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"ticket\",\n \"description\": \"Ticket Consume\",\n \"name\": \"ticket consume\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.TicketDeposit.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"ticket\",\n \"description\": \"Ticket Deposit\",\n \"name\": \"ticketDeposit\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.FppWithdraw.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"fpp\",\n \"description\": \"FPP Withdraw\",\n \"name\": \"fpp-withdraw\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.FppDeposit.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n 
{\n \"category\": \"fpp\",\n \"description\": \"FPP Deposit\",\n \"name\": \"fpp-deposit\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.BonusCashWithdraw.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"bonuscash\",\n \"description\": \"BonusCash Withdraw\",\n \"name\": \"bonuscash-withdraw\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.BonusCashDeposit.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"bonuscash\",\n \"description\": \"BonusCash Deposit\",\n \"name\": \"bonuscash-deposit\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.PromoCodeAdd.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"promocode\",\n \"description\": \"Promo Code Added\",\n \"name\": \"promocode-add\"\n }\n },\n {\n \"pk\": TransactionTypeConstants.PromoCodeRemove.value,\n \"model\": \"transaction.transactiontype\",\n \"fields\":\n {\n \"category\": \"promocode\",\n \"description\": \"Promo Code Removed\",\n \"name\": \"promocode-remove\"\n }\n }\n ]\n\n #\n # get the model by name\n TransactionType = apps.get_model('transaction', 'TransactionType')\n\n #\n # create the \"fixtures\" the 1.8 way, ie: programmatically\n for data in transaction_types:\n fields = data['fields']\n try:\n t = TransactionType.objects.get( pk=data['pk'] )\n except TransactionType.DoesNotExist:\n t = TransactionType()\n\n #\n # set the data['fields'] to the email notification\n t.category = fields['category']\n t.description = fields['description']\n t.name = fields['name']\n t.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the button state.
def state(self, val): if isinstance(self._state, Button.State): self._state = val
[ "def set_state(self, state: bool) -> None:\n # Send EasyRemote update_element event for this button\n # with the given state.\n self.er.s.sendto((f\"action=update_element&id={self.id}\"\n f\"&page={self.page}&value={int(state)}\"\n \"&type=btn&event=up\").encode(), self.er.addr)", "def buttons(self, state):\n pass", "def toggle(self):\n if self._active == Button.State.ON:\n self._active = Button.State.OFF\n else:\n self._active = Button.State.ON", "def tkConfigureShowHand(self, state):\n if self.verbose:\n print(self.name, \"Changing the state of the 'show hand' button to\", state)\n if self.log is not None:\n self.log.write(self.name + \" Changing the state of the 'show hand' button to \" + str(state) + '\\n')\n self.showhandbutton.config(state=state)", "def toggle_buttons(self):\n buttons = (self.buffer_sample_buffer_button,\n self.clean_button,\n self.load_buffer_button,\n self.load_sample_button,\n self.refill_only_button,\n self.clean_only_button)\n if self.queue_busy:\n for button in buttons:\n button['state'] = 'disabled'\n else:\n for button in buttons:\n button['state'] = 'normal'", "def set_state_change(self, boolean):\n self.state_change = boolean", "def press_button(self, pobject):\r\n # deactivate the button so it won't flash/close lid etc.\r\n self.scripts.add(scriptexamples.DeactivateButtonEvent)\r\n # blind the person pressing the button. Note that this\r\n # script is set on the *character* pressing the button!\r\n pobject.scripts.add(scriptexamples.BlindedState)", "def __updateButtonState(self, button, newState):\n #Update button 1 if needed\n current_state = self.mouse_state[button]\n if (current_state==newState):\n return False\n else:\n self.mouse_state[button] = newState\n return True", "def BSConfig(self, state):\n if self.verbose:\n print(self.name, \"Changing the state of the BS buttons to\", state)\n if self.log is not None:\n self.log.write(self.name + ' Changing the state of the BS buttons to ' + str(state) + '\\n')\n self.bsbutton.config(state=state)\n self.notbsbutton.config(state=state)\n if state == NORMAL: # if the player's Bs buttons and not bs buttons are enabled\n for player in self.world.getPlayerList():\n if player is not self:\n player.bsbutton.config(state=DISABLED) # disable all the other players' bs and not bs buttons", "def released(self):\n self.state = Button.State.OFF", "def setTimepoint(self, number, flag): \n\t\tif not self.selectedFrames.has_key(number):\n\t\t\tself.selectedFrames[number] = 0\n\t\tbutton = self.buttonList[number]\n\t\t\n\t\tif flag:\n\t\t\tself.selectedFrames[number] = 1\n\t\t\tself.setButtonState(button, 1)\n\t\telse:\n\t\t\tself.selectedFrames[number] = 0\n\t\t\tself.setButtonState(button, 0)", "def __init__(self, definition: ButtonDefinition):\n super(ToggleButton, self).__init__(definition)\n self._is_toggled: bool = False", "def toggle_state(self):\n if self.__is_enabled:\n self.get_widget().configure(state='disabled')\n else:\n self.get_widget().configure(state='enabled`')\n self.__is_enabled = not self.__is_enabled", "def set_state(self, gameState) :\n self.__gameState = gameState", "def set_cmdrunning(self, cmdrunning):\n self.cmdrunning = cmdrunning # toggle state variable\n \n # enable or disable run-related buttons\n if cmdrunning:\n disable_on_run = 'disable'\n enable_on_run = 'normal'\n else:\n disable_on_run = 'normal'\n enable_on_run = 'disable'\n self.b_reset.config(state=disable_on_run)\n self.e_cmd.config(state=disable_on_run)\n self.b_run_batch.config(state=disable_on_run)\n 
self.b_stop.config(state=enable_on_run)", "def display_state(self, running_state):\n if not running_state in [\"running_continuous\",\n \"running_single\",\n \"paused\",\n \"stopped\"]:\n raise ValueError(\"Na running_state should be either \"\n \"running_continuous, \"\n \"running_single, \"\n \"paused or \"\n \"stopped\")\n if running_state==\"running_continuous\":\n self.button_single.setEnabled(False)\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_continuous.setText(\"Pause\")\n return\n if running_state== \"running_single\":\n self.button_single.setEnabled(True)\n self.button_single.setText(\"Pause\")\n self.button_continuous.setEnabled(False)\n self.button_continuous.setText(\"Run continuous\")\n return\n if running_state == \"paused\":\n self.button_continuous.setText(\"Resume continuous\")\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_single.setEnabled(False)\n return\n if running_state == \"stopped\":\n self.button_continuous.setText(\"Run continuous\")\n self.button_single.setText(\"Run single\")\n self.button_continuous.setEnabled(True)\n self.button_single.setEnabled(True)\n return", "def set_active(self):\n bytes_to_write = self._to_byte_array((Commands.TOGGLE_STATE_COMMAND, Commands.ACTIVE))\n Controller._write_bytes(bytes_to_write)", "def _store_button_states(self) -> None:\n bit_index = self._event.key_number\n current_state = (self._pad_states >> bit_index) & 1\n if current_state != self._event.pressed:\n self._pad_states = (1 << bit_index) ^ self._pad_states", "def update_state(self):\n self.state = self.new_state" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Signal when button is released.
def released(self): self.state = Button.State.OFF
[ "def release(self, button, port=0):", "def mouseReleased(self, button, mouseX, mouseY):\n pass", "def on_release(self, global_state, widgets):\n logging.info('Button released, now turning off LED')\n\n widgets.led_berry.on()", "def on_button_down_event(self):\n raise NotImplementedError()", "def key_released(self, event):\n pass", "def handleMouseRelease(self, event):\n if self._board.determineIfBought():\n if self._type == \"purchase\":\n self._board.purchaseButton()\n else:\n self._board.passButton()", "def cancel_btn_event(self):\n if self.hold_down:\n self.button_event.cancel()", "def on_button_up_event(self):\n raise NotImplementedError()", "def mouseReleaseEvent(self, event):\n if event.button() == QtCore.Qt.RightButton and self.prev_index == self.tabAt(event.pos()):\n self.right_click.emit(self.prev_index)\n\n if event.button() == QtCore.Qt.MiddleButton:\n self.onCloseTabSignal.emit(int(self.tabAt(event.pos())))\n\n self.prev_index = -1\n\n QtWidgets.QTabBar.mouseReleaseEvent(self, event)", "def mouseReleaseEvent(self, event):\n \n if self.sceneDragging and not event.buttons() == (QtCore.Qt.LeftButton | QtCore.Qt.RightButton) and not event.buttons() & QtCore.Qt.MidButton:\n self.sceneDragging = False\n globals.GApp.scene.setCursor(QtCore.Qt.ArrowCursor)\n else: \n QtGui.QGraphicsView.mouseReleaseEvent(self, event)", "def is_released(self):\n return not self.is_pressed()", "def keyReleased(self, key):\n pass", "def OnButton(self,e):\n self.queue.put('button!')", "def mouseReleased(self, event):\n self.itemconfigure(self.__rect, outline=self.__outline)\n\n if self.__click_handler is not None:\n self.__click_handler()", "def press_button(self, pobject):\r\n # deactivate the button so it won't flash/close lid etc.\r\n self.scripts.add(scriptexamples.DeactivateButtonEvent)\r\n # blind the person pressing the button. Note that this\r\n # script is set on the *character* pressing the button!\r\n pobject.scripts.add(scriptexamples.BlindedState)", "def on_release(self, event):\n\n if event.inaxes != self.circ.axes:\n return None;\n contains, attrd = self.circ.contains(event)\n if not contains:\n return None;\n \n self.press = None;\n self.fig.canvas.draw();\n # store drag stop position\n self.Terminate = (event.xdata, event.ydata);\n\n # pass stop position to data\n global data;\n data[Drag.identify(self,event)] = self.Terminate;\n print(\"data:\", data);\n return 0;", "def mouse_released(self, x, y, modifiers):\n return False", "def _onButtonPressed(self):\n button = self.sender()\n container = self._widgets[button].widget()\n container.dockItem().clearAlert() # likey a no-op, but just in case", "def mouse_up(button=''):\r\n _audll.AU3_MouseUp(unicode(button))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle the button state.
def toggle(self): if self._active == Button.State.ON: self._active = Button.State.OFF else: self._active = Button.State.ON
[ "def toggle(self):", "def toggle_state(self):\n if self.__is_enabled:\n self.get_widget().configure(state='disabled')\n else:\n self.get_widget().configure(state='enabled`')\n self.__is_enabled = not self.__is_enabled", "def toggle_buttons(self):\n buttons = (self.buffer_sample_buffer_button,\n self.clean_button,\n self.load_buffer_button,\n self.load_sample_button,\n self.refill_only_button,\n self.clean_only_button)\n if self.queue_busy:\n for button in buttons:\n button['state'] = 'disabled'\n else:\n for button in buttons:\n button['state'] = 'normal'", "def test_toggle_button(self):\n # Get a current state of the check box control\n button = self.dlg.ToggleMe.find()\n cur_state = button.get_toggle_state()\n self.assertEqual(cur_state, uia_defs.toggle_state_on)\n\n # Toggle the next state\n cur_state = button.toggle().get_toggle_state()\n\n # Get a new state of the check box control\n self.assertEqual(cur_state, uia_defs.toggle_state_off)\n\n # Toggle the next state\n cur_state = button.toggle().get_toggle_state()\n self.assertEqual(cur_state, uia_defs.toggle_state_on)", "def set_state(self, state: bool) -> None:\n # Send EasyRemote update_element event for this button\n # with the given state.\n self.er.s.sendto((f\"action=update_element&id={self.id}\"\n f\"&page={self.page}&value={int(state)}\"\n \"&type=btn&event=up\").encode(), self.er.addr)", "def released(self):\n self.state = Button.State.OFF", "def untoggle_buttons(self, button):\r\n for test_buttton in self.buttons:\r\n if test_buttton == button:\r\n test_buttton.configure(relief=tk.SUNKEN)\r\n else:\r\n test_buttton.configure(relief=tk.RAISED)\r\n\r\n # Clear the status label.\r\n self.status_label[\"text\"] = \"\"", "def toggle_activate(self):\n self.set_active(status = not self._is_active)", "def state(self, val):\n if isinstance(self._state, Button.State):\n self._state = val", "def press_button(self, pobject):\r\n # deactivate the button so it won't flash/close lid etc.\r\n self.scripts.add(scriptexamples.DeactivateButtonEvent)\r\n # blind the person pressing the button. 
Note that this\r\n # script is set on the *character* pressing the button!\r\n pobject.scripts.add(scriptexamples.BlindedState)", "def toggle_relay(self):\n state = self.get_state()\n if state == 'open':\n self.last_action = 'close'\n self.last_action_time = time.time()\n elif state == 'closed':\n self.last_action = 'open'\n self.last_action_time = time.time()\n else:\n self.last_action = None\n self.last_action_time = None\n\n self.remote_pi.write(self.relay_pin, 0)\n time.sleep(0.2)\n self.remote_pi.write(self.relay_pin, 1)", "def toggle_text():\n if button[\"text\"] == \"Hi\":\n # switch to Goodbye\n button[\"text\"] = \"Goodbye\"\n else:\n # reset to Hi\n button[\"text\"] = \"Hi\"", "def inv():\n global inv_toggle\n \n # To Toggle On\n if not inv_toggle:\n sci_upper_frame.pack_forget()\n sci_lower_frame.pack_forget()\n inverse_button.configure(bg=inv_color)\n sci_upper_frame2.pack()\n sci_lower_frame.pack()\n inv_toggle = True\n \n # To Toggle Off\n elif inv_toggle:\n sci_upper_frame2.pack_forget()\n sci_lower_frame.pack_forget()\n \n sci_upper_frame.pack()\n sci_lower_frame.pack()\n inv_toggle = False", "def buttons(self, state):\n pass", "def turn_off(self):\n self._is_pressed = False\n self.config(relief=RAISED)\n self.actionframe.turn_off()", "def toggle_inputhook_flag(self, state):\r\n self.inputhook_flag = state", "def _on_button_toggled(self, button):\r\n\t\tif button.get_active():\r\n\t\t\tself.payment_mode = button.get_label()", "def is_pressed(self):\n if self._button.value() == 1:\n flag = False\n else:\n flag = True\n return flag", "def flip(self):\r\n self._on = not self._on" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor for a general simulation object.
def __init__(self, name, verbose=False): if verbose: print "Simulation base class constructor called" if isinstance(name, str): self.simulationName = name # String name of simulation code (eg GaussianJuly21) else: print "1st arg should be string name for the simulation" raise TypeError # Debug/status flag self.verbose = verbose # Main attributes self.simulationExec = "" # String name of simulation code executable (eg lmp) self.inputFileNames = list() # List of file name strings (SWS: full paths?) self.simDir = str() # Location of simulation (where input files scripts etc should be copied) self.isStrucSet = False # Flag if object has structure container set self.topDir = os.getcwd() # Current location of calling module self.templateDir = "./" # Location of directory containing any needed templates
[ "def initialise_simulation(self):\n my_Simulator = Simulator(logger_=log)\n trace_files_dir = os.path.join('Tests','TraceFiles')\n trace_file_path = os.path.join(trace_files_dir,'trace_2_tst.txt')\n my_Simulator.parse_traceFile(trace_file_path)\n return my_Simulator", "def __init__(self, simulatedSurvey, lowParallax, upParallax, minMeanAbsoluteMagnitude,\n maxMeanAbsoluteMagnitude, minTau, maxTau):\n self.simulatedSurvey=simulatedSurvey\n self.numberOfStarsInSurvey=self.simulatedSurvey.numberOfStarsInSurvey\n self.lowParallax=lowParallax\n self.upParallax=upParallax\n self.minMeanAbsoluteMagnitude=minMeanAbsoluteMagnitude\n self.maxMeanAbsoluteMagnitude=maxMeanAbsoluteMagnitude\n self.minTau=minTau\n self.maxTau=maxTau\n self.pyMCModel=Model(self._buildModel())", "def setup_necsim(self):\n if self.protracted:\n if self.is_spatial:\n self.c_simulation = CPSpatialSimulation(self.logger)\n else:\n self.c_simulation = CPNSESimulation(self.logger)\n elif self.is_spatial:\n self.c_simulation = CSpatialSimulation(self.logger)\n else:\n self.c_simulation = CNSESimulation(self.logger)", "def __init__(self, simulatedSurvey, lowParallax, upParallax, minMeanAbsoluteMagnitude,\n maxMeanAbsoluteMagnitude, shapeTau, scaleTau):\n self.simulatedSurvey=simulatedSurvey\n self.numberOfStarsInSurvey=self.simulatedSurvey.numberOfStarsInSurvey\n self.lowParallax=lowParallax\n self.upParallax=upParallax\n self.minMeanAbsoluteMagnitude=minMeanAbsoluteMagnitude\n self.maxMeanAbsoluteMagnitude=maxMeanAbsoluteMagnitude\n self.shapeTau=shapeTau\n self.scaleTau=scaleTau\n self.pyMCModel=Model(self._buildModel())", "def __init__(self):\n self.omega = 0.9\n self.phi_particle = 0.3\n self.phi_swarm = 0.1\n self.log = None\n self.max_iterations = 500\n self.max_initial_velocity = 0.02\n self.population_size = 100\n self.timeout = None", "def __init__(self, iterations, simulation_instances=1, analysis_instances=1, adaptive_simulation=False, sim_extraction_script=None):\n self._iterations = iterations\n self._simulation_instances = simulation_instances\n self._analysis_instances = analysis_instances\n self._adaptive_simulation = adaptive_simulation\n self._sim_extraction_script = sim_extraction_script\n\n super(SimulationAnalysisLoop, self).__init__()", "def getSimulation(self):\r\n raise NotImplementedError()", "def __init__(self, lifespan=None):\r\n if lifespan is None:\r\n self.LIFESPAN = 10 ** 4\r\n else:\r\n self.LIFESPAN = lifespan\r\n # Starting at -1 allows for an intialization pass.\r\n self.timestep = -1\r\n self.world_visualize_period = 1e4\r\n self.brain_visualize_period = 1e4\r\n self.name = 'abstract base world'\r\n # These will likely be overridden in any subclass\r\n self.num_sensors = 0\r\n self.num_actions = 0\r\n self.classifier = False", "def _initSim(self,sim2):\n sim = rebound.Simulation()\n sim.units = 'day', 'AU', 'Msun'\n sim.integrator = \"ias15\"\n\n mars = sim2.particles[2]\n rocket = sim2.particles[3]\n\n rocketm = sim2.particles[3].m\n\n rocketx = rocket.x-mars.x\n rockety = rocket.y-mars.y\n rocketz = rocket.z-mars.z\n\n rocketvx = rocket.vx-mars.vx\n rocketvy = rocket.vy-mars.vy\n rocketvz = rocket.vz-mars.vz\n\n sim.add(m=1e-6)\n sim.add(m=rocketm, x=rocketx , y=rockety , z=rocketz,\n vx = rocketvx, vy =rocketvx, vz = rocketvx) #only second stage now\n sim.move_to_com()\n return sim", "def __init__(self, **kwargs):\n # Set All Variables\n self.name = kwargs.get('name', 'UNAMED')\n self.variables = kwargs.get('variables', [])\n self.definition = self.__populate_definition(\n 
kwargs.get('definition', {}))\n self.objective = kwargs.get('objective', [])\n self.ineq = kwargs.get('ineq', [])\n self.eq = kwargs.get('eq', [])\n self.starting_gen = kwargs.get('starting_gen', [])\n self.max_gen_size = kwargs.get('max_gen_size', len(self.starting_gen))\n self.crossover_prob = kwargs.get('crossover_prob', 0.6)\n self.mutation_prob = kwargs.get('mutation_prob', 0.1)\n self.total_generations = kwargs.get('total_generations', 10)\n self.beta = kwargs.get('beta', 5)\n self.trim_first = kwargs.get('trim_first', True)\n\n # Check Feasibilities (whether given parameters are allowed)\n self.__check_variables_definition_feasibility()\n self.__check_objective_feasibility()\n self.__check_constraints_feasibility()\n self.__check_initial_conditions_feasibility()\n\n # Initialize current generation\n self.__initialize_current_generation()", "def __init__(self, sim, lamda, mu):\n self.lamda = lamda\n self.mu = mu\n super(PoissonGenerator, self).__init__(sim=sim)", "def __init__(self):\n if self.isverbose:\n print >> self.log_file, \"In 'noisegen' class\"", "def __init__(self, system):\n \n # Store all the system info\n self.system = system # a SystemSetup() object containing salt concentration, forcefields, etc,\n \n # All possible ion names that we know about and could occur in gromacs forcefield files.\n # JDC: Should this be moved to static class data?\n self.possibleIons = ['Ca','Cl','Na','Mg', 'K', 'Rb', 'CS', 'Li', 'Zn', 'Sr', 'Ba' ]\n\n # Read list of ions, their charges, and masses from gromacs forcefield files, and\n # create a dictionary to contain all the ions and ion information provided by the forcefield.\n \n #try:\n if (1):\n self.ions = self.getIons() # a dictionary of Ion objects {'Cl', <Ion object> }\n #except:\n # raise IonParsingError\n \n return", "def __init__(self, species=''):\n\n # Check if the gas species is valid\n if species=='':\n raise ValueError(\"Gas species not defined.\")\n if type(species) is not str:\n raise TypeError(\"Gas species must be a string\")\n\n # SemiperfectIdealGas object:\n self.__from_semiperfect_gas = SemiperfectIdealGas(species)\n \n # Store the thermodynamic properties of the gas\n self.thermo_prop = self.__from_semiperfect_gas.thermo_prop\n\n # Reference temperature and gas species\n self.T_ref = cts.T_ref\n self._gas_species = species\n \n return", "def __init__(self, runtime_dir=\"/tmp/tbots\"):\n # inputs to er_force_simulator_main\n self.sim_tick_sender = ThreadedUnixSender(runtime_dir + SIMULATION_TICK_PATH)\n self.world_state_sender = ThreadedUnixSender(runtime_dir + WORLD_STATE_PATH)\n self.blue_world_sender = ThreadedUnixSender(runtime_dir + BLUE_WORLD_PATH)\n self.yellow_world_sender = ThreadedUnixSender(runtime_dir + YELLOW_WORLD_PATH)\n self.blue_primitive_set_sender = ThreadedUnixSender(\n runtime_dir + BLUE_PRIMITIVE_SET\n )\n self.yellow_primitive_set_sender = ThreadedUnixSender(\n runtime_dir + YELLOW_PRIMITIVE_SET\n )\n\n # outputs from er_force_sim_main\n self.ssl_wrapper_listener = ThreadedUnixListener(\n runtime_dir + SSL_WRAPPER_PACKET_PATH, SSL_WrapperPacket\n )\n self.blue_robot_status_listener = ThreadedUnixListener(\n runtime_dir + BLUE_ROBOT_STATUS_PATH, RobotStatus\n )\n self.yellow_robot_status_listener = ThreadedUnixListener(\n runtime_dir + YELLOW_ROBOT_STATUS_PATH, RobotStatus,\n )\n\n self.world_state = WorldState()\n\n self.simulator_process = Popen([\"software/er_force_simulator_main\"])", "def __init__(self, sim_spec: typing.Mapping, *, backend: str):\n self.domain = sim_spec.get(\"domain\", 
analytical.DOMAIN)\n self.time_step = 1e-3\n self.max_time = 1e-2\n self.shape = sim_spec.get(\"shape\", (16, 16, 16))\n self.backend_name = backend\n self.tolerance = sim_spec[\"tolerance\"]\n dspace = numpy.array(analytical.DOMAIN, dtype=numpy.float64) / numpy.array(\n self.shape, dtype=numpy.float64\n )\n stencil_args = {\n \"backend\": self.backend_name,\n \"shape\": self.shape,\n \"dspace\": dspace,\n \"time_step\": self.time_step,\n }\n stencil_args.update(sim_spec.get(\"extra-args\", {}))\n self.extra_args = sim_spec.get(\"extra-args\", {})\n self.stencil = sim_spec[\"stencil\"](**stencil_args)\n self.reference = sim_spec[\"reference\"]\n storage_b = self.stencil.storage_builder().default_origin(self.stencil.min_origin())\n\n self.data = storage_b.from_array(numpy.fromfunction(self.get_reference, shape=self.shape))\n self.data1 = copy.deepcopy(self.data)\n self._initial_state = copy.deepcopy(self.data)\n self._expected = numpy.fromfunction(\n functools.partial(self.get_reference, time=self.max_time), shape=self.shape\n )", "def __init__(self, system, N, NA, mass, epsilon, sigma, timestep, nsteps_per_frame, nframes, temperature, s, platform_name=None):\n\n import numpy\n import simtk.unit as units\n\n # Store local copy of System.\n self.system = system\n\n self.mass = mass\n self.epsilon = epsilon\n self.sigma = sigma\n\n self.timestep = timestep\n self.nsteps_per_frame = nsteps_per_frame\n self.delta_t = timestep * nsteps_per_frame\n\n self.temperature = temperature\n\n # Compute thermal energy and inverse temperature from specified temperature.\n kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA \n self.kT = kB * self.temperature # thermal energy\n self.beta = 1.0 / self.kT # inverse temperature\n\n # Store number of A particles.\n self.N = N\n self.NA = NA\n\n # Store field.\n self.s = s\n\n # Statistics\n self.nattempted = 0\n self.naccepted = 0\n\n # Store reduced units\n self.t_obs = nframes * self.delta_t\n self.s_reduced_unit = 1.0 / (self.sigma**2 * self.delta_t)\n self.K_reduced_unit = (self.N * self.t_obs * self.sigma**2)\n self.H_reduced_unit = self.epsilon # reduced unit for path Hamiltonian (energy)\n self.beta_reduced_unit = 1.0 / self.epsilon # reduced unit for inverse temperature\n\n # Store requested OpenMM platform name.\n self.platform_name = platform_name\n self.deviceid = 0\n\n # Form vectors of masses and sqrt(kT/m) for force propagation and velocity randomization.\n nparticles = system.getNumParticles()\n self.mass = units.Quantity(numpy.zeros([nparticles,3], numpy.float64), units.amu)\n for particle_index in range(nparticles):\n self.mass[particle_index,:] = self.system.getParticleMass(particle_index)\n kT = kB * temperature # thermal energy \n self.sqrt_kT_over_m = units.Quantity(numpy.zeros([nparticles,3], numpy.float64), units.nanometers / units.picosecond)\n for particle_index in range(nparticles):\n self.sqrt_kT_over_m[particle_index,:] = units.sqrt(kT / self.mass[particle_index,0]) # standard deviation of velocity distribution for each coordinate for this atom\n\n return", "def __init__(self, machine, sample_size=16):\n if \"_C_netket.machine\" in str(type(machine)):\n self.sampler = c_sampler.ExactSampler(\n machine=machine, sample_size=sample_size\n )\n else:\n self.sampler = PyExactSampler(\n machine=machine, sample_size=sample_size)\n super().__init__(machine, sample_size)", "def setup_sim(sim):\n\n sim.evaluate_config()\n sim.update_id()\n sim.make_logger()\n sim.make_coord_arrays()\n sim.configure()\n sim.build_device()\n 
sim.set_excitation()\n return sim" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print out name of simulation object (for logging/status)
def __str__(self): return self.simulationName
[ "def __str__(self):\r\n \r\n for att in self.__dict__:\r\n print('%s: %r' % (att, getattr(self, att)))\r\n \r\n return 'Survey Simulation class object attributes'", "def __str__(self):\r\n\r\n for att in self.__dict__:\r\n print(\"%s: %r\" % (att, getattr(self, att)))\r\n\r\n return \"Optical System class object attributes\"", "def sampler(obj):\n s = unicode(type(obj).__name__)\n s += _getdoc(obj) + '\\n'\n s += u'\\n db format: ' + obj.dbformat\n s += u'\\n db name: ' + obj.dbname\n s += u'\\n save simulation: ' + str(obj.save_sim)\n s += u'\\n parallel: ' + type(obj.repeat).__module__.split('.')[-1]\n return s", "def name(self):\n return self._output.name", "def describe(obj):\n return u'Sampler:\\n--------\\n{}\\n\\nModel:\\n------\\n{}'.format(sampler(obj), setup(obj.setup))", "def get_plot_name(self):\n print(self.plot_name)", "def print_thymio(thymio: Thymio):\n print('All Thymio instance attributes:')\n pprint.pprint(dir(thymio))\n variables = thymio.variable_description() # see what the different read-write variables that you can access are\n print('\\nVariables of Thymio:')\n for var in variables:\n print(var)", "def output_tb_name(self) -> str:\n try:\n return self.attr_getter(\"_output_tb_name\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the sim testbench name yet\")", "def __str__(self):\n a = \"benchmark: {0}\".format(self.name)\n return a", "def _get_instance_name(self):\n pass", "def object_name(self, object_type: int, index: int) -> str:\n return output.get_elem_name(self.handle, object_type, index)", "def print_student_info(self):\n print(\"ID: %s\" % self.__ID)\n print(\"Name: %s, %s\" % (self.__last_name, self.__first_name))", "def show_details(name, f):\n print '%s:' % name\n print '\\tobject:', f\n print '\\t__name__:',\n try:\n print f.__name__\n except AttributeError:\n print '(no __name__)'\n print '\\t__doc__', repr(f.__doc__)\n return", "def printWorld(self) -> None:\n\t\tself.__printBoardInfo()\n\t\tself.__printActionInfo()\n\t\tself.__printAgentInfo()", "def get_space_object_name(self):\n return self.get_abstract_item(\"Space Object\", \"Name\")", "def display_name(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', '.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()", "def __str__(self) -> str: #__str__:a built-in function that computes the \"informal\" string representations of an object\n s = \"\"\n # Initialize with cofactor name\n s += \"Cofactor Name: {}\\n\".format(self.name) #\\n:new line in string\n s += \"------------ \\n\" #Draw a line between cofactor info (looks cuter!)\n # Print cofactor info, with state_id and relative redox potential\n for i in range(len(self.redox)):\n s += \"Redox State ID: {}, Oxidation Potential: {}\\n\".format(i, self.redox[i])\n\n return s", "def print_parameters(self):\n\n print(\"**********************************\")\n print(\"* Parameters\\n*\")\n print(\"* Simulation time: {}s\".format(self.total_simtime/1000.0))\n print(\"* Simintervall: {}\".format(SIM_INTERVAL))\n print(\"* Timestep: {}\".format(TIMESTEP))\n\n print(\"* \")\n self.model.print_structure()\n print(\"**********************************\\n\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set template directory location
def setTemplateDir(self, tdir):
    if not os.path.exists(tdir):
        print "Template directory does not exist... check full path \n"
        sys.exit(0)
    self.templateDir = tdir
[ "def set_views_folder(self, *path):\n\t\tglobal template_dir\n\t\ttemplate_dir = os.path.join(os.path.dirname(__file__), *path)\n\t\tself.set_jinja2_options()", "def get_template_dir(self) -> str:", "def template_path(self):\n\n return super().template_path+[os.path.join(os.path.dirname(__file__), \"templates\")]", "def test_sets_templatedir(self):\n set_templates_directory(self.templates_directory)\n\n config = git.cmd.Git().config('--global', '--list')\n\n self.assertIn(\n 'init.templatedir',\n config\n )\n\n self.assertIn(\n self.templates_directory,\n config\n )", "def test_set_template_path(self):\n\n\t\tself.assertEqual(self.app.template_path, 'tiny/templates')", "def add_template_dir(self, directory):\n ldr = jinja2.FileSystemLoader(directory)\n self.template_dir_list.insert(0, ldr)\n self.env.loader = jinja2.ChoiceLoader(self.template_dir_list)", "def _ensure_templates_directory(self):\n scm = BranchSourceCodeManager(\n make_options_with_fallback(self.options), self.get_input_dir()\n )\n repository = scm.make_repository_spec(SPINNAKER_IO_REPOSITORY_NAME)\n\n # These documents arent tied to version control, especially since they are\n # published to a different repository.\n scm.ensure_git_path(repository)\n\n self.__templates_directory = os.path.join(repository.git_dir, \"_api_templates\")", "def get_template_dir():\n return os.path.join(get_base_dir(), TEMPLATE_DIR)", "def set_template(self, template_file=r'default'):\n if template_file.lower() == \"default\":\n template_file = os.path.abspath(os.path.join(os.getcwd(), '..', 'config\\\\template-test.txt'))\n self.template = Template(template_file)", "def git_config_template_dir():\n\n g = git.Git()\n\n logging.info(\"Setting default config directory globally.\")\n g.config('--global', 'init.templatedir', template_dir)", "def template():\n template_dir = os.path.join(googkit_root(), TEMPLATE_DIR)\n if not os.path.isdir(template_dir):\n msg = 'Template directory is not found: {path}'.format(\n path=template_dir)\n raise GoogkitError(msg)\n\n return template_dir", "def set_source_template(template):", "def _get_test_template_dir():\n return os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'test_templates/')", "def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)", "def test_template_dir_setting(self):\n variable_exists = 'TEMPLATE_DIR' in dir(settings)\n self.assertTrue(variable_exists, f\"{FAILURE_HEADER}Your settings.py module does not have the variable TEMPLATE_DIR defined!{FAILURE_FOOTER}\")\n \n template_dir_value = os.path.normpath(settings.TEMPLATE_DIR)\n template_dir_computed = os.path.normpath(self.templates_dir)\n self.assertEqual(template_dir_value, template_dir_computed, f\"{FAILURE_HEADER}Your TEMPLATE_DIR setting does not point to the expected path. 
Check your configuration, and try again.{FAILURE_FOOTER}\")", "def setDirectory(*args, **kwargs):\n \n pass", "def select_template_dir(template_name):\n templates_path = get_templates_path()\n selected_template_path = os.path.join(templates_path, template_name)\n user_extra_path = os.path.join(TEMPLATES, template_name)\n if os.path.isdir(user_extra_path):\n selected_template_path = user_extra_path\n LOG.debug('Using user defined template path \"%s\"', template_name)\n if os.path.isdir(template_name):\n selected_template_path = template_name\n LOG.debug('Using user defined template path \"%s\"', template_name)\n\n if not os.path.isdir(selected_template_path):\n raise TemplateError(\n 'Unable to load requested template set \"%s\"' % template_name\n )\n\n if not os.path.isfile(os.path.join(selected_template_path, '_template')):\n raise TemplateError(\n 'Selected template \"%s\" does not contain'\n ' \"_template\" file, so it is not considered a template'\n )\n LOG.debug('Selected template: %s', selected_template_path)\n return selected_template_path", "def setOutFileTemplate(self, outfiletemplate):\n self.outfiletemplate = outfiletemplate\n self.log.debug(\"Changed tempalte to %s\"%outfiletemplate)\n if self.__folderscreated: self.write()", "def createTemplatesDirectory(self, pbfPackageRoot):\r\n templateDirectoryMaker = MakeTemplatesDirectory()\r\n templateDirectoryMaker.makeTemplatesDirectory(pbfPackageRoot)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter for the structure container.
def setStructureContainer(self, strucC):
    self.strucC = strucC
    self.isStrucSet = True
[ "def setContainer(self, cont: 'SoFieldContainer') -> \"void\":\n return _coin.SoField_setContainer(self, cont)", "def ModifyContainer(self, container):", "def setContainer(self, container: 'ScXMLObject') -> \"void\":\n return _coin.ScXMLDataObj_setContainer(self, container)", "def setContainer(self, container: 'ScXMLElt') -> \"void\":\n return _coin.ScXMLElt_setContainer(self, container)", "def __setitem__(self, key, value):\n ret = super(BaseContainer,self).__setitem__(key, value)\n try: \n self.data[key].__parent__ = self\n self.data[key].__name__ = key\n except: pass\n return ret", "def set_data_structure(self, ds):\n try:\n self.ds_handle = ds\n self.vis_type = ds.get_data_structure_type()\n except ValueError:\n print(\"Exception Thrown: Data structure passed to BRIDGES is null!\\n\")", "def containerSet(self, fieldDataString: 'char const *') -> \"void\":\n return _coin.SoNodeKitListPart_containerSet(self, fieldDataString)", "def copyStructureContainerInto(self, strucC):\n self.strucC = copy.deepcopy(strucC) \n self.isStrucSet = True", "def set_structure(self, structure):\n if not self.molecule:\n try:\n self.molecule = [Molecule(smiles=structure)]\n except ValueError:\n try:\n self.molecule = [Molecule().from_adjacency_list(structure)]\n except ValueError:\n logging.error(\"Cannot understand the given structure '{0}' of species {1}. Could not \"\n \"interpret it as SMILES nor as adjacency list\".format(structure, self.label))\n raise\n self.generate_resonance_structures()", "def setContainer(self, engine: 'SoEngine') -> \"void\":\n return _coin.SoEngineOutput_setContainer(self, engine)", "def _set_set_(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=set_.set_, is_container='container', presence=False, yang_name=\"set\", rest_name=\"set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'set cos,traffic-class or dscp value', u'cli-compact-syntax': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_ must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=set_.set_, is_container='container', presence=False, yang_name=\"set\", rest_name=\"set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'set cos,traffic-class or dscp value', u'cli-compact-syntax': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__set_ = t\n if hasattr(self, '_set'):\n self._set()", "def setValueAndUpdateObject(self, memberName, value, object, container = None):\r\n info = self._memberDict[memberName]\r\n newValue = info.interpret(value)\r\n if PyUtils.safeEqual( newValue, self.__getattribute__(memberName) ) : return\r\n self.__setattr__(memberName, newValue)\r\n info.set(object, newValue, container )", "def set_contents(self, contents=None):\n \n if contents is None and self.__contents is None:\n return\n \n root = self.__container\n root.InitChange()\n try:\n old_contents = self.__contents\n if old_contents is not None:\n root.RemChild(old_contents)\n self.__contents = contents\n if contents:\n #print 
\"Set %s as contents of %s\" % (contents, root)\n root.AddTail(contents)\n finally:\n root.ExitChange()\n return old_contents", "def reset_values(self):\n self.pointer = self.structure\n self.root = None", "def fillObject(self, object, container = None): \r\n try: object.beginBatchChanges() \r\n except AttributeError: pass\r\n try:\r\n for member in self._members:\r\n member.set(object, self.__getattribute__(member.name), container )\r\n finally:\r\n try: object.endBatchChanges()\r\n except AttributeError: pass\r\n return object", "def __init__(self, mi_structure):\n self.mi_structure = mi_structure", "def SetValue(self,value,baseType=None):\n\t\tif baseType is not None:\n\t\t\tself.baseType=baseType\n\t\tif value is None:\n\t\t\tself.value=None\n\t\telse:\n\t\t\t# assume that value is iterable\n\t\t\tself.value=[]\n\t\t\tfor v in value:\n\t\t\t\tif v is None:\n\t\t\t\t\t# ignore NULLs\t\t\t\t\t\n\t\t\t\t\tcontinue\n\t\t\t\tif self.baseType is None:\n\t\t\t\t\t# wild-card lists only work if they're empty!\n\t\t\t\t\traise ValueError(\"Can't create non-empty ordered container without a base type\")\n\t\t\t\tvAdd=SingleValue.NewValue(self.baseType,v)\n\t\t\t\tself.value.append(vAdd.value)\n\t\t\tif not self.value:\n\t\t\t\tself.value=None", "def setContainerType(self, newContainerType: 'SoType') -> \"void\":\n return _coin.SoNodeKitListPart_setContainerType(self, newContainerType)", "def SetValue(self,value,baseType=None):\n\t\tif baseType is not None:\n\t\t\tself.baseType=baseType\n\t\tif value is None:\n\t\t\tself.value=None\n\t\telse:\n\t\t\tself.value={}\n\t\t\tfor v in value:\n\t\t\t\tif v is None:\n\t\t\t\t\t# ignore NULLs\t\t\t\t\t\n\t\t\t\t\tcontinue\n\t\t\t\tif self.baseType is None:\n\t\t\t\t\t# wild-card lists only work if they're empty!\n\t\t\t\t\traise ValueError(\"Can't create non-empty multiple container without a base type\")\n\t\t\t\tvAdd=SingleValue.NewValue(self.baseType,v)\n\t\t\t\tself.value[vAdd.value]=self.value.get(vAdd.value,0)+1\n\t\t\tif not self.value:\n\t\t\t\tself.value=None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the structure container. A deep copy is performed, so that external changes to the structure container are not reflected here.
def copyStructureContainerInto(self, strucC):
    self.strucC = copy.deepcopy(strucC)
    self.isStrucSet = True
[ "def setStructureContainer(self, strucC):\n self.strucC = strucC\n self.isStrucSet = True", "def setContainer(self, container: 'ScXMLObject') -> \"void\":\n return _coin.ScXMLDataObj_setContainer(self, container)", "def ModifyContainer(self, container):", "def setContainer(self, cont: 'SoFieldContainer') -> \"void\":\n return _coin.SoField_setContainer(self, cont)", "def setContainer(self, container: 'ScXMLElt') -> \"void\":\n return _coin.ScXMLElt_setContainer(self, container)", "def setup_structure(self, structure):\n self.initial_structure = structure.copy()\n if self.structure_refinement == self.STRUCTURE_REFINEMENT_NONE:\n self.structure = structure.copy()\n self.spg_analyzer = None\n self.symmetrized_structure = None\n else:\n self.spg_analyzer = SpacegroupAnalyzer(self.initial_structure,\n symprec=\n self.spg_analyzer_options[\n 'symprec'],\n angle_tolerance=\n self.spg_analyzer_options[\n 'angle_tolerance'])\n if self.structure_refinement == self.STRUCTURE_REFINEMENT_REFINED:\n self.structure = self.spg_analyzer.get_refined_structure()\n self.symmetrized_structure = None\n elif self.structure_refinement == self.STRUCTURE_REFINEMENT_SYMMETRIZED:\n self.structure = self.spg_analyzer.get_refined_structure()\n self.spg_analyzer_refined = SpacegroupAnalyzer(self.structure,\n symprec=\n self.spg_analyzer_options[\n 'symprec'],\n angle_tolerance=\n self.spg_analyzer_options\n [\n 'angle_tolerance'])\n self.symmetrized_structure = self.spg_analyzer_refined.get_symmetrized_structure()", "def SetCurrentWorkingContainer(self, the_container):\n self._current_working_container = the_container", "def _set_set_(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=set_.set_, is_container='container', presence=False, yang_name=\"set\", rest_name=\"set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'set cos,traffic-class or dscp value', u'cli-compact-syntax': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_ must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=set_.set_, is_container='container', presence=False, yang_name=\"set\", rest_name=\"set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'set cos,traffic-class or dscp value', u'cli-compact-syntax': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__set_ = t\n if hasattr(self, '_set'):\n self._set()", "def setup(self):\n self.data = ContainerSet(self.name)\n for stage in self.stages:\n stage.data = self.data\n stage.setup()", "def setContainer(self, engine: 'SoEngine') -> \"void\":\n return _coin.SoEngineOutput_setContainer(self, engine)", "def fillObject(self, object, container = None): \r\n try: object.beginBatchChanges() \r\n except AttributeError: pass\r\n try:\r\n for member in self._members:\r\n member.set(object, self.__getattribute__(member.name), container )\r\n finally:\r\n try: object.endBatchChanges()\r\n except AttributeError: pass\r\n return object", "def set_data_structure(self, ds):\n try:\n self.ds_handle = 
ds\n self.vis_type = ds.get_data_structure_type()\n except ValueError:\n print(\"Exception Thrown: Data structure passed to BRIDGES is null!\\n\")", "def set_structure(self, structure):\n if not self.molecule:\n try:\n self.molecule = [Molecule(smiles=structure)]\n except ValueError:\n try:\n self.molecule = [Molecule().from_adjacency_list(structure)]\n except ValueError:\n logging.error(\"Cannot understand the given structure '{0}' of species {1}. Could not \"\n \"interpret it as SMILES nor as adjacency list\".format(structure, self.label))\n raise\n self.generate_resonance_structures()", "def containerSet(self, fieldDataString: 'char const *') -> \"void\":\n return _coin.SoNodeKitListPart_containerSet(self, fieldDataString)", "def set_contents(self, contents=None):\n \n if contents is None and self.__contents is None:\n return\n \n root = self.__container\n root.InitChange()\n try:\n old_contents = self.__contents\n if old_contents is not None:\n root.RemChild(old_contents)\n self.__contents = contents\n if contents:\n #print \"Set %s as contents of %s\" % (contents, root)\n root.AddTail(contents)\n finally:\n root.ExitChange()\n return old_contents", "def setContainerType(self, newContainerType: 'SoType') -> \"void\":\n return _coin.SoNodeKitListPart_setContainerType(self, newContainerType)", "def reset_values(self):\n self.pointer = self.structure\n self.root = None", "def setValueAndUpdateObject(self, memberName, value, object, container = None):\r\n info = self._memberDict[memberName]\r\n newValue = info.interpret(value)\r\n if PyUtils.safeEqual( newValue, self.__getattribute__(memberName) ) : return\r\n self.__setattr__(memberName, newValue)\r\n info.set(object, newValue, container )", "def setNodeContainer(self, nodeengine: 'SoNodeEngine') -> \"void\":\n return _coin.SoEngineOutput_setNodeContainer(self, nodeengine)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the 'effective' base class interface for a method that writes an input file based on the internal attributes of an instance of the Simulation object. This method should be redefined for each kind of file type (typically defined by the simulation version, e.g. LAMMPS, Gaussian, etc.).
def writeInput(self, fileName):
    print "No Simulation:writeInput method defined for pure base class"
    sys.exit(0)
[ "def create_simulation_file(self):\n\t\t# create and open the simulation file\n\t\tlines_to_write = []\n\t\t# In function of the model selected copy and past the contents of the template\n\t\t# in this new file\n\t\t# Select the template\n\t\tpath_template = self.templatePath + '/templates_models/' + \\\n\t\t self.model['ModelType'] + '_template.py'\n\t\twith open(path_template, 'r') as template_file:\n\t\t # We read each line of the template file, copy and paste them to the new simulation\n\t\t # file while replacing parts of them\n\t\t for line in template_file:\n\t\t # if the line contains something that need to be replaced\n\t\t if \"token\" in line:\n\t\t new_lines = self.create_line_simulation(line)\n\t\t if type(new_lines) == list:\n\t\t # more than one line to write\n\t\t lines_to_write = lines_to_write + new_lines\n\t\t if type(new_lines) == str:\n\t\t # only one line to create\n\t\t lines_to_write.append(new_lines)\n\t\t # other wise we just copy-paste the line in simulation.py\n\t\t else:\n\t\t lines_to_write.append(line)\n\t\twith open(self.userPath + '/simulation.py', 'w+') as simu_file:\n\t\t\t# write the functors at the beginning of the file\n\t\t\tfor line in lines_to_write:\n\t\t\t\tif 'token_functors' in line:\n\t\t\t\t\t# write every functors\n\t\t\t\t\tfor fun in self.functors:\n\t\t\t\t\t\t# open the corresponding template and write the lines in the simu file\n\t\t\t\t\t\twith open(self.templatePath +'/templates_BC/{}_template.py'.format(fun)) as fun_template:\n\t\t\t\t\t\t\tfor template_line in fun_template:\n\t\t\t\t\t\t\t\tsimu_file.write(template_line)\n\t\t\t\telse:\n\t\t\t\t\tsimu_file.write(line)", "def writeParAndInputFiles(self):\n pass", "def write_to_file(self, data):", "def _write(self, *args, **kwargs):\n raise NotImplementedError('Writing VASP standard streams files is not supported.')", "def make_param_file(self):\n t = self.get_t_out()\n t_timeunits = t.to(self.timeUnit)\n t_steps = t_timeunits / self.dDelta\n\n print(\"t_steps = \" + str(t_steps))\n achInFile = self.writename\n\n # write data in a file.\n file = open(self.writename + \".param\", \"w\")\n L = [\"nSteps = \" + str(round(t_steps.value)) + \"\\n\",\n \"dTheta = 0.7 \\n\",\n \"dEta = .03 \\n\",\n \"dDelta = \" + str(self.dDelta) + \" \\n\",\n \"iOutInterval = \" + str(round(t_steps.value / 5)) + \" \\n\",\n \"achInFile = \" + achInFile + \" \\n\",\n \"achOutName = \" + self.writename + \" \\n\",\n \"iLogInterval = 1 \\n\",\n \"dMsolUnit = \" + str(self.dMsolUnit.value) + \" \\n\",\n \"dKpcUnit = \" + str(self.dKpcUnit.value) + \" \\n\",\n \"dDumpFrameStep = 25 \\n\",\n \"iDirector = 1 \\n\",\n \"bGasAdiabatic = 1\"]\n\n file.writelines(L)\n file.close()", "def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')", "def to_file(self, file_path):\n raise NotImplementedError(\"Must be implemented by child class\")", "def write(self, output_path, constraints=dict(), add_params=dict()):\n\n assert self.input_format==\"json\", \"MOM_input file can only be generated from a json input file.\"\n\n # Apply the constraints on the general data to get the targeted values\n 
self.apply_constraints(constraints,add_params)\n\n # 2. Now, write MOM_input\n\n MOM_input_header =\\\n \"\"\"/* WARNING: DO NOT EDIT this file. Any changes you make will be overriden. To make\n changes in MOM6 parameters within CESM framework, use SourceMods or\n user_nl_mom mechanisms.\n\n This input file provides the adjustable run-time parameters for version 6 of\n the Modular Ocean Model (MOM6), a numerical ocean model developed at NOAA-GFDL.\n Where appropriate, parameters use usually given in MKS units.\n\n This MOM_input file contains the default configuration for CESM. A full list of\n parameters for this example can be found in the corresponding\n MOM_parameter_doc.all file which is generated by the model at run-time. */\\n\\n\"\"\"\n\n with open(os.path.join(output_path), 'w') as MOM_input:\n\n MOM_input.write(MOM_input_header)\n\n tab = \" \"*32\n for module in self.data:\n\n # Begin module block:\n if module != \"Global\":\n MOM_input.write(\"%\"+module+\"\\n\")\n\n for var in self.data[module]:\n val = self.data[module][var][\"final_val\"]\n if val==None:\n continue\n\n # write \"variable = value\" pair\n MOM_input.write(var+\" = \"+str(self.data[module][var][\"final_val\"])+\"\\n\")\n\n # Write the variable description:\n var_comments = self.data[module][var][\"description\"].split('\\n')\n if len(var_comments[-1])==0:\n var_comments.pop()\n for line in var_comments:\n MOM_input.write(tab+\"! \"+line+\"\\n\")\n MOM_input.write(\"\\n\")\n\n # End module block:\n if module != \"Global\":\n MOM_input.write(module+\"%\\n\")", "def save(self, file):\n # get __init__ arguments\n signature = inspect.signature(self.__init__)\n args_list = list(signature.parameters)\n args = {arg: getattr(self, arg) for arg in args_list}\n try:\n data = toml.load(file)\n except FileNotFoundError:\n data = {}\n\n data[f\"{self.__class__.__name__}_{self.tag}\"] = args\n with open(file, \"w\") as f:\n toml.dump(data, f)", "def output(self, file):\n self.output_string_field(file, 'Name', 'test_name')\n self.output_string_field(file, 'Description', 'test_description')\n self.output_string_list_field(file, 'Architectures', 'test_archs')\n self.output_string_field(file, 'Owner', 'owner')\n self.output_string_field(file, 'TestVersion', 'testversion')\n self.output_string_list_field(file, 'Releases', 'releases')\n self.output_string_field(file, 'Priority', 'priority')\n self.output_bool_field(file, 'Destructive', 'destructive')\n self.output_string_field(file, 'License', 'license')\n self.output_bool_field(file, 'Confidential', 'confidential')\n self.output_string_field(file, 'TestTime', 'avg_test_time')\n self.output_string_field(file, 'Path', 'test_path')\n self.output_string_list_field(file, 'Requires', 'requires')\n self.output_string_list_field(file, 'RhtsRequires', 'rhtsrequires')\n self.output_string_list_field(file, 'RunFor', 'runfor')\n self.output_string_list_field(file, 'Bugs', 'bugs')\n self.output_string_list_field(file, 'Type', 'types')\n self.output_string_list_field(file, 'RhtsOptions', 'options')\n self.output_string_dict_field(file, 'Environment', 'environment')\n self.output_string_list_field(file, 'Provides', 'provides')\n for (name, op, value) in self.need_properties:\n file.write('NeedProperty: %s %s %s\\n'%(name, op, value))\n file.write(self.generate_siteconfig_lines())", "def write(self, file, experiment):\n self._write_generators(file)\n file.write('\\n')\n file.write('main =\\n')\n file.write(' do putStrLn \"Loading SVG fonts...\"\\n')\n file.write(' fonts <- loadCommonFonts\\n')\n 
file.write(' putStrLn \"Loaded.\"\\n')\n file.write(' putStrLn \"Started running the simulation and saving the results...\"\\n')\n file.write(' let renderer = DiagramsRenderer SVG (return fonts)\\n')\n file.write(' path = WritableFilePath ' + encode_str(experiment.get_path()) + '\\n')\n file.write(' runExperimentParallel experiment generators (WebPageRenderer renderer path) model\\n')", "def genOutSpec(self, tofile=None):\r\n \r\n # start with a copy of MissionSim _outspec\r\n out = copy.copy(self._outspec)\r\n \r\n # add in all modules _outspec's\r\n for module in self.modules.values():\r\n out.update(module._outspec)\r\n \r\n # add in the specific module names used\r\n out['modules'] = {}\r\n for (mod_name, module) in self.modules.items():\r\n # find the module file \r\n mod_name_full = module.__module__\r\n if mod_name_full.startswith('EXOSIMS'):\r\n # take just its short name if it is in EXOSIMS\r\n mod_name_short = mod_name_full.split('.')[-1]\r\n else:\r\n # take its full path if it is not in EXOSIMS - changing .pyc -> .py\r\n mod_name_short = re.sub('\\.pyc$', '.py',\r\n inspect.getfile(module.__class__))\r\n out['modules'][mod_name] = mod_name_short\r\n # add catalog name\r\n if self.TargetList.keepStarCatalog:\r\n module = self.TargetList.StarCatalog\r\n mod_name_full = module.__module__\r\n if mod_name_full.startswith('EXOSIMS'):\r\n # take just its short name if it is in EXOSIMS\r\n mod_name_short = mod_name_full.split('.')[-1]\r\n else:\r\n # take its full path if it is not in EXOSIMS - changing .pyc -> .py\r\n mod_name_short = re.sub('\\.pyc$', '.py',\r\n inspect.getfile(module.__class__))\r\n out['modules'][mod_name] = mod_name_short\r\n else:\r\n out['modules']['StarCatalog'] = self.TargetList.StarCatalog # we just copy the StarCatalog string\r\n\r\n #if we don't know about the SurveyEnsemble, just write a blank to the output\r\n if 'SurveyEnsemble' not in out['modules']:\r\n out['modules']['SurveyEnsemble'] = \" \"\r\n\r\n # add in the SVN/Git revision\r\n path = os.path.split(inspect.getfile(self.__class__))[0]\r\n path = os.path.split(os.path.split(path)[0])[0]\r\n #handle case where EXOSIMS was imported from the working directory\r\n if path is '':\r\n path = os.getcwd()\r\n #comm = \"git -C \" + path + \" log -1\"\r\n comm = \"git --git-dir=%s --work-tree=%s log -1\"%(os.path.join(path,\".git\"),path)\r\n rev = subprocess.Popen(comm, stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE,shell=True)\r\n (gitRev, err) = rev.communicate()\r\n if sys.version_info[0] > 2:\r\n gitRev = gitRev.decode(\"utf-8\")\r\n if isinstance(gitRev, basestring) & (len(gitRev) > 0):\r\n tmp = re.compile('\\S*(commit [0-9a-fA-F]+)\\n[\\s\\S]*Date: ([\\S ]*)\\n') \\\r\n .match(gitRev)\r\n if tmp:\r\n out['Revision'] = \"Github \" + tmp.groups()[0] + \" \" + tmp.groups()[1]\r\n else:\r\n rev = subprocess.Popen(\"svn info \" + path + \\\r\n \"| grep \\\"Revision\\\" | awk '{print $2}'\", stdout=subprocess.PIPE,\r\n shell=True)\r\n (svnRev, err) = rev.communicate()\r\n if isinstance(svnRev, basestring) & (len(svnRev) > 0):\r\n out['Revision'] = \"SVN revision is \" + svnRev[:-1]\r\n else: \r\n out['Revision'] = \"Not a valid Github or SVN revision.\"\r\n \r\n # dump to file\r\n if tofile is not None:\r\n with open(tofile, 'w') as outfile:\r\n json.dump(out, outfile, sort_keys=True, indent=4, ensure_ascii=False,\r\n separators=(',', ': '), default=array_encoder)\r\n \r\n return out", "def _save_model(self, out_file):\n pass", "def write_products(self):\n if self.has_option('write.pattern'):\n try:\n 
self.write_scan_pattern()\n except Exception as err:\n log.warning(f\"Could not write scan pattern: {err}\")\n\n if self.configuration.get_bool('write.pixeldata'):\n out_file = os.path.join(self.configuration.work_path,\n f'pixel-{self.get_file_id()}.dat')\n try:\n self.channels.write_channel_data(\n out_file, header=self.get_ascii_header())\n except Exception as err:\n log.warning(f\"Could not write pixel data: {err}\")\n\n if self.configuration.get_bool('write.flatfield'):\n if self.has_option('write.flatfield.name'):\n out_name = self.configuration.get_string(\n 'write.flatfield.name')\n else:\n out_name = f'flat-{self.get_file_id()}.fits'\n out_file = os.path.join(self.configuration.work_path, out_name)\n try:\n self.channels.write_flat_field(out_file)\n except Exception as err:\n log.warning(f\"Could not write flat field: {err}\")\n\n if self.has_option('write.covar'):\n try:\n self.write_covariances()\n except Exception as err:\n log.warning(f\"Could not write covariances: {err}\")\n\n if self.configuration.get_bool('write.ascii'):\n try:\n self.write_ascii_time_stream()\n except Exception as err:\n log.warning(f'Could not write time stream data: {err}')\n\n if self.configuration.get_bool('write.signals'):\n for name, signal in self.signals.items():\n try:\n out_file = os.path.join(\n self.configuration.work_path,\n f'{signal.mode.name}-{self.get_file_id()}.tms')\n signal.write_signal_values(out_file)\n log.info(f\"Written signal data to {out_file}\")\n except Exception as err:\n log.warning(f\"Could not write signal data: {err}\")\n\n if self.has_option('write.spectrum'):\n window_name = self.configuration.get('write.spectrum',\n default='Hamming')\n window_size = self.configuration.get(\n 'write.spectrum.size',\n default=2 * self.frames_for(self.filter_time_scale))\n try:\n self.write_spectra(window_name=window_name,\n window_size=window_size)\n except Exception as err:\n log.warning(f\"Could not write spectra: {err}\")\n\n if self.has_option('write.coupling'):\n try:\n self.write_coupling_gains(\n self.configuration.get_list('write.coupling'))\n except Exception as err:\n log.warning(f\"Could not write coupling gains: {err}\")", "def save(self):\n\n # Write to outfile\n msgs.info('Writing sensitivity function results to file: {:}'.format(self.sensfile))\n\n # Standard init\n hdr = io.initialize_header()\n\n hdr['PYP_SPEC'] = (self.spectrograph.name, 'PypeIt: Spectrograph name')\n hdr['PYPELINE'] = self.spectrograph.pypeline\n # - List the completed steps\n hdr['STEPS'] = (','.join(self.steps), 'Completed sensfunc steps')\n # - Provide the file names\n hdr['SPC1DFIL'] = self.spec1dfile\n\n # Write the fits file\n data = [self.wave_sens, self.sensfunc]\n extnames = ['WAVE', 'SENSFUNC']\n # Write the fits file\n hdulist = fits.HDUList([fits.PrimaryHDU(header=hdr)] + [fits.ImageHDU(data=d, name=n) for d, n in zip(data, extnames)])\n hdu_meta = fits.table_to_hdu(self.meta_table)\n hdu_meta.name = 'METADATA'\n hdu_out = fits.table_to_hdu(self.out_table)\n hdu_out.name = 'OUT_TABLE'\n hdulist.append(hdu_meta)\n hdulist.append(hdu_out)\n hdulist.writeto(self.sensfile, overwrite=True, checksum=True)", "def output_file_setup(model):\n \n filename = model.filename\n\n if os.path.isfile(filename):\n print('\\n'+filename+' already exists, deleting '+filename+'\\n')\n os.remove(filename)\n \n \n \n model.out_file = nc4.Dataset(filename,'w',format='NETCDF4')\n\n model.data_group = model.out_file.createGroup('data')\n model.data_group.createDimension('time',None)\n \n var_dict = model()\n 
model.save_dict = {}\n sizes = []\n for key in var_dict.keys():\n \n if type(var_dict[key]) in (int,float,np.int64,np.float64):\n s = 1\n elif not isinstance(type(var_dict[key]), (str,np.ndarray)):\n s = len(var_dict[key])\n else:\n pdb.set_trace()\n \n if s not in sizes:\n model.data_group.createDimension(str(s),s)\n \n sizes.append(s)\n \n if s == 1:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time','1'))\n else:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time',str(s)))\n \n \n \n \n types = (int, float, np.int, np.float, np.ndarray, str)\n \n parameter_group = model.out_file.createGroup('parameters')\n\n for key, value in model.parameters.items():\n if type(value) in types:\n setattr(parameter_group, key, value)\n \n \n return model", "def writeInputFile(self):\n geom = str(self.qmData.numberOfAtoms) + \"\\n\"\n coords_in_angstrom = self.qmData.atomCoords.value_si * 1e10\n for i in range(self.qmData.numberOfAtoms):\n geom = geom + \" \".join((str(self.qmData.atomicNumbers[i]),\n str(coords_in_angstrom[i][0]),\n str(coords_in_angstrom[i][1]),\n str(coords_in_angstrom[i][2])\n )) + \"\\n\"\n with open(self.inputFilePath, 'w') as input_file:\n input_file.write(geom)\n input_file.close()\n logging.info(\"Symmetry input file written to {0}\".format(self.inputFilePath))\n return input_file", "def __write_input(self, inp):\n # find the input id:\n self.__input_id = self.__resultsdb.get_next_input_id()\n # write the input to the results database:\n row = {t2s.INPUT_TESTNAME: self.__test_name,\n t2s.INPUT_IID: self.__input_id,\n t2s.INPUT_CID: self.__circuit_id,\n t2s.INPUT_NUMZEROS: inp.get_num_zeros(),\n t2s.INPUT_NUMONES: inp.get_num_ones()}\n self.__resultsdb.add_row(t2s.INPUT_TABLENAME, row)\n # write the input to an input file:\n input_file_name = os.path.join(self.__input_dir_name,\n str(self.__input_id) + \".input\")\n input_file = self.__fho.get_file_object(input_file_name, 'w')\n input_file.write(str(inp))\n self.__fho.close_file_object(input_file)\n # write the input location to the test file:\n self.__test_file.write(\n \"\".join([\"INPUT\\n\",\n self.__get_testfile_path(input_file_name), \"\\n\"]))", "def create_output_file_for_parametrization(input_file, is_pdb, molset_atom_types, classifier_name):\n input_filename = os.path.basename(input_file)\n if input_filename.endswith('.sdf') or input_filename.endswith('.pdb'):\n input_filename = input_filename[:-4]\n # returns parent directory of directory where io.py is saved\n parent_dir = Path(__file__).resolve().parents[1]\n output_dirname = 'attyc_outputs'\n if not os.path.isdir(os.path.join(parent_dir, output_dirname)):\n print(f'Creating directory {output_dirname}...')\n os.mkdir(os.path.join(parent_dir, output_dirname))\n\n if is_pdb:\n file_extension = 'PDB'\n else:\n file_extension = 'SDF'\n output_filename = f'{input_filename}{file_extension}_{classifier_name}.txt'\n print(f'Output filename: {output_filename},\\n'\n f'path: {os.path.join(parent_dir, output_dirname, output_filename)}')\n with open(os.path.join(parent_dir, output_dirname, output_filename), 'w') as file:\n file.writelines(','.join(mol_atom_types) + os.linesep for mol_atom_types in molset_atom_types)\n print('Finished successfully.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for the existence of a top-level simulation directory and writes out all files needed for running a simulation. Files copied/output are contained in the attribute 'inputFileNames'. In principle, many input files/scripts could be copied to this location. If the directory is not found, then the directory is created. The directory is created from the top level of where this class is executed.
def createSimulation(self):
    # Check for run directory
    if (not os.path.exists(self.simDir)):
        print self.simDir, "does not exist... creating"
        os.mkdir(self.simDir)

    # For all simulation files, move into run directory
    for inFile in self.inputFileNames:
        fromInFile = os.path.join(self.topDir, inFile)
        mvInFile = os.path.join(self.topDir, self.simDir, inFile)
        shutil.move(fromInFile, mvInFile)
        if self.verbose:
            print "Moved input file to ", mvInFile
[ "def check_or_create_output_dir(self):\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)", "def _make_output_dirs_if_needed(self):\n output_dir = os.path.dirname(self.output_path)\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if os.path.exists(self.output_path):\n LOGGER.warning(\"Overwriting file %s\", self.output_path)", "def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")", "def _create_dir(self):\n self.out_fp = str(self.pb.wd + \n 'out_'+str(self.pb.conf_num) + '/')\n if not os.path.exists(self.out_fp):\n os.makedirs(self.out_fp)", "def simulations_dir():\n return _mkifnotexists(\"web/simulations\")", "def _make_dirs(self) -> None:\n self._make_log_dir()\n self._make_ckpt_dir()\n if self.config.habitat_baselines.il.eval_save_results:\n self._make_results_dir()", "def _initialize_directory(self):\n if os.path.exists(self.location):\n sys.exit(\"WARNING: %s already exists, exiting\" % self.location)\n self._generate_settings()\n self._print_initialization_message()\n self._create_directories()\n self._create_general_config_file()\n self._create_default_pipeline_config_file()\n self._create_filelist()\n print", "def prepare_dir(args):\n job_name = args.top[:-7]\n _, _, _, lambda_states = calc_fe_paramters(args.fe_type)\n if args.subset:\n lambda_states_to_evavluate_on = args.subset\n else:\n lambda_states_to_evavluate_on = list(range(lambda_states))\n print 'Starting job {}'.format(job_name)\n print 'Simulation will be run for {} states'.format(lambda_states_to_evavluate_on)\n try_to_make_dir(job_name)\n incrd_f = os.path.join(job_name, args.crd)\n prmtop_f = os.path.join(job_name, args.top)\n shutil.copy(args.crd, incrd_f)\n shutil.copy(args.top, prmtop_f)\n os.chdir(job_name)\n try_to_make_dir(MDOUT_FOLDER) \n return job_name, lambda_states_to_evavluate_on", "def create_directories():\n if not os.path.exists(DATA_DIRECTORY):\n os.makedirs(DATA_DIRECTORY)\n if not os.path.exists(OUTPUT_DIRECTORY):\n os.makedirs(OUTPUT_DIRECTORY)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % self.location\n ensure_path(self.conf_path)\n for subdir in config.DATA_DIRS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def _prepare_output_path(self):\n\n self._image_dir = os.path.join(self._output_dir, 'images')\n self._annotation_dir = os.path.join(self._output_dir, 'annotations')\n self._resized_dir = os.path.join(self._output_dir, 'resized')\n\n if not os.path.exists(self._output_dir):\n os.makedirs(self._output_dir)\n\n if not os.path.exists(self._image_dir):\n os.makedirs(self._image_dir)\n\n if not os.path.exists(self._annotation_dir):\n os.makedirs(self._annotation_dir)\n\n if not os.path.exists(self._resized_dir):\n os.makedirs(self._resized_dir)", "def prepare_output_dir(params_dict):\n if not os.path.exists(params_dict['output_dir']):\n print 'Output dir does not exist. Creating.'\n os.mkdir(params_dict['output_dir'])\n\n dir_is_empty = (os.listdir(params_dict['output_dir']) == [])\n if not dir_is_empty and params_dict['overwrite_output']:\n print 'Output dir is not empty, and overwrite is\\\n set to true. 
Deleting contents'\n shutil.rmtree(params_dict['output_dir'])\n os.mkdir(params_dict['output_dir']) # rmtree deletes the directory as well", "def setup_dir_tree(self):\n _top_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id])\n _sequence_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'sequences'])\n _cur_sequence_dir = _sequence_dir+'/sequence_{:d}'.format(CXP.reconstruction.sequence)\n _raw_data_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'raw_data'])\n _dpc_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'dpc'])\n _CXP_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, '.CXPhasing'])\n _py_dir = '/'.join([CXP.io.base_dir, CXP.io.scan_id, 'python'])\n\n if not os.path.exists(_top_dir):\n CXP.log.info('Setting up new scan directory...')\n os.mkdir(_top_dir)\n os.mkdir(_sequence_dir)\n os.mkdir(_cur_sequence_dir)\n os.mkdir(_raw_data_dir)\n os.mkdir(_dpc_dir)\n os.mkdir(_CXP_dir)\n os.mkdir(_py_dir)\n try:\n shutil.copy(CXP.io.code_dir+'/CXParams.py', _py_dir)\n except IOError:\n CXP.log.error('Was unable to save a copy of CXParams.py to {}'.format(_py_dir))\n else:\n CXP.log.info('Dir tree already exists.')\n if not os.path.exists(_sequence_dir):\n os.mkdir(_sequence_dir)\n if not os.path.exists(_cur_sequence_dir):\n CXP.log.info('Making new sequence directory')\n os.mkdir(_cur_sequence_dir)\n try:\n shutil.copy(CXP.io.code_dir+'/CXParams.py', _py_dir)\n shutil.copy(CXP.io.code_dir+'/CXParams.py',\n _cur_sequence_dir+'/CXParams_sequence{}.py'.format(CXP.reconstruction.sequence))\n except IOError:\n CXP.log.error('Was unable to save a copy of CXParams.py to {}'.format(_py_dir))", "def _prepare_dirs(self):\n # If a directory already exists, probably\n # this script has already been executed\n try:\n os.makedirs(self.basedir)\n except OSError as error:\n msg = 'Cannot create: {0} ({1})'.format(self.basedir, error)\n log.debug(msg)\n raise MasterError(msg)", "def create_data_directories(self):\r\n\r\n try:\r\n self.dir_variant_raw.mkdir(exist_ok=True, parents=True)\r\n self.dir_variant_effects.mkdir(exist_ok=True, parents=True)\r\n self.dir_variant_meta.mkdir(exist_ok=True, parents=True)\r\n\r\n self.dir_gene_raw.mkdir(exist_ok=True, parents=True)\r\n self.dir_gene_meta.mkdir(exist_ok=True, parents=True)\r\n\r\n self.dir_annotated_inter.mkdir(exist_ok=True, parents=True)\r\n self.dir_annotated_intra.mkdir(exist_ok=True, parents=True)\r\n\r\n except OSError as e:\r\n logging.getLogger(__name__).error('Could not make data directories: %s', e)\r\n exit(1)", "def create_folders(config: _data.Distribute, dict_input: _data.DistributeData) -> None:\n # Move xyz to temporal file\n os.makedirs(dict_input.folder_path, exist_ok=True)\n shutil.move(dict_input.file_xyz, dict_input.folder_path)\n\n # Scratch directory\n batch_dir = join(config.scratch_path, f'batch_{dict_input.index}')\n os.makedirs(batch_dir, exist_ok=True)", "def create_output_dirs():\n if not os.path.exists(\"./ingest_logs\"):\n os.makedirs(\"./ingest_logs\")\n if not os.path.exists(\"./providerMetadata\"):\n os.makedirs(\"./providerMetadata\")", "def create_directory_structure(self):\n for model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:\n for model_type in [\n ModelType.Main,\n ModelType.Vae,\n ModelType.Lora,\n ModelType.ControlNet,\n ModelType.TextualInversion,\n ]:\n path = self.dest_models / model_base.value / model_type.value\n path.mkdir(parents=True, exist_ok=True)\n path = self.dest_models / \"core\"\n path.mkdir(parents=True, exist_ok=True)", "def write_input(self, structure, output_dir, 
make_dir_if_not_present=True):\n if make_dir_if_not_present and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n for k, v in self.get_all_vasp_input(structure).items():\n v.write_file(os.path.join(output_dir, k))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the section and port assignment file and returns two dictionaries: one for the section -> port assignment, and the other for the port -> section assignment.
def read_section_ports_list(
    path: Optional[str] = None,
) -> Tuple[Dict[int, str], Dict[str, int]]:
    if path is None:
        path = SECTION_PORT_LIST_FILE

    if DBUTIL_SECTION_PORTS_TEST_DATA_ENV in os.environ:
        tmpfile = tempfile.NamedTemporaryFile()
        tmpfile.write(SECTION_PORTS_TEST_DATA.encode("utf-8"))
        tmpfile.flush()
        path = tmpfile.name

    assert path is not None
    port2sec = {}
    sec2port = {}
    with open(path, mode="r", newline="") as section_port_list:
        reader = csv.reader(section_port_list)
        for row in reader:
            sec2port[row[0]] = int(row[1])
            port2sec[int(row[1])] = row[0]
    return port2sec, sec2port
[ "def create_assay_assignment_dict(assay_file):\n assay_assignment_dict = {}\n for line in assay_file:\n line = line.rstrip()\n line_item = line.split(\"=\")\n assignment_group = line_item[0]\n assay_list = line_item[1].split(\",\")\n assay_assignment_dict[assignment_group] = assay_list\n return assay_assignment_dict", "def get_ports(params):\n output = subprocess.Popen((\"/usr/bin/snmpwalk\", \"-On\", \"-v2c\", \"-c\",\n params['community'], params['target'], port_name_prefix),\n stdout=subprocess.PIPE).communicate()[0]\n ports = {}\n for line in output.split(\"\\n\"):\n m = re.match(\n r'[.0-9]+\\.(\\d+) = STRING: \"?(?:ethernet)?([0-9/]+)\"?', line)\n if m:\n if not params['pattern'] or re.match(params['pattern'], m.group(2)):\n ports[m.group(2)] = m.group(1)\n return ports", "def parse_port_list(host_list, portList):\n\thost_port_map = []\n\tfor host in host_list:\n\t\tfor port in portList:\n\t\t\t# host-port tuple:\n\t\t\thost_port_map.append((host, port))\n\treturn host_port_map", "def ProcessConfigSection(\n filename: str, section_name: str = None\n) -> Dict[str, str]:\n\n # TODO(b/286571605): Replace typing when python 3.5 is unsupported.\n dictionary = {} # type: Dict[str, str]\n if not os.path.exists(filename):\n return dictionary\n with open(filename) as rcfile:\n in_section = not section_name\n for line in rcfile:\n if line.lstrip().startswith('[') and line.rstrip().endswith(']'):\n next_section = line.strip()[1:-1]\n in_section = section_name == next_section\n continue\n elif not in_section:\n continue\n elif line.lstrip().startswith('#') or not line.strip():\n continue\n flag, equalsign, value = line.partition('=')\n # if no value given, assume stringified boolean true\n if not equalsign:\n value = 'true'\n flag = flag.strip()\n value = value.strip()\n while flag.startswith('-'):\n flag = flag[1:]\n dictionary[flag] = value\n return dictionary", "def updateSectionMap(self):\n self.sectionMap={}\n BLOCKSIZE=512\n #TODO: modify struct to be 'iilb' or something\n sections=self.fileReadSectionKeys(76,KEYS_SECTIONS,fixedOffsetBytes=16)\n for sectionName in sections.keys():\n blockIndex, entrySize, entryCount = sections[sectionName]\n self.sectionMap[sectionName]={}\n self.sectionMap[sectionName]['byteStart']=blockIndex*BLOCKSIZE\n self.sectionMap[sectionName]['entrySize']=entrySize\n self.sectionMap[sectionName]['entryCount']=entryCount\n self.sectionMap[sectionName]['byteLast']=blockIndex*BLOCKSIZE+entrySize*entryCount\n self.sectionMap[sectionName]['sizeBytes']=entrySize*entryCount", "def protocol_parse(filename, protocol):\n \n f = file(filename, 'r')\n varnames = f.readline().strip().split(\",\")\n targetline = [l.strip().split(\",\") for l in f if l.startswith(protocol)][0]\n f.close()\n return dict( zip(varnames,targetline) )", "def _init_port_dicts(self):\n\n # Extract identifiers of source ports in all modules sending input to\n # the current module's ports and of destination ports in the current\n # module's interface for all modules sending input to the current\n # module:\n self._in_port_dict = {}\n self._in_port_dict['gpot'] = {}\n self._in_port_dict['spike'] = {}\n self._in_port_dict_ids = {}\n self._in_port_dict_ids['gpot'] = {}\n self._in_port_dict_ids['spike'] = {}\n\n self._from_port_dict = {}\n self._from_port_dict['gpot'] = {}\n self._from_port_dict['spike'] = {}\n self._from_port_dict_ids = {}\n self._from_port_dict_ids['gpot'] = {}\n self._from_port_dict_ids['spike'] = {}\n\n self._in_ids = self.routing_table.src_ids(self.id)\n for in_id in self._in_ids:\n 
self.log_info('extracting input ports for %s' % in_id)\n\n # Get interfaces of pattern connecting the current module to\n # source module `in_id`; `int_1` is connected to the current\n # module, `int_0` is connected to the other module:\n pat = self.routing_table[in_id, self.id]['pattern']\n int_0 = self.routing_table[in_id, self.id]['int_0']\n int_1 = self.routing_table[in_id, self.id]['int_1']\n\n # Get ports in interface (`int_1`) connected to the current\n # module that are connected to the other module via the pattern:\n self._in_port_dict['gpot'][in_id] = \\\n pat.dest_idx(int_0, int_1, 'gpot', 'gpot')\n self._in_port_dict_ids['gpot'][in_id] = \\\n self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])\n self._in_port_dict['spike'][in_id] = \\\n pat.dest_idx(int_0, int_1, 'spike', 'spike')\n self._in_port_dict_ids['spike'][in_id] = \\\n self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])\n\n # Get ports in interface (`int_0`) connected to the other module\n # that are connected to the current module via the pattern:\n self._from_port_dict['gpot'][in_id] = \\\n pat.src_idx(int_0, int_1, 'gpot', 'gpot')\n self._from_port_dict_ids['gpot'][in_id] = \\\n self.pm_all['gpot'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])\n self._from_port_dict['spike'][in_id] = \\\n pat.src_idx(int_0, int_1, 'spike', 'spike')\n self._from_port_dict_ids['spike'][in_id] = \\\n self.pm_all['spike'][in_id].ports_to_inds(self._from_port_dict['gpot'][in_id])", "def parse(context: Context, module_id: int, linker_map_path: Path, executable_sections, base_folder=True):\r\n\r\n sections = dict()\r\n lines = []\r\n\r\n # read linker map line-by-line\r\n with linker_map_path.open('r') as file:\r\n lines = file.readlines()\r\n\r\n # group linker map lines by what section they are in\r\n groups = defaultdict(list)\r\n section_name = None\r\n for text_line in lines:\r\n if \"section layout\" in text_line:\r\n section_name = text_line.split(\" \")[0]\r\n groups[section_name] = []\r\n elif section_name:\r\n groups[section_name].append(text_line)\r\n\r\n # determine what section names are already known\r\n already_known_names = set()\r\n for section in executable_sections:\r\n if section.name:\r\n already_known_names.add(section.name)\r\n\r\n # get all the linker map sections which are not already known\r\n map_names = [x for x in groups.keys() if not x in already_known_names]\r\n\r\n # get the name for all sections which has size and are not already known (this will probably be an list of None's)\r\n sects_names = [\r\n x.name for x in executable_sections if not x.name in already_known_names and x.size > 0]\r\n\r\n # try to match the section names with those from the linker map, i.e., assign the section a name from the linker map.\r\n skip = 0\r\n for i, section in enumerate([x for x in executable_sections if not x.name in already_known_names and x.size > 0]):\r\n index = i + skip\r\n if index < len(map_names):\r\n if len(groups[map_names[index]]) == 0 and map_names[index] != \".bss\":\r\n section.name = map_names[index + 1]\r\n skip += 1\r\n else:\r\n section.name = map_names[index]\r\n\r\n # sort and create linker map sections\r\n executable_sections.sort(key=lambda x: x.start)\r\n for i, section in enumerate(executable_sections):\r\n if section.size > 0:\r\n if not section.name:\r\n assert i > 1\r\n last_section = executable_sections[i - 1]\r\n name_index = SECTION_NAME_ORDER.index(last_section.name)\r\n section.name = SECTION_NAME_ORDER[name_index + 1]\r\n context.warning(f\"section 
name could not be determine using the map file, using the next name instead '{section.name}'\")\r\n assert False # TODO\r\n\r\n sections[section.name] = Section(\r\n section.name, section.start, section.size, section.code_segments)\r\n sections[section.name].first_padding = section.first_padding + section.offset_padding\r\n sections[section.name].data = section.data\r\n sections[section.name].index = i\r\n sections[section.name].alignment = section.alignment\r\n\r\n # for each section, go through all the lines and try to find sysmbols\r\n for section_name, lines in groups.items():\r\n for text_line in lines:\r\n line = re.sub(r'( \\(entry of [^)]*\\))', \"\", text_line)\r\n data = [x.strip() for x in line.strip().split(\" \")]\r\n data = [x for x in data if len(x) > 0]\r\n\r\n # not symbols\r\n if len(data) < 6 or len(data) > 7:\r\n continue\r\n\r\n lib = None\r\n obj = None\r\n name = None\r\n size = int(data[1], base=16)\r\n addr = int(data[2], base=16)\r\n if len(data) == 6:\r\n name = data[4].split(\"\\\\\")[-1]\r\n obj = data[5]\r\n elif len(data) == 7:\r\n name = data[4]\r\n lib = data[5].split(\"\\\\\")[-1]\r\n obj = data[6]\r\n\r\n # group libraries togather (e.g. JSystem)\r\n if lib:\r\n for k, v in settings.LIBRARY_LUT:\r\n if lib.startswith(k):\r\n lib = v + lib\r\n break\r\n\r\n if obj:\r\n # move translation units into files (e.g. d_a_XXX -> d/a/d_a_XXX)\r\n if base_folder:\r\n if not lib:\r\n for k, v in settings.FOLDERS:\r\n if obj.startswith(k):\r\n obj = v + obj\r\n break\r\n if \"\\\\\" in obj:\r\n obj = obj.replace(\"\\\\\", \"/\")\r\n if \"/\" in obj:\r\n # if the object file is a path only use the last 3 parts.\r\n # choice was arbitrary, but it looks OK\r\n obj = \"/\".join(obj.split(\"/\")[-3:])\r\n\r\n # if we're the main.dol, then convert the address to relative address\r\n if module_id == 0:\r\n addr -= sections[section_name].addr\r\n\r\n # add the symbol\r\n symbol = Symbol(addr, size, 0, name, lib, obj)\r\n symbol.source = f\"linker_map/'{linker_map_path}'/{addr:08X}\"\r\n sections[section_name].symbols.append(symbol)\r\n\r\n # calculate a dictionary of addresses used by each section, this will later\r\n # be used to remove access labels generated by the analyze.py module\r\n addrs = defaultdict(lambda: dict())\r\n for k, v in sections.items():\r\n for symbol in v.symbols:\r\n addrs[k][symbol.addr] = symbol\r\n\r\n return sections, addrs", "def segment_assignments_to_dict(rep_str):\n rep_str = rep_str.strip(\" \\n\")\n rep_lines = rep_str.split(\"\\n\")\n reps = collections.OrderedDict()\n for line in rep_lines: \n if not \"->\" in line: \n #print(\"skipping line\", line) \n continue \n k,v = line.split(\"->\")\n k = k.strip().upper()\n v = v.strip()\n \n v1,v2 = v.split(\":\")\n v1 = v1.strip()\n v2 = v2.strip()\n reps[k] = [v1,v2]\n return reps", "def parse(self, procfile):\r\n cfg = OrderedDict()\r\n with open(procfile) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n m = RE_LINE.match(line)\r\n if m:\r\n cfg[m.group(1)] = m.group(2)\r\n return cfg", "def _extract_section(section_content):\n lines = section_content.split(\"\\n\")\n\n section_dict = OrderedDict()\n for line in lines:\n # drop the comment\n if \"!\" in line:\n st_comment = line.find(\"!\")\n line = line[:st_comment]\n\n exps = line.strip().split(\"=\")\n if len(exps) != 2:\n continue\n\n arg_name = exps[0].strip()\n arg_values = [v.strip() for v in exps[1].split(\",\") if v.strip()]\n\n section_dict[arg_name] = arg_values\n return section_dict", "def parse_pref(file):\n dict = {}\n 
with open(file) as f:\n raw_content = f.read()\n lines = raw_content.splitlines(True)[1:]\n for line in lines:\n student_id = int(line.split('\\t')[0])\n pref_list_line = line.split('\\t')[1]\n pref_list = [int(x) for x in pref_list_line.split()]\n dict[student_id] = pref_list\n return dict", "def _parse_file(self, fname):\n fin = file(fname)\n try:\n section = 'draco2'\n lineno = 0\n for line in fin:\n lineno += 1\n if self.re_ignore.match(line):\n continue\n match = self.re_section.match(line)\n if match:\n section = match.group(1)\n continue\n match = self.re_assign.match(line)\n if not match:\n raise ConfigSyntaxError(filename=fname, lineno=lineno)\n name = match.group(1).lower()\n language = match.group(2)\n try:\n value = eval(match.group(3))\n except:\n raise ConfigSyntaxError(filename=fname, lineno=lineno)\n if language:\n key = (section, language)\n else:\n key = section\n if key not in self.m_sections:\n self.m_sections[key] = {}\n self.m_sections[key][name] = value\n finally:\n fin.close()", "def GetPortDict(self, name):\n return self._port_names.get(name) or {'tcp': {}, 'udp': {}}", "def phonebook_load(filename):\n f = open(filename)\n {name: number for name, number in\n [line.rstrip(\"\\n\").split() for line in f]}\n f.close()", "def read_topology(topology_file, hostmap=None, spinemap=None, debug=None):\n topology = {}\n current_node = ''\n with open(topology_file, mode='r', buffering=1) as f:\n for line in f:\n line = line.strip()\n if line:\n # Read the name of nodes and the number of ports (Switches or HCAs)\n m = re.search('^(\\w+)\\s+(\\d+)\\s+\\\"(.+?)\\\"\\s+#\\s+\\\"(.+?)\\\"', line)\n if m:\n current_node = m.groups()[2]\n topology[current_node] = {}\n topology[current_node]['number_of_ports'] = int(m.groups()[1])\n topology[current_node]['label'] = m.groups()[3]\n\n if m.groups()[0] == 'Switch':\n topology[current_node]['node_type'] = 'switch'\n topology[current_node]['label'] = current_node\n\n if spinemap:\n # mark if switch is a spine one\n if current_node in spinemap:\n topology[current_node]['switch_type'] = 'spine'\n else:\n topology[current_node]['switch_type'] = 'leaf'\n # if no spinemap, we still need an empty switch_type\n else:\n topology[current_node]['switch_type'] = ''\n\n else:\n topology[current_node]['node_type'] = 'hca'\n\n if hostmap:\n # if hca in mapfile, use hostname\n if current_node in hostmap.keys():\n hostname = hostmap[current_node]\n # keep the HBA model\n label = hostname + \" \" + m.groups()[3].split()[0]\n topology[current_node]['label'] = label\n else:\n topology[current_node]['label'] = current_node\n else:\n topology[current_node]['label'] = current_node\n\n topology[current_node]['ports'] = []\n\n # Read the port lines\n m = re.search('^\\[(\\d+)\\].*?\\\"(.+?)\\\"\\[(\\d+)\\]', line)\n if m:\n local_port = int(m.groups()[0])\n remote = m.groups()[1]\n remote_port = int(m.groups()[2])\n\n # use speed to determine color and weight\n weight, color = speed2weight(line.split()[-1])\n\n topology[current_node]['ports'].append(\n (local_port, remote, remote_port, weight, color))\n return topology", "def _parsefile(self, rngpath: str) -> dict:\n\n # TODO check it's a rng file (avoid utf-8 encoding errors)\n try:\n with open(rngpath, 'r') as file:\n r = [v.split() for v in file]\n except (IOError, FileNotFoundError):\n raise ReadError('Error opening rng file %s' % rngpath)\n return\n\n natoms = int(r[0][0])\n nranges = int(r[0][1])\n end = int((1+natoms)*2)\n\n # shortname + colour (3 floats)\n atoms = np.array(r[2:end:2])\n rngs = 
r[int(end):int(end+nranges)] # ranges\n\n # Read rows as numpy string array\n rngsconv = np.array(rngs, dtype='S10')\n\n ranges = rngsconv[:,1:3].astype('f8') # Extract ranges as\n # 2 col array of floats\n composition = rngsconv[:,3:3+natoms].astype('b') # Extract ion\n # composition array\n # as bool\n\n return {'ranges':ranges,\n 'atoms':atoms,\n 'comp':composition,\n 'nranges':nranges,\n 'natoms':natoms,\n }", "def read_assignments(filename):\n personal_spots = [ ]\n trackleaders_spots = [ ]\n support_spots = [ ]\n wb = load_workbook(filename)\n sheet = wb.active\n for row in sheet.rows:\n tag = row[0].value\n if tag == \"TL\":\n # TrackLeader rented spot\n record = { \"rider\": (row[2].value or \"_\") + \" \" + (row[3].value or \"_\"),\n \"esn\": row[4].value,\n \"unit\": row[1].value\n }\n trackleaders_spots.append(record)\n elif tag == \"PS\" or tag == \"SV\":\n # Personal spot; we have a URL\n url = row[4].value\n gid = url.split(\"=\")[-1]\n record = { \"rider\": (row[2].value or \"_\") + \" \" + (row[3].value or \"_\"),\n \"gid\": gid\n }\n if tag == \"PS\":\n personal_spots.append(record)\n elif tag == \"SV\":\n support_spots.append(record)\n assignments = {\"kind\": \"assignments\",\n \"personal_spots\": personal_spots,\n \"trackleaders_spots\": trackleaders_spots,\n \"support_spots\": support_spots\n }\n return assignments", "def loadhosts():\n\n path=\"/etc/hosts\" if os.path.isfile(\"/etc/hosts\") else \"../data/hosts\"\n with open(path,\"r\") as hosts:\n hostsdict={line.partition(' ')[0].strip():line.partition(' ')[2].strip() for line in hosts if (not line.startswith('#') and not \":\" in line and line.strip())}\n return hostsdict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the port integer corresponding to the given section name. If the section is None, or an unrecognized one, return the default one (3306).
def get_port_from_section(section: str) -> int: _, sec2port = read_section_ports_list() return sec2port.get(section, 3306)
[ "def get_section_from_port(port: int) -> Optional[str]:\n port2sec, _ = read_section_ports_list()\n return port2sec.get(port, None)", "def port_num(name):\n for num in self.port_map:\n if self.port_map[num] == name:\n return num\n return -1", "def get_port_number(self, port_name):\n\n for i in range(len(self.ports)):\n if self.ports[i] == port_name:\n return i \n raise ValueError", "def _get_port(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n port = 80\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n port = 443\n\n url_parts = url.split(':')\n\n if len(url_parts) == 1:\n return port\n else:\n port_part = url_parts[1]\n port_section = port_part.split('/')[0]\n try:\n int(port_section)\n except:\n return port\n return int(port_section)\n\n return port", "def port_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port_id\")", "def docker_mapped_port(cid, port):\n output = subprocess.check_output('docker port %s %s' % (cid, port), shell=True)\n return int(output.split(':', 2)[1])", "def port(self):\n return self._val.port or DEFAULT_PORTS.get(self._val.scheme)", "def port_id(self):\n # type: () -> int\n return self._get_property('port_id')", "def get_section_by_name(self, name):\r\n # The first time this method is called, construct a name to number\r\n # mapping\r\n #\r\n if self._section_name_map is None:\r\n self._section_name_map = {}\r\n for i, sec in enumerate(self.iter_sections()):\r\n self._section_name_map[sec.name] = i\r\n secnum = self._section_name_map.get(name, None)\r\n return None if secnum is None else self.get_section(secnum)", "def port_name(num):\n return self.port_map.get(num, 'Unknown')", "def get_section_by_name(self, name):\n # The first time this method is called, construct a name to number\n # mapping\n #\n if self._section_name_map is None:\n self._section_name_map = {}\n for i, sec in enumerate(self.iter_sections()):\n self._section_name_map[sec.name] = i\n secnum = self._section_name_map.get(name, None)\n return None if secnum is None else self.get_section(secnum)", "def get_node_port(self, key):\n return self._get(key, \"port\")", "def config_integer(self, section, param, default=None):\n try:\n return int(self.config_rds.get(section, param))\n except NoOptionError:\n if default is not None:\n return int(default)\n else:\n raise", "def _find_host_port(self, container_name, container_port):\n cfn_container_definitions = self.infos.green_infos.stack['Resources'][\n 'TaskDefinition']['Properties']['ContainerDefinitions']\n container_info = next(\n (x for x in cfn_container_definitions if x['Name'] == container_name), None)\n return next((x for x in container_info['PortMappings'] if x['ContainerPort'] == container_port), None)['HostPort']", "def port(n: str) -> int:\n\ttry:\n\t\tp = int(n)\n\texcept ValueError as exc:\n\t\traise argparse.ArgumentError('invalid value for port!') from exc\n\n\tif 0 < p < 65536:\n\t\treturn p\n\telse:\n\t\traise argparse.ArgumentError('port value out of range!')", "def socketPort(self):\n\n if not self.isBound():\n return None\n\n return self._port", "def _ReadPortNumber(self):\n if not self.is_alive():\n raise DevServerStartupError('Devserver terminated unexpectedly!')\n\n try:\n timeout_util.WaitForReturnTrue(os.path.exists,\n func_args=[self.port_file],\n timeout=self.DEV_SERVER_TIMEOUT,\n period=5)\n except timeout_util.TimeoutError:\n self.terminate()\n raise DevServerStartupError('Devserver portfile does not exist!')\n\n self.port = 
int(osutils.ReadFile(self.port_file).strip())", "def select_unused_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n addr, port = s.getsockname()\n s.close()\n return port", "def GetPortDict(self, name):\n return self._port_names.get(name) or {'tcp': {}, 'udp': {}}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the section name corresponding to the given port. If the port is the default one (3306) or an unknown one, return a null value.
def get_section_from_port(port: int) -> Optional[str]: port2sec, _ = read_section_ports_list() return port2sec.get(port, None)
[ "def get_port_from_section(section: str) -> int:\n _, sec2port = read_section_ports_list()\n return sec2port.get(section, 3306)", "def port_name(self):\n return self.get_attr_string('port_name')", "def port_name(num):\n return self.port_map.get(num, 'Unknown')", "def port_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port_id\")", "def _find_host_port(self, container_name, container_port):\n cfn_container_definitions = self.infos.green_infos.stack['Resources'][\n 'TaskDefinition']['Properties']['ContainerDefinitions']\n container_info = next(\n (x for x in cfn_container_definitions if x['Name'] == container_name), None)\n return next((x for x in container_info['PortMappings'] if x['ContainerPort'] == container_port), None)['HostPort']", "def getName(self):\n portnum = self.getPortNumber()\n return port_names[portnum]", "def get_datadir_from_port(port: int) -> str:\n section = get_section_from_port(port)\n if section is None:\n return \"/srv/sqldata\"\n else:\n return \"/srv/sqldata.\" + section", "def port(self):\n return self._val.port or DEFAULT_PORTS.get(self._val.scheme)", "def get_node_port(self, key):\n return self._get(key, \"port\")", "def port_id(self) -> str:\n return self._port_id", "def port_id(self):\n # type: () -> int\n return self._get_property('port_id')", "def l4_port(port, proto, both=True):\n try:\n name = socket.getservbyport(port, proto)\n if both:\n name = \"{} ({})\".format(name, port)\n except:\n name = str(port)\n return name", "def getPortDescription(self):\n portnum = self.getPortNumber()\n return port_descriptions[portnum]", "def GetPortDict(self, name):\n return self._port_names.get(name) or {'tcp': {}, 'udp': {}}", "def name(self):\n return self.port.get_logical_port().id", "def port_num(name):\n for num in self.port_map:\n if self.port_map[num] == name:\n return num\n return -1", "def _get_port(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n port = 80\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n port = 443\n\n url_parts = url.split(':')\n\n if len(url_parts) == 1:\n return port\n else:\n port_part = url_parts[1]\n port_section = port_part.split('/')[0]\n try:\n int(port_section)\n except:\n return port\n return int(port_section)\n\n return port", "def get_port(self) -> str:\n return self.__serial.port", "def getPort(self):\n return int(self[SipViaHeader.PARAM_PORT]) if SipViaHeader.PARAM_PORT in self else None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translates port number to expected datadir path
def get_datadir_from_port(port: int) -> str: section = get_section_from_port(port) if section is None: return "/srv/sqldata" else: return "/srv/sqldata." + section
[ "def make_host_port_path(uds_path, port):\n return \"{}_{}\".format(uds_path, port)", "def portdir(argv):\n\tprint portage.settings[\"PORTDIR\"]", "def get_file_name(self, port):\n \n port_file_name = \"%s_%s_%d\" %(self.file_prefix, self.system_manager.cur_user, port )\n return os.path.join(self.working_dir, port_file_name)", "def backend_port(self, value):\n if value is not None and '/' not in value:\n value += '/tcp'\n self.__backend_port = value", "def docker_mapped_port(cid, port):\n output = subprocess.check_output('docker port %s %s' % (cid, port), shell=True)\n return int(output.split(':', 2)[1])", "def full_port_name(portname):\r\n m = re.match('^COM(\\d+)$', portname)\r\n if m and int(m.group(1)) < 10:\r\n return portname\r\n return '\\\\\\\\.\\\\' + portname", "def port(n: str) -> int:\n\ttry:\n\t\tp = int(n)\n\texcept ValueError as exc:\n\t\traise argparse.ArgumentError('invalid value for port!') from exc\n\n\tif 0 < p < 65536:\n\t\treturn p\n\telse:\n\t\traise argparse.ArgumentError('port value out of range!')", "def process_port_number(port):\n try:\n port = int(port)\n if port in range(1024, 64001):\n print('Port number is valid. Your port number is {}\\n'.format(port))\n return port\n\n else:\n sys.exit(1)\n\n except:\n print('Unacceptable port number: Must be in range between 1024 to 64000.\\n')\n sys.exit(1)", "def _make_port(self, port):\n return Port(port)", "def l4_port(port, proto, both=True):\n try:\n name = socket.getservbyport(port, proto)\n if both:\n name = \"{} ({})\".format(name, port)\n except:\n name = str(port)\n return name", "def assign_port(self, owner, port):\n\n out_file = open(self.get_file_name(port),'w')\n out_file.write(\"%s:%d\\n\" %(owner, port))\n out_file.close()", "def correct(directory_name):\n add_zeros = lambda string: '{0:02d}'.format(int(string))\n elements = directory_name.split('_')\n return '{0}_{1}_{2}_{3}_{4}_{5}_{6}'.format(elements[0], elements[1], add_zeros(elements[2]), add_zeros(elements[3]), add_zeros(elements[4]), add_zeros(elements[5]), add_zeros(elements[6]))", "def _get_port(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n port = 80\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n port = 443\n\n url_parts = url.split(':')\n\n if len(url_parts) == 1:\n return port\n else:\n port_part = url_parts[1]\n port_section = port_part.split('/')[0]\n try:\n int(port_section)\n except:\n return port\n return int(port_section)\n\n return port", "def find_data_dir() -> str:\n data_dirs = {\n \"fear\": \"/mnt/data0/data\",\n \"hydra\": \"/mnt/archive/shared/data\",\n \"turing\": \"/srv/galene0/shared/data\",\n }\n name_of_machine = platform.node() # name of machine as reported by operating system\n return data_dirs.get(name_of_machine, to_absolute_path(\"data\"))", "def compute_domain_and_port(self):\n\n # Resolving the domain...\n \n # Domain is parent domain, if\n # url is relative :-)\n if self.isrel:\n self.domain = self.baseurl.domain\n else:\n # If not relative, then domain\n # if the first item of dirpath.\n self.domain=self.dirpath[0]\n self.dirpath = self.dirpath[1:]\n\n # Find out if the domain contains a port number\n # for example, server:8080\n dom = self.domain\n index = dom.find(self.PORTSEP)\n if index != -1:\n self.domain = dom[:index]\n # A bug here => needs to be fixed\n try:\n self.port = int(dom[index+1:])\n except:\n pass\n\n # Now check if the base domain had a port specification (other than 80)\n # Then we need to use that port for all its children, otherwise\n # we 
can use default value.\n if self.baseurl and \\\n self.baseurl.port != self.port and \\\n self.baseurl.protocol != 'file://':\n self.port = self.baseurl.port", "def build_db_path(directory):\n\n return directory / 'test.sqlite'", "def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n s.close()\n return port\n except Exception as e:\n pass\n return -1", "def port_name(num):\n return self.port_map.get(num, 'Unknown')", "def port_str_to_int(port):\r\n try:\r\n port = int(port)\r\n if port is None or port < 1024 or port > 49151:\r\n raise ValueError\r\n return port\r\n except ValueError:\r\n print('\"' + str(port) + '\" is not a valid port. Must be an integer between 1025 and 49150.')\r\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split address into (host, port).
def addr_split(addr: str, def_port: int = 3306) -> Tuple[str, int]: port = def_port if addr.count(":") > 1: # IPv6 if addr[0] == "[": # [ipv6]:port addr_port_rx = re.compile(r"^\[(?P<host>[^]]+)\](?::(?P<port>\w+))?$") m = addr_port_rx.match(addr) if not m: raise ValueError("Invalid [ipv6]:port format: '%s'" % addr) addr = m.group("host") port_sec = m.group("port") if port_sec is not None: port = _port_sec_to_port(port_sec) # plain ipv6 elif ":" in addr: addr, port_sec = addr.split(":") port = _port_sec_to_port(port_sec) return addr, port
[ "def split_inet_addr(addr):\n split = addr.split(\":\")\n if len(split) < 2:\n return None\n ip = split[0]\n port = toInt(split[1])\n if port is None:\n return None\n return (ip, port)", "def parse_address(addr):\n if ':' in addr:\n try:\n host, port = addr.split(':')\n except ValueError:\n raise ValueError('Invalid address: %s' % addr)\n else:\n host, port = 'localhost', addr\n if host == '*':\n host = '' # any\n try:\n return (host, int(port))\n except ValueError:\n raise ValueError('Invalid address: %s' % addr)", "def split_host_and_port(netloc: str) -> Tuple[str, Optional[int]]:\n match = _netloc_re.match(netloc)\n if match:\n host = match.group(1)\n port = int(match.group(2)) # type: Optional[int]\n else:\n host = netloc\n port = None\n return (host, port)", "def split_host_port(value: str, default_port: Optional[int]) -> Tuple[str, int]:\n t = value.rsplit(':', 1)\n # If *value* contains ``:`` we consider it to be an IPv6 address, so we attempt to remove possible square brackets\n if ':' in t[0]:\n t[0] = ','.join([h.strip().strip('[]') for h in t[0].split(',')])\n t.append(str(default_port))\n return t[0], int(t[1])", "def hostportpair(host, port):\n tup = host.split(',', 1)\n if len(tup) == 2:\n host = tup[0]\n sport = tup[1]\n if not sport.isdigit():\n self.logger.error('%s: port must be numeric' % host)\n sys.exit(-1)\n port = int(sport)\n if port <= 0 or port > MAX16INT:\n self.logger.error('%s: port must be > 0 and < %d ' % (host, MAX16INT))\n sys.exit(-1)\n return host, port", "def parse_address(addr, strict=False):\n if not isinstance(addr, six.string_types):\n raise TypeError(\"expected str, got %r\" % addr.__class__.__name__)\n scheme, sep, loc = addr.rpartition(\"://\")\n if strict and not sep:\n msg = (\n \"Invalid url scheme. \"\n \"Must include protocol like tcp://localhost:8000. 
\"\n \"Got %s\" % addr\n )\n raise ValueError(msg)\n if not sep:\n scheme = DEFAULT_SCHEME\n return scheme, loc", "def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]:\n try:\n raw_host, _, raw_port = address.rpartition(\":\")\n\n port = int(raw_port)\n\n if port > 65535 or port < 1:\n raise ValueError(\"Port number is invalid.\")\n\n try:\n host = raw_host.translate({ord(i): None for i in \"[]\"})\n version = ip_address(host).version == IPV6\n except ValueError:\n host = raw_host\n version = None\n\n return host, port, version\n\n except ValueError:\n return None", "def parseURI(self,url):\n addr = \"\"\n parts = []\n ip = False\n parts = url.split('/')\n #extract ip address with port\n if(len(parts)>2):\n addr = parts[2] #this contains X.X.X.X:PORT\n else:\n addr = parts[0] #it is possible the mtURL is \"X.X.X.X:PORT/\" (no http), then parts[0] will still be X.X.X.X:PORT\n # extract the ip address \n addr = addr.split(':')\n if(len(addr)>1):\n ip = addr[0]\n port = addr[1]\n else:\n ip = False\n port = False\n return ip, port", "def parseURI(url):\n\thostport = url.split(':')\n\thost = hostport[0] if hostport[0] != 'localhost' else socket.gethostname()\n\treturn host, hostport[1] if len(hostport) > 1 else '80'", "def resolve_address(address):\n hostname, *rest = address.rsplit(\":\", 1)\n ip_address = resolve_hostname(hostname)\n if \":\" in ip_address:\n ip_address = \"[{}]\".format(ip_address)\n return ip_address + \"\".join(\":\" + port for port in rest)", "def get_host_port(uri):\n match = HostPortHelper.pattern.search(uri)\n if not match:\n raise ValueError(\"Bad uri string %s\" % uri)\n host, option, port = match.groups()\n return host, port", "def addr(self):\n return (self.ip, self.port)", "def _parse_host_and_port(uri, default_port=27017):\n if '://' not in uri:\n return uri, default_port\n\n uri = uri.split('://', 1)[1]\n\n if '/' in uri:\n uri = uri.split('/', 1)[0]\n\n # TODO(pascal): Handle replica sets better. 
Accessing the secondary hosts\n # should reach the same dataas the primary.\n if ',' in uri:\n uri = uri.split(',', 1)[0]\n\n if ']:' in uri:\n host, uri = uri.split(']:', 1)\n host = host + ']'\n elif ':' in uri and not uri.endswith(']'):\n host, uri = uri.split(':', 1)\n else:\n return uri, default_port\n\n if not uri:\n return uri, default_port\n\n try:\n return host, int(uri)\n except ValueError:\n raise InvalidURI('Invalid URI scheme: could not parse port \"%s\"' % uri)", "def resolve_address(address):\n hostname, port = address\n if len(hostname) == 0:\n ip = '0.0.0.0'\n else:\n ip = resolve(hostname)[0]\n\n assert isinstance(ip, str)\n assert isinstance(port, int)\n\n return ip, port", "def get_addr(host, port):\n if \":\" in host: # IPv6\n return \"[%s]:%s\" % (host, port)\n else: # IPv4\n return \"%s:%s\" % (host, port)", "def parse_hostname(hostname, default_port):\n try:\n host, sep, port = hostname.strip().rpartition(\" \")\n if not port: # invalid nothing there\n return None\n\n if not host: # no space separated port, only host as port use default port\n host = port\n port = default_port\n # ipv6 must have two or more colons\n if host.count(\":\") == 1: # only one so may be using colon delimited port\n host, sep, port = host.rpartition(\":\")\n if not host: # colon but not host so invalid\n return None\n if not port: # colon but no port so use default\n port = default_port\n\n host = host.strip()\n try:\n port = int(port)\n except ValueError:\n return None\n\n except AttributeError:\n return None\n\n return (host, port)", "def getHostFrom(fromHost):", "def _url_parse(uri):\n host = \"\"\n path = \"\"\n\n p_uri = urlparse(uri)\n host = p_uri.netloc\n path = p_uri.path.rstrip('/').strip('/')\n\n return (host,path)", "def parse_uri(uri):\n host, port, db = uri, 6379, 0\n if len(host.split('/')) == 2:\n host, db = host.split('/')\n if len(host.split(':')) == 2:\n host, port = host.split(':')\n return host, int(port), int(db)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is n a perfect number?
def is_perfect(n): return sod(n) == 2*n and n > 0
[ "def perfect( n ):\n return sum(divisorsr(n,1)) == n", "def perfect_number(n):\n divisors = find_divisors(n)\n divisors.remove(n)\n sum_divisors = sum(divisors)\n return sum_divisors == n", "def is_perfect(n):\n # 1 is a factor of every number so the variable can be initialized with\n # this value and then it can be skipped in the process of finding factors\n sum_of_factors = 1\n \n # This loop adds all of the factors. A factor cannot be greater than 1/2\n # of the number itself, therefore the loop ends at the half-way mark.\n for x in range (2, n//2+1):\n if (n%x == 0):\n sum_of_factors += x\n \n if (n == sum_of_factors):\n return True\n else:\n return False", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def is_perfect(number):\n validate_integers(number)\n if number < 1:\n return False\n\n return len(num_and_sum_of_div(number)) == 1", "def print_perfect(n):\n for x in range(n):\n if is_perfect(x):\n print(x)", "def is_perfect_square(n):\n if n < 0:\n return False\n if n == 0:\n return True\n # Perfect squares in hexadecimal can only end in 0, 1, 4, or 9.\n if (n & 0xf) not in (0, 1, 4, 9):\n return False\n # No quick tests showed n as non-square, just check the square root.\n return isqrt(n) ** 2 == n", "def is_square(n):\n if n < 0:\n return False\n sqrt = n ** (1 / 2)\n number_dec = str(sqrt-int(sqrt))[1:]\n if len(number_dec) > 2:\n return False\n else:\n return True", "def is_twice_square(n):\n return int((n // 2) ** 0.5) ** 2 * 2 == n", "def is_abundant(n):\n return sod(n) > 2*n and n > 0", "def isSquare(n):\n if (n > 0):\n if (math.sqrt(n) - int(math.sqrt(n))):\n return False\n return True\n return False", "def is_amicable(n):\n div_sum_n = divisor_sum(n)\n return n == divisor_sum(div_sum_n) and n != div_sum_n", "def is_narcissistic(num):\n c = digit_count(num)\n sum = 0\n save = num\n while num != 0:\n digit = num % 10\n num = num // 10\n sum += digit**c\n return sum == save", "def is_abundant(n):\n return n < get_sum_divisors(n)", "def testWilson(n):\n if (fact(n - 1) + 1) % n == 0:\n return True\n return False", "def abundant(n):\r\n \"*** YOUR CODE HERE ***\"\r\n val = 1\r\n su = 0\r\n while val * val <= n:\r\n if n % val == 0:\r\n print (val, '*', n // val)\r\n su = su + val\r\n if val != 1 and val * val != n:\r\n su = su + (n // val)\r\n val = val + 1\r\n if su > n:\r\n return True\r\n else:\r\n return False", "def is_pos_square(n: int) -> bool:\n return 0 < n and (round(n ** 0.5) ** 2 == n)", "def isUniform(n, c):\n\n last = n % 2\n count = 0\n\n for _ in range(c):\n b = n % 2\n if last != b:\n if count > 2:\n break\n\n count += 1\n last = b\n n //= 2\n \n return count <= 2", "def is_powerful(self,n):\n if n <= 1:\n return True\n ex = [e for _,e in arith.factor(n)]\n for e in ex:\n if e < 2:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is n an abundant number?
def is_abundant(n): return sod(n) > 2*n and n > 0
[ "def is_abundant(n):\n return n < get_sum_divisors(n)", "def isabundant(n:Integral) -> bool:\r\n return n > sigma(factors(n))", "def is_abundant_number(number: int) -> bool:\n return get_sum_of_divisors(number) > number", "def abundant(n):\r\n \"*** YOUR CODE HERE ***\"\r\n val = 1\r\n su = 0\r\n while val * val <= n:\r\n if n % val == 0:\r\n print (val, '*', n // val)\r\n su = su + val\r\n if val != 1 and val * val != n:\r\n su = su + (n // val)\r\n val = val + 1\r\n if su > n:\r\n return True\r\n else:\r\n return False", "def is_abundant(number):\n if number < sum(find_divisors(number)):\n return True\n else:\n return False", "def abundant(n):\n i = 1\n res = []\n while i * i <= n:\n if n % i == 0:\n print(i, '*', n//i)\n res.extend([i, n//i])\n i += 1\n res.remove(n)\n res.remove(1)\n res = set(res)\n if sum(res) > n:\n return True\n else:\n return False", "def is_amicable(n):\n div_sum_n = divisor_sum(n)\n return n == divisor_sum(div_sum_n) and n != div_sum_n", "def two_abundant(number):\n\tfor test1 in range(2,int(number/2)+1):\n\t\ttest2 = number - test1\n\t\tif test2 < 1:\n\t\t\ttest2 = 1\n\n\t\tif is_abundant(test1) and is_abundant(test2):\n\t\t\treturn True\n\n\treturn False", "def can_be_represented(numbers, n):\n return any( (n-abn in numbers) for abn in numbers)", "def is_powerful(self,n):\n if n <= 1:\n return True\n ex = [e for _,e in arith.factor(n)]\n for e in ex:\n if e < 2:\n return False\n return True", "def isAbundant(x):\n \n # your code here\n Abundant = False\n sum = 0\n for i in range(1, x):\n if(x % i == 0):\n sum += i\n if (sum > x):\n Abundant = True\n \n else:\n Abundant = False\n \n return Abundant", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def is_narcissistic(num):\n c = digit_count(num)\n sum = 0\n save = num\n while num != 0:\n digit = num % 10\n num = num // 10\n sum += digit**c\n return sum == save", "def amicable(n):\r\n \"*** YOUR CODE HERE ***\"\r\n while True:\r\n n = n + 1\r\n m = sum_of_divisor(n) \r\n if m != n and sum_of_divisor(m) == n:\r\n break\r\n\r\n return n", "def McNuggets(n):\r\n\r\n for a in range(n):\r\n for b in range(n):\r\n for c in range(n):\r\n if 6*a + 9*b + 20*c == n:\r\n return True \r\n return False", "def perfect_number(n):\n divisors = find_divisors(n)\n divisors.remove(n)\n sum_divisors = sum(divisors)\n return sum_divisors == n", "def isUniform(n, c):\n\n last = n % 2\n count = 0\n\n for _ in range(c):\n b = n % 2\n if last != b:\n if count > 2:\n break\n\n count += 1\n last = b\n n //= 2\n \n return count <= 2", "def perfect( n ):\n return sum(divisorsr(n,1)) == n", "def is_perfect(n):\n return sod(n) == 2*n and n > 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is n a deficient number?
def is_defecient(n): return sod(n) < 2*n and n > 0
[ "def is_deficient_number(number: int) -> bool:\n return get_sum_of_divisors(number) < number", "def is_factor(n, f):\n return n % f == 0", "def is_powerful(self,n):\n if n <= 1:\n return True\n ex = [e for _,e in arith.factor(n)]\n for e in ex:\n if e < 2:\n return False\n return True", "def is_abundant(n):\n return sod(n) > 2*n and n > 0", "def safe(n: int) -> bool:\n # isolate ones digit\n ones = n % 10\n # isolate tens digit\n tens = int((n - ones) / 10)\n\n # checks to make sure whether n is not divisible by 9 and does not contain 9 as a digit\n return (n % 9 != 0) & (ones != 9) & (tens != 9)", "def isabundant(n:Integral) -> bool:\r\n return n > sigma(factors(n))", "def has_nontrivial_divisor(num):\n divisor = least_divisor(num)\n return bool(divisor < num)", "def factor_check(number, factor):", "def is_natural(num):\n if(not (num % 3) or not (num % 5)):\n return num\n else:\n return 0", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def testWilson(n):\n if (fact(n - 1) + 1) % n == 0:\n return True\n return False", "def is_amicable(n):\n div_sum_n = divisor_sum(n)\n return n == divisor_sum(div_sum_n) and n != div_sum_n", "def is_natural(n):\r\n\treturn isinstance(n, int) and n > 0", "def mangoldt(n):\n if(n<1 or n!=int(n)):\n raise ValueError(\n \"n must be positive integer\"\n )\n d = 2\n while (d<=n):\n if(n%d == 0):\n if (math.log(n,d)-int(math.log(n,d))==0):\n return math.log(d)\n else:\n return 0\n d += 1\n return 0", "def is_perfect(n):\n return sod(n) == 2*n and n > 0", "def amicable(n):\r\n \"*** YOUR CODE HERE ***\"\r\n while True:\r\n n = n + 1\r\n m = sum_of_divisor(n) \r\n if m != n and sum_of_divisor(m) == n:\r\n break\r\n\r\n return n", "def _validate(self, n):\n n = ZZ(n)\n if n <= 0:\n raise ValueError(\"n must be positive\")\n if n.ndigits() > 4095:\n raise ValueError(\"n must have at most 4095 digits\")\n return n", "def _severe_log(self, n):\n try:\n return math.log(n, math.e ** 8)\n except ValueError:\n # `n` might be too small\n\n return 0", "def is_narcissistic(num):\n c = digit_count(num)\n sum = 0\n save = num\n while num != 0:\n digit = num % 10\n num = num // 10\n sum += digit**c\n return sum == save" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Increases service capacity for every booking
def cap_inrease(self,number): if number == 1: self.current_capacity += 1 elif number == 2: self.service_two_capacity += 1 elif number == 3: self.service_three_capacity += 1 elif number == 4: self.service_four_capacity += 1 elif number == 5: self.service_five_capacity += 1
[ "def updateOneService(self, reservation):\n # Adds information to the new service\n self.setServiceClient(reservation.getReservClient())\n\n # checks if it's going to be a delay, that is, if the driver/vehicle is not available at the requested time\n self.calculateDepartAndArrivalHour(reservation)\n\n self.setServiceCircuit(reservation.getReservCircuit())\n self.setServiceCircuitKms(reservation.getReservCircuitKms())\n\n # Calculates how much work time is left for the driver after this service\n duration = reservation.duration()\n new_accumulated_hours = self.getAccumTime().add(duration)\n allowed_time_left = Driver.TIMELimit.diff(new_accumulated_hours)\n\n # Calculates how much kms are left fot the vehicle after this service\n new_accumulated_kms = int(self.getVehicleKmsDone()) + int(self.getServiceCircuitKms())\n allowed_kms_left = int(self.getVehicleAutonomy()) - new_accumulated_kms\n\n # set common parameters\n self.setAccumTime(new_accumulated_hours)\n self.setVehicleKmsDone(new_accumulated_kms)\n\n # Adds the rest of the information, depending on the allowed time and kms left\n if allowed_time_left < Driver.TIMEThreshold:\n self.setServiceDriverStatus(Driver.STATUSTerminated)\n\n elif allowed_kms_left < Vehicle.AUTONThreshold:\n self.setServiceDriverStatus(Driver.STATUSCharging)\n self.setServiceCircuitKms(reservation.getReservCircuitKms())\n\n else:\n self.setServiceDriverStatus(Driver.STATUSStandBy)\n\n self.setVehicleAutonomy(self.getVehicleAutonomy())", "def updateOneService(reservation, old_service):\n # Adds information to the new service\n new_service = []\n new_service.append(old_service[INDEXDriverName])\n new_service.append(old_service[INDEXVehiclePlate])\n new_service.append(reservation[INDEXClientNameInReservation])\n\n # checks if it's going to be a delay, that is, if the driver/vehicle is not available at the requested time\n startHour, endHour = calculateDelay(old_service, reservation)\n\n new_service.append(startHour)\n new_service.append(endHour)\n\n new_service.append(reservation[INDEXCircuitInReservation])\n new_service.append(reservation[INDEXCircuitKmsInReservation])\n\n # Calculates how much work time is left for the driver after this service\n duration = durationReservation(reservation)\n new_accumulated_hours = add(old_service[INDEXAccumulatedTime], duration)\n allowed_time_left = diff(TIMELimit, new_accumulated_hours)\n\n # Calculates how much kms are left fot the vehivle after this service\n new_accumulated_kms = int(old_service[INDEXAccumulatedKms]) + int(new_service[INDEXCircuitKms])\n allowed_kms_left = int(old_service[INDEXINDEXVehicAutonomy]) - new_accumulated_kms\n\n # Adds the rest of the information, depending on the allowed time and kms left\n if allowed_time_left < TIMEThreshold:\n new_service.append(STATUSTerminated)\n elif allowed_kms_left < AUTONThreshold:\n new_service.append(STATUSCharging)\n new_service.append(new_accumulated_hours)\n new_service.append(old_service[INDEXINDEXVehicAutonomy])\n new_service.append('0')\n else:\n new_service.append(STATUSStandBy)\n new_service.append(new_accumulated_hours)\n new_service.append(old_service[INDEXINDEXVehicAutonomy])\n new_service.append(str(new_accumulated_kms))\n\n return new_service", "def add_reserve(self):\r\n self._reserves += 1", "def update_serviceable_demand(coverage, sd):\n total_serviceable_demand = 0.0\n for demand in coverage[\"demand\"].keys():\n coverage[\"demand\"][demand][\"serviceableDemand\"] = sd[\"demand\"][demand][\"serviceableDemand\"]\n total_serviceable_demand += 
sd[\"demand\"][demand][\"serviceableDemand\"]\n coverage[\"totalServiceableDemand\"] = total_serviceable_demand\n return coverage", "async def increase_reliability(self):", "def do_changeCapacity(self, args):\n new_capacity = input(\"Enter seating capacity of a single bus: \")\n print(self._changeCapacity(new_capacity))", "def update_cooling_demand(self, action: float):\n\n raise NotImplementedError", "def additional_charge(self):\n self._balance=self._balance+1", "def bike_arrival(self) -> None:\n if self.num_bikes < self.capacity:\n self.bikes_arrived += 1\n self.num_bikes += 1", "def _ReduceCapacity(self, thickness):\n self.__available_capacity -= thickness", "def update(self, clock):\r\n if self.cashier_arrival != None and self.items > 0 and clock > self.arrived:\r\n self.items -= 1\r\n self.items_paid += 1", "def addHardMinShiftsWorkersPerDay(self):", "def _resolve_available_tickets(self, reservation):\n ticket_reservations = reservation.tickereservation_set.all()\n for ticket_reservation in ticket_reservations:\n ticket_reservation.ticket.quantity += ticket_reservation.quantity\n ticket_reservation.save()", "def grow_up_shared(self, cidx, amt):\r\n # split grow amount among number of clients\r\n per_amt = amt / cidx\r\n for idx in range(0, cidx):\r\n self.grow(idx, per_amt)", "def grow_up_shared(self, cidx, amt):\n # split grow amount among number of clients\n per_amt = amt / cidx\n for idx in range(0, cidx):\n self.grow(idx, per_amt)", "def capacity_rate_old(self):\n if self.total_hours_minus_buffer == 0.0:\n return 0.0\n return 100 * (self.allocated_hours_this_month / self.total_hours_minus_buffer_old)", "def bq_reserve_cost(dt, slots):\n t = (math.ceil(dt) / (1000 * 60 * 60))\n slot_count = math.ceil(slots / 100)\n return 4.00 * slot_count * t", "def updateServices(reservations_p, waiting4ServicesList_prevp):\n\n waiting4Services = deepcopy(waiting4ServicesList_prevp)\n\n new_services = []\n\n for reservation in reservations_p:\n\n # checks if reservation would pass km limit of vehicle or time limit of driver and chooses another driver if that's the case\n i = nextDriver(reservation, waiting4Services)\n\n # if there is no driver available to a reservation, try get some to work on the next reservation\n if i == len(waiting4Services):\n next\n else:\n\n old_service = waiting4Services.pop(i)\n new_service = updateOneService(reservation, old_service)\n new_services.append(new_service[:INDEXDriverStatus + 1])\n\n # makes driver and vehicle available again, after charging\n if new_service[INDEXDriverStatus] == STATUSCharging:\n charged = afterCharge(new_service)\n new_services.append(charged[:INDEXDriverStatus + 1])\n waiting4Services.append(charged)\n\n elif new_service[INDEXDriverStatus] == STATUSStandBy:\n waiting4Services.append(new_service)\n\n # sorts waiting4Services so that drivers available earlier are assigned services first\n waiting4Services = sortWaitingServices(waiting4Services)\n\n # adds to new_services the drivers that had no service in this period\n new_services = addNoServiceDriver(new_services, waiting4Services)\n\n return sortServices(new_services)", "def increment_served(self,new_serves):\r\n\t\tself.number_served += new_serves" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Church administrators can access user data here
def church_admin(self):
[ "def user(ctx):\n pass", "def get_single_user():", "def principalForUser(user):", "def get_user_info(user):\n\n return user", "def author_info(self):\n return User.objects.get(pk=self.user_id)", "def test_get_user_level_access(self):\n pass", "def get_user_data(self):\n # We get the UserData object this way because if we try to do it via a\n # filter the object will not be automatically created (it's an\n # AutoOneToOneField and so is only created when accessed like\n # `user.american_gut`)\n return getattr(self.request.user, self.get_user_data_related_name())", "def user_entity( self ):\n return", "def test_user_get_current(self):\n pass", "def user_info(self):\n return self.__user_info", "def test_get_user_effective_rights(self):\n pass", "def __int__(self):\n return self.userid", "def _get_user_info(self, user, **options):\n\n return None", "def citing_me():", "def __init__(self, user):\n super(UserItemData, self).__init__()\n self._user = user", "def test_get_run_as_user(self):\n pass", "def author_info(self):\n return User.objects.get(pk=self.author)", "def __init__(self, logged_in_user_id, member_id, user_view, hotlist,\n effective_ids=None):\n\n self.viewing_self = ezt.boolean(logged_in_user_id == member_id)\n\n self.user = user_view\n member_qs_param = user_view.user_id\n self.detail_url = '/u/%s/' % member_qs_param\n self.role = framework_helpers.GetHotlistRoleName(\n effective_ids or {member_id}, hotlist)", "def _update_user_info(self) -> dict:\n data = self.post(\"loadUserContent\").json()\n data = self._store.store_record_map(data)\n\n first_user = list(data[\"notion_user\"].keys())[0]\n first_space = list(data[\"space\"].keys())[0]\n self.current_user = self.get_user(first_user)\n self.current_space = self.get_space(first_space)\n\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all tags in RSS_FEED. Replace dash with whitespace.
def get_tags(): tags1 = TAG_HTML.findall(rssread) tags1 = [w.replace('-', ' ') for w in tags1] return tags1
[ "def get_tags():\n tags = []\n with open(RSS_FEED) as file:\n for line in file.readlines():\n for tag in TAG_HTML.findall(line):\n tags.append(tag.replace('-', ' ').lower())\n return tags", "def tags_rss(request, tags):\n c = RequestContext(request)\n c['entries'] = Entry.objects.filter(tags__tag__in=tags).distinct().order_by('-when')[:10]\n return render_to_response('rss.xml', c)", "def get_feed_tags(self, feed: Optional[FeedInput] = None) -> Iterable[str]:\n feed_url = _feed_argument(feed) if feed is not None else None\n return self._storage.get_tags((feed_url,))", "def removed_feed():\n title = '%s: Deprecated Feed' % config.SITE_NAME\n description = 'This feed has been deprecated and no longer exists.'\n feed_url = config.SITE_BASE + 'crss'\n feed = Feed(title, config.SITE_BASE, description, feed_url=feed_url,\n feed_guid=feed_url, feed_copyright=u'(c) Copyright 2012, Caleb Brown')\n feed.add_item('This RSS feed no longer exists', config.SITE_BASE,\n 'This feed no longer exists. But calebbrown.id.au is still '\n 'alive and kicking. Please visit the hompage to see what\\'s new.')\n response.content_type = 'application/rss+xml; charset=utf-8'\n return feed.writeString('utf-8')", "def FindFeeds():\n rss_page = \"http://www.latimes.com/services/site/la-rssinfopage,0,5039586.htmlstory\"\n\n html = ukmedia.FetchURL( rss_page )\n soup = BeautifulSoup( html )\n\n feeds = []\n div = soup.find('div',{'id':'story-body'} )\n for td in div.table.findAll('td', {'class':'rssTitleCell'} ):\n a = td.a\n url = urlparse.urljoin( rss_page, a['href'] )\n\n title = ukmedia.FromHTMLOneLine( a.renderContents(None) )\n feeds.append( (title,url) )\n\n return feeds", "def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)", "def get_feedurls():\n return [x.strip() for x in open(feeds).readlines() if x[0] != '#']", "def remove_tags(self, rules):\n for rule in rules:\n for s in self.soup.find_all(**rule):\n s.extract()", "def fixSelfClosingTags(self, stringifiedSoup):\n return self.selfClosingTagRegex.sub('', stringifiedSoup)", "def _strip_xml(txts):\n txts = html.unescape(html.unescape(txts)) # double unescape because Wikipedia dumps are a mess\n txts = txts.split('\\n')\n\n for i in range(len(txts)):\n for pattern in patterns:\n txts[i] = pattern[0].sub(pattern[1], txts[i])\n\n txts = [''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) for txt in txts if txt != '']\n return '\\n'.join(txts)", "def tag_rss(self, tag, lang, posts, kw, is_category):\n kind = \"category\" if is_category else \"tag\"\n # Render RSS\n output_name = os.path.normpath(\n os.path.join(kw['output_folder'],\n self.site.path(kind + \"_rss\", tag, lang)))\n feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(kind + \"_rss\", tag, lang).lstrip('/'))\n deps = []\n post_list = sorted(posts, key=lambda a: a.date)\n post_list.reverse()\n for post in post_list:\n deps += post.deps(lang)\n return {\n 'basename': str(self.name),\n 'name': output_name,\n 'file_dep': deps,\n 'targets': [output_name],\n 'actions': [(utils.generic_rss_renderer,\n (lang, \"{0} ({1})\".format(kw[\"blog_title\"](lang), tag),\n kw[\"site_url\"], None, post_list,\n output_name, kw[\"rss_teasers\"], kw[\"rss_plain\"], kw['feed_length'],\n feed_url))],\n 'clean': True,\n 'uptodate': [utils.config_changed(kw)],\n 'task_dep': ['render_posts'],\n }", "def blog_entry(html):\n blog_entry_pattern = r'<span class=\"date\">(.*)</span>\\s*<a href=\"(.*)\" target=\"_blank\" class=\"list-title\">(.*)</a>'\n for m_obj in 
re.finditer(blog_entry_pattern, html):\n log.debug('(Master) Producing blog entries {} {} {}'\n .format(m_obj.group(1), m_obj.group(2), m_obj.group(3)))\n yield m_obj.group(1), m_obj.group(2), m_obj.group(3)", "def tags(self):\n TAG_RE = r'\\#\\w+\\d*'\n matches = re.findall(TAG_RE, self.title)\n tags = []\n for m in matches:\n tags.append(m[1:])\n return tags", "def get_rss_feed_text():\n data = requests.get('http://retre.org/rssdd.xml', headers={'User-Agent': USER_AGENT})\n data.encoding = 'utf-8'\n return data.text", "def fetch_rss(rss):\n html = requests.get(rss).content\n doc = fromstring(html)\n items = doc.cssselect(\"channel item\")\n for item in items:\n stamp = item.cssselect('date')[0].text\n date_, time_ = stamp.split('T')\n time_ = time_.split('Z')[0]\n yield (date_, time_,\n item.cssselect(\"title\")[0].text_content().split('|')[0],\n item.cssselect(\"guid\" )[0].text)", "def generate_rss(packages, herd):\n if not packages.count():\n return \"\"\"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?><rss version=\"2.0\"><channel><title>Meatoo - Gentoo vs. Freshmeat Releases</title><link>http://meatoo.gentooexperimental.org/</link><description>The latest Freshmeat releases with matching Gentoo versions.</description><lastBuildDate>%s</lastBuildDate><generator>PyRSS2Gen-0.1.1</generator><docs>http://blogs.law.harvard.edu/tech/rss</docs><item><title>Herd %s has no entries.</title><link>http://meatoo.gentooexperimental.org/</link><description>There are no entries for %s</description><pubDate>%s</pubDate></item></channel></rss>\"\"\" % (datetime.datetime.utcnow(), herd, herd, datetime.datetime.utcnow())\n items = []\n for pkg in packages:\n items.append(PyRSS2Gen.RSSItem(\n title = \"%s/%s-%s [%s]\" % \\\n (pkg.portageCategory, pkg.packageName, pkg.portageVersion, \\\n pkg.latestReleaseVersion),\n description = \"Freshmeat Release Date: %s<br><br><b>Portage desc:</b><br> %s<br><br><b>Freshmeat desc:</b><br> %s<br>http://freshmeat.net/projects/%s/\" % (pkg.latestReleaseDate, pkg.portageDesc, pkg.descShort, pkg.packageName),\n link = \"http://meatoo.gentooexperimental.org/\",\n pubDate = datetime.datetime.utcnow()\n ))\n\n rss = PyRSS2Gen.RSS2(\n title = \"Meatoo - Gentoo vs. Freshmeat Releases\",\n link = \"http://meatoo.gentooexperimental.org/\",\n description = \"The latest Freshmeat releases with matching Gentoo versions.\",\n lastBuildDate = datetime.datetime.utcnow(),\n items = items)\n return rss.to_xml()", "def feed2fields(file):\n import feedparser\n d = feedparser.parse(file)\n settings = read_settings()\n subs = settings['SLUG_REGEX_SUBSTITUTIONS']\n for entry in d.entries:\n date = (entry.updated_parsed.strftime('%Y-%m-%d %H:%M')\n if hasattr(entry, 'updated_parsed') else None)\n author = entry.author if hasattr(entry, 'author') else None\n tags = ([e['term'] for e in entry.tags]\n if hasattr(entry, 'tags') else None)\n\n slug = slugify(entry.title, regex_subs=subs)\n kind = 'article'\n yield (entry.title, entry.description, slug, date,\n author, [], tags, None, kind, 'html')", "def getitemsfromrss(feedurl,itemclass = Item):\n tree = getfeed(feedurl)\n root = tree.getroot()\n almethods.removenamespaces(root)\n return getitems(root,itemclass=itemclass)", "def rss_feed(request):\n site = get_current_site(request)\n articles = Article.objects.filter(sites__id=site.id).order_by('-published_date')[20:]\n return load_template(request, site, 'rss_articles.html', {'articles': articles})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find set of tag pairs with similarity ratio > SIMILAR
def get_similarities(tags): simtags3 = {} for i in tags: prodtags3 = list(product([i,''], tags)) for j in prodtags3: seqtags3 = SequenceMatcher(None, j[0].lower(), j[1].lower()) if seqtags3.ratio() != 0.0 and seqtags3.ratio() >= SIMILAR and seqtags3.ratio() != 1.0: if j[0] not in simtags3 and j[0] not in simtags3.values(): simtags3[j[0]] = j[1] return simtags3
[ "def get_similarities(tags):\n similar_tags = []\n s_tags = set(tags)\n for tag in s_tags:\n for compare_tag in s_tags:\n if tag == compare_tag:\n continue\n else:\n compare = SequenceMatcher(None, tag, compare_tag).ratio()\n if compare > SIMILAR:\n if (compare_tag, tag) not in similar_tags:\n if len(tag) < len(compare_tag):\n similar_tags.append((tag, compare_tag))\n else:\n similar_tags.append((compare_tag, tag))\n return similar_tags", "def match_pair(pair, sim_method):\r\n doc1, doc2 = get_texts(pair)\r\n ents1 = extract_ents(nlp(doc1))\r\n ents2 = extract_ents(nlp(doc2))\r\n # cluster the corefer entities for each document\r\n c1 = cluster_doc(ents1)\r\n c2 = cluster_doc(ents2)\r\n similarity = sim_method(c1, c2)\r\n return similarity, [c1, c2]", "def get_tags_similarity(self):\n\n target_movie_tags = self.get_tags_count_(self.target_movie.movie_id)\n print(\"get_tags_similarity: target_movie_tags: %r\" % target_movie_tags)\n\n tags_similarity = {}\n\n users_query = \"select distinct user_id from tags where movie_id=%i\" % \\\n self.target_movie.movie_id\n user_records = self.db.execute(users_query).fetchall()\n print(\"get_tags_similarity: %i users have tagged this movie\"\n % len(user_records))\n\n for urec in user_records:\n user_id = urec[0]\n print(\"get_tags_similarity: Processing user: %i\" % user_id)\n\n movie_ids_query = \"\"\"\n SELECT distinct movie_id\n FROM tags\n WHERE movie_id != %i\n AND user_id=%i\n \"\"\" % (self.target_movie.movie_id, user_id)\n res = self.db.execute(movie_ids_query).fetchall()\n\n print(\"get_tags_similarity: User has tagget %i movies\" % len(res))\n if res:\n for mid_rec in res:\n movie_id = mid_rec[0]\n print(\n \"get_tags_similarity: -> Processing movie: %i\" %\n movie_id\n )\n\n movie_tags = self.get_tags_count_(movie_id, user_id)\n tags_similarity[movie_id] = self.tags_jaccard_index(\n target_movie_tags, movie_tags)\n\n return tags_similarity", "def similar_pairs(labels):\n P = [] # set of similar pairs\n Q = [] # set of dissimilar pairs\n n = len(labels)\n for i in range(n):\n for j in range(i+1, n):\n if labels[i] == labels[j]:\n P.append([i, j])\n else:\n Q.append([i, j])\n return P, Q", "def concept_tags_similarity(method1, method2, nl_dict, nl_model):\n # nl_sim = gensim_lang_cossim(method1, method2, nl_dict, nl_model)\n jaccard_sim, info_dict = counter_cossim(method1.concepts, method2.concepts)\n # avg_sim = (jaccard_sim + nl_sim) / 2\n # if len(info_dict) > 1:\n # print(method2)\n return jaccard_sim, info_dict", "def compare_all_pairs(sentences, w2vmodel):\n for s1, s2 in combinations(sentences, 2):\n # get similarity between s1 and s2\n prob = word_mover_distance_probspec(s1, s2, w2vmodel)\n print(s1)\n print(s2)\n print(pulp.value(prob.objective))", "def sentence_similarity(sentence1, sentence2):\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n for synset in synsets1:\n scores = [wn.path_similarity(synset, ss) for ss in synsets2]\n if [x for x in scores if x is not None] == []:\n return 0\n\n best_score = max([x for x in scores if x is not None])\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n score = 0\n print('oops')\n else:\n score /= count\n return score * 100", "def 
get_similarity(cls, a, b):\n a = a.lower().strip()\n b = b.lower().strip()\n\n a_split = filter(lambda x: x, re.split('\\s+|,|-', a))\n b_split = filter(lambda x: x, re.split('\\s+|,|-', b))\n a_sorted = ' '.join(sorted(a_split))\n b_sorted = ' '.join(sorted(b_split))\n a_acr = ''.join([w[0] if w else '' for w in a_split]).lower()\n b_acr = ''.join([w[0] if w else '' for w in b_split]).lower()\n\n sm1 = SequenceMatcher(None, a, b)\n sm2 = SequenceMatcher(None, a_sorted, b_sorted)\n sm3 = SequenceMatcher(None, a_acr, b)\n sm4 = SequenceMatcher(None, a, b_acr)\n\n return max([\n sm1.ratio(),\n sm2.ratio(),\n sm3.ratio(),\n sm4.ratio(),\n ])", "def node_similarities(self):\n q=[self]\n out=[]\n while len(q):\n k=q.pop(0)\n if k.left is not None:\n q.append(k.left)\n if k.right is not None:\n q.append(k.right)\n if k.has_child():\n out.append((k, k.similarity))\n return sorted(out, key=lambda x: x[1])", "def compute_all_similarities(self,A,a):\r\n pass", "def similarity_score(s1, s2):\n \n \n \n synset_arr = []\n largest_synset =[]\n for i in s1:\n for j in s2:\n #if i!=j:\n synset_arr.append(i.path_similarity(j))\n #print(i,j)\n #print(\"syn_arr\",synset_arr)\n synset_arr = sorted(list(filter(None.__ne__, synset_arr)))\n if synset_arr:\n largest_synset.append(np.float(synset_arr[-1]))\n synset_arr=[]\n #largest_synset.append(sorted(synset_arr)[0])\n #print(largest_synset)\n return np.mean(largest_synset)", "def wordnet_sim(set_a, set_b):\n # permutate all possible sim calcs\n possible_pairs = itertools.product(set_a, set_b)\n scores = []\n for pair in possible_pairs:\n score = pair[0].path_similarity(pair[1])\n if score is not None:\n scores.append(score)\n if scores:\n return max(scores)\n else:\n return 0.1", "def compute_similarity(string_1, string_2):\n return 1.0 - (0.01 * max(\n fuzz.ratio(string_1, string_2),\n fuzz.token_sort_ratio(string_1, string_2),\n fuzz.token_set_ratio(string_1, string_2)))", "def similarity(seq1, seq2):\n matchnum = 0\n i = 0\n j = 0\n while True:\n if seq1[i] == seq2[j]: matchnum = matchnum + 1\n else:\n #check for skip:\n for change in [3]:\n if seq1[i:i+change] == seq2[j+change:j+change+change]:\n j = j + change - 1\n i = i - 1\n if seq2[j:j+change] == seq1[i+change:i+change+change]:\n i = i + change - 1\n j = j - 1\n i = i + 1\n j = j + 1\n\n if i >= len(seq1) or j >= len(seq2): break\n if i >= 6 and matchnum < i/2: break\n\n return float(matchnum) / float(len(seq1))", "def similarity(s1, s2):\n words_one = s1.split()\n words_two = s2.split()\n\n common_word_count = len(set(words_one) & set(words_two))\n\n log_sum = log10(len(words_one)) + log10(len(words_two))\n if log_sum== 0:\n return 0\n\n return common_word_count / log_sum", "def get_pairs(labels):\n result = []\n unique = np.unique(labels)\n for label in unique:\n ulabels = np.where(labels==label)[0]\n # handles when a word sense has only one occurrence\n if len(ulabels) == 1:\n # returns the instance paired with itself, so it can be counted\n result.append((ulabels[0], ulabels[0]))\n else:\n for p in itertools.combinations(ulabels, 2):\n result.append(p)\n return result", "def compute_similarities(self,dataset,j):\r\n pass", "def diversity(sentence: str, tokenized_sentences: str, similarity_metric: str) -> float:\n # sentences = nltk.sent_tokenize(document)\n max_sim_sentence = ''\n sentence = sentence.lower()\n tokenized_sentences = [sent.lower() for sent in tokenized_sentences]\n\n if similarity_metric == 'jaccard':\n\n max_sim = -np.inf\n for ref_sentence in tokenized_sentences:\n if sentence != 
ref_sentence:\n jaccard_sim = jaccard_similarity_words(sentence, ref_sentence)\n if jaccard_sim > max_sim:\n max_sim_sentence = ref_sentence\n max_sim = jaccard_sim\n\n return 1 - max_sim, max_sim_sentence\n\n elif similarity_metric == 'levenshtein':\n \n min_edit_distance = np.inf\n for ref_sentence in tokenized_sentences:\n if sentence != ref_sentence:\n edit_distance = levenshtein(sentence, ref_sentence) \\\n / max(len(sentence), len(ref_sentence))\n\n if edit_distance < min_edit_distance:\n max_sim_sentence = ref_sentence\n min_edit_distance = edit_distance\n # maximum similarity is minimum edit distance\n # max_sim = min_edit_distance \n\n return min_edit_distance, max_sim_sentence", "def estimate_list_similarity(def_pron_list, helper_word):\n #helper_word is stanza-word\n def_list = [def_str for (def_str, pron, pos) in def_pron_list]\n normalize_sent_lists(def_list)\n scores = [0.0] * len(def_list)\n for i in range(len(def_list)):\n #estimate_str_similarity\n scores[i] = estimate_str_similarity(def_list[i], helper_word)\n return scores" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
moves to point location and draws a dot
def draw(self):
    super().draw()
    dot(self.prop['dotSize'], self.prop['dotColor'])
[ "def draw_to_point(self, x, y):\n if self.last == (x, y):\n return\n\n if self.drawing == False:\n self.start()\n\n # self.codes.append('G1 X%0.2f Y%0.2f F%0.2f' % (x, y+self.config['y_offset'], self.config['xy_feedrate']))\n\n # self.codes.append('G1 X{0:.2f} Y{1:.2f} F{2:.2f}'\n # .format(x, y + self.config['y_offset'], self.config['drawing_feedrate']))\n self.codes.append('G0 Z{0:.2f}'.format(self.config['z_offset']))\n self.codes.append('G1 X{0:.2f} Y{1:.2f} F{2:.2f}'\n .format(y, -x, self.config['drawing_feedrate']))\n\n self.last = (x, y)", "def point(x: float, y: float) -> None:\n __canvas.drawPath(skia.Path().moveTo(x, y).close(), __stroke_paint())", "def draw_point(self, wig):\r\n # Remove widget from grid\r\n wig.grid_forget()\r\n # -Get coords-\r\n # Invert row\r\n row = (height - 1) - self.button_to_coords[wig.winfo_name()]['row']\r\n col = self.button_to_coords[wig.winfo_name()]['col']\r\n # -Draw point-\r\n axis.scatter(col, row, s=40, c='red', zorder=1)\r\n self.points.append((col, row))\r\n # -Update canvas-\r\n figure.canvas.draw()\r\n figure.canvas.flush_events()", "def print_point(self):\n print \"({}, {})\".format(self.x,self.y)", "def DrawPoint(self, p, size, color):\r\n p = self.to_screen(p)\r\n if color is not None:\r\n self.debug_batch.add(1, gl.GL_POINTS, grPointSize(size),\r\n ('v2f', (p[0], p[1])),\r\n ('c3f', [color.r, color.g, color.b]))", "def go_to(self, point):\n self.hideturtle()\n self.penup()\n self.setposition(point.x, point.y)\n self.pendown()\n self.showturtle()", "def plot_dot(self, frame_index):\n\n if self.dot is not None:\n self.dot.remove()\n self.dot = plt.scatter(self.x[frame_index], self.y[frame_index], s=20, color='red')\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()", "def updatePoints(self, x, y):", "def clipdot(self, pt):\n self.ps('%f %f clipdot' % (pt.x, pt.y))", "def set_pixel(self, p, color):\n self.draw.point(p, color)", "def drawpoint(win):\n p = win.getMouse()\n c = Circle(p, .05)\n if 1.5 < calcdistance(p, Point(5, 5)) < 2:\n c.setFill('purple')\n else:\n c.setFill(\"black\")\n c.draw(win)\n return p", "def arrowtodot(self, u, v):\n self.ps('%f %f %f %f arrowtodot' % (u.x, u.y, v.x, v.y))", "def plot_neighbour(self, point):\n for move_to in self.coordinates:\n # check if it is the same location as current point\n if point != move_to:\n # check if move_to has already been drawn\n if not move_to.is_plotted():\n # check move_to coordinate is a neighbour to the point.\n if (move_to.get_y() == (point.get_y() + 1)) and (\n move_to.get_x() == point.get_x()):\n # convert coordinates to plotter values\n x = self.coordinate_to_plotter(move_to.get_x())\n y = self.coordinate_to_plotter(move_to.get_y())\n # create command\n str_command = \"PA {} {};\".format(x, y)\n # send the command through serial.\n self.ser.write(str_command)\n # mark the move_to point as plotted. 
So that point\n # is not plotted again.\n move_to.plotted()\n # create a recursive loop in the method.\n # to check if the move_to point has a neighbour.\n self.plot_neighbour(move_to)", "def generate_point(self): # Create the next point\n\n last_point = self.points[-1] # Grab last point in list\n last_point_x = last_point.getX() # Grab that points X\n last_point_y = last_point.getY() # Grab that points Y\n \n compare = self.rand_val() # Generate a random number\n \n # Use modolu to select vertex to move to, 1/3 chance for each vertex\n # 'focus_point' is the vertex being moved towards\n if compare % 3 + 1 == 1:\n focus_point = self.triangle[0]\n focus_point_x = focus_point[0]\n focus_point_y = focus_point[1]\n elif compare % 3 + 1 == 2:\n focus_point = self.triangle[1]\n focus_point_x = focus_point[0]\n focus_point_y = focus_point[1]\n else:\n focus_point = self.triangle[2]\n focus_point_x = focus_point[0]\n focus_point_y = focus_point[1]\n \n # MATH. Determine where new point will be placed\n # IF checks insure all subtraction generates a positive number\n # could use abs()?\n if focus_point_x > last_point_x:\n new_x = ((focus_point_x - last_point_x) / 2) + last_point_x\n else:\n new_x = ((last_point_x - focus_point_x) / 2) + focus_point_x\n \n if focus_point_y > last_point_y:\n new_y = ((focus_point_y - last_point_y) / 2) + last_point_y\n else:\n new_y = ((last_point_y - focus_point_y) / 2) + focus_point_y\n \n new_point = Point(new_x, new_y)\n # new_point.setFill('red')\n self.points.append(new_point)", "def draw(self, pen):\n pointPen = PointToSegmentPen(pen)\n self.drawPoints(pointPen)", "def draw(self,pic):\n # By solving the boundary equation, we have x=a**2/sqrt(a**2+b**2)\n # print \"Drawing an ellipse\" \n self.points=[] \n if self.a>self.b:\n # first go from x axis\n points=self._standardDraw(pic,actuallyDraw=True)\n else:\n # change x and y axis to enable standard drawing process\n self.a, self.b=(self.b,self.a)\n points=self._standardDraw(pic,actuallyDraw=False)\n points=[(self.centerX+p[1]-self.centerY,self.centerY+p[0]-self.centerX) for p in points]\n for p in points:\n x=int(p[0])\n y=int(p[1])\n pic[x][y]=self.color\n self.a, self.b=(self.b,self.a)\n self.points=[p for p in points]\n self._duplicate(pic,points)", "def draw_cross(self, position):\n\n center = self.project_point_to_canvas(position)\n x_min = center[0] - self.point_size_svgpx\n x_max = center[0] + self.point_size_svgpx\n y_min = center[1] - self.point_size_svgpx\n y_max = center[1] + self.point_size_svgpx\n style_string = self._make_svg_style_string(dash_mode = \"none\")\n line = self.svgwrite_object.line([x_min, y_min], [x_max, y_max], style = style_string)\n self.svgwrite_object.add(line)\n line = self.svgwrite_object.line([x_max, y_min], [x_min, y_max], style = style_string)\n self.svgwrite_object.add(line)", "def draw_points(self, pts_x, pts_y):\n pylab.clf()\n pylab.plot(pts_x, [1-y for y in pts_y], marker='o', color='r', ls='')\n pylab.xlim(-.05, 1.05)\n pylab.ylim(-.05, 1.05)\n pylab.axis('off')\n pylab.savefig(os.path.join(self.work_dir, 'points.png'),\n bbox_inches='tight')", "def plot_move(self, endp):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply decorrelation stretch to image
def decorrstretch(self, A, tol=None):
    # save the original shape
    orig_shape = A.shape
    # reshape the image
    #           B  G  R
    # pixel 1   .
    # pixel 2   .
    #  .  .  .  .
    A = A.reshape((-1,3)).astype(np.float)
    # covariance matrix of A
    cov = np.cov(A.T)
    # source and target sigma
    sigma = np.diag(np.sqrt(cov.diagonal()))
    # eigen decomposition of covariance matrix
    eigval, V = np.linalg.eig(cov)
    # stretch matrix
    S = np.diag(1/np.sqrt(eigval))
    # compute mean of each color
    mean = np.mean(A, axis=0)
    # substract the mean from image
    A -= mean
    # compute the transformation matrix
    T = reduce(np.dot, [sigma, V, S, V.T])
    # compute offset
    offset = mean - np.dot(mean, T)
    # transform the image
    A = np.dot(A, T)
    # add the mean and offset
    A += mean + offset
    # restore original shape
    B = A.reshape(orig_shape)
    # for each color...
    for b in range(3):
        # apply contrast stretching if requested
        if tol:
            # find lower and upper limit for contrast stretching
            low, high = np.percentile(B[:,:,b], 100*tol), np.percentile(B[:,:,b], 100-100*tol)
            B[B<low] = low
            B[B>high] = high
        # ...rescale the color values to 0..255
        B[:,:,b] = 1 * (B[:,:,b] - B[:,:,b].min())/(B[:,:,b].max() - B[:,:,b].min())
    # return it as uint8 (byte) image
    return np.asarray(B, dtype='float32')
[ "def applyNormalisation(image):\n #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n #image[:,:,3] = clahe.apply(image[:,:,3])\n return image / 255.", "def corr_image(resting_image, aparc_aseg_file,fwhm, seed_region):\n import numpy as np\n import nibabel as nb\n import matplotlib.pyplot as plt\n from surfer import Brain, Surface\n import os\n import string\n aparc_aseg = nb.load(aparc_aseg_file)\n img = nb.load(resting_image)\n corrmat = np.corrcoef(np.squeeze(img.get_data()))\n corrmat[np.isnan(corrmat)] = 0\n corrmat_npz = os.path.abspath('corrmat.npz')\n np.savez(corrmat_npz,corrmat=corrmat)\n\n# br = Brain('fsaverage5', 'lh', 'smoothwm')\n\n #br.add_overlay(corrmat[0,:], min=0.2, name=0, visible=True)\n #values = nb.freesurfer.read_annot('/software/Freesurfer/5.1.0/subjects/fsaverage5/label/lh.aparc.annot')\n# values = open('/software/Freesurfer/current/FreeSurferColorLUT.txt').read()\n# values = string.split(values,'\\n')\n# values = filter(None,map(string.strip,values))\n\n\n #br.add_overlay(np.mean(corrmat[values[0]==5,:], axis=0), min=0.8, name='mean', visible=True)\n\n aparc_aseg_data = np.squeeze(aparc_aseg.get_data())\n# data = img.get_data()\n\n data = np.squeeze(img.get_data())\n \n\n \n seed_signal = np.mean(data[aparc_aseg_data==seed_region], axis=0)\n seed = np.corrcoef(seed_signal, data)\n\n plt.hist(seed[0,1:], 128)\n plt.savefig(os.path.abspath(\"histogram_%d.png\"%seed_region))\n plt.close()\n\n #corr_image = os.path.abspath(\"corr_image%s.png\"%fwhm)\n #br.save_montage(corr_image)\n #ims = br.save_imageset(prefix=os.path.abspath('fwhm_%s'%str(fwhm)),views=['medial','lateral','caudal','rostral','dorsal','ventral'])\n #br.close()\n #print ims\n #precuneus[np.isnan(precuneus)] = 0\n #plt.hist(precuneus[0,1:])\n\n roitable = [['Region','Mean Correlation']]\n for i, roi in enumerate(np.unique(aparc_aseg_data)):\n roitable.append([roi,np.mean(seed[aparc_aseg_data==seed_region])])\n\n #images = [corr_image]+ims+[os.path.abspath(\"histogram.png\"), roitable]\n roitable=[roitable]\n histogram = os.path.abspath(\"histogram_%d.png\"%seed_region)\n\n return corr_image, ims, roitable, histogram, corrmat_npz", "def calibrate_image(image, white_ref, dark_ref):\n\n calibrated = (image - dark_ref)/(white_ref - dark_ref)\n calibrated[np.invert(np.isfinite(calibrated))] = 1.0\n return calibrated", "def rescaled_image():", "def auto_contrast(image, cutoff, grayscale=True):\n w, h = image.shape[-2:]\n\n if grayscale:\n reference = VF.rgb_to_grayscale(image)\n else:\n reference = image\n\n hist = uint8_histc(reference)\n hist = hist.cumsum(-1)\n hist = hist / hist[...,-1:]\n\n if cutoff:\n lo = (hist <= cutoff).sum(-1)\n hi = 256.0 - (hist >= 1 - cutoff).sum(-1)\n else:\n lo = (hist == 0).sum(-1)\n hi = 256.0 - (hist == 1).sum(-1)\n\n lo = lo[:,:,None,None]\n hi = hi[:,:,None,None]\n \n scale = 255.0 / (hi - lo)\n offset = - lo * scale\n\n scale = scale.expand(-1,-1,w,h)\n offset = offset.expand(-1,-1,w,h)\n\n scaled = image * scale + offset\n scaled = torch.clamp(scaled, 0.0, 255.0)\n\n return image.masked_scatter(hi > lo, scaled)", "def convolve(self, img):", "def _patch_rescale(ret):\n idx = img.shape[0]\n xl = img.shape[1]\n yl = img.shape[2]\n\n for i in range(idx):\n m = amax(ret[i, :, :])\n for x in range(xl):\n for y in range(yl):\n ret[i, x, y] /= m\n return ret", "def crf_refine(self, img, annos):\n assert img.dtype == np.uint8\n assert annos.dtype == np.uint8\n assert img.shape[:2] == annos.shape\n\n # img and annos should be np array with data type uint8\n\n 
EPSILON = 1e-8\n\n M = 2 # salient or not\n tau = 1.05\n # Setup the CRF model\n d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)\n\n anno_norm = annos / 255.\n\n n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * self.sigmoid(1 - anno_norm))\n p_energy = -np.log(anno_norm + EPSILON) / (tau * self.sigmoid(anno_norm))\n\n U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')\n U[0, :] = n_energy.flatten()\n U[1, :] = p_energy.flatten()\n\n d.setUnaryEnergy(U)\n\n d.addPairwiseGaussian(sxy=3, compat=3)\n d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)\n\n # Do the inference\n infer = np.array(d.inference(1)).astype('float32')\n res = infer[1, :]\n\n res = res * 255\n res = res.reshape(img.shape[:2])\n return res.astype('uint8')", "def adjust_contrast(image):\n\n # 0.5 <= alpha <= 2.0\n # These values found empirically\n alpha = 0.5 + 1.5 * random.random()\n image = cv2.convertScaleAbs(image, alpha=alpha, beta=0)\n\n return image", "def MSRCR(self, image):\n\n self.message.toprint('IMAGE_APPLY_MSRCR')\n\n image_original = np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n # Distributes scale interactions\n max_scale = self.settings.RETINEX_MAX_SCALE\n nr_scale = self.settings.RETINEX_NR_SCALE\n scales = ScalesDistribution.apply(max_scale, nr_scale)\n\n # new image with zero channels\n image_blur = np.zeros(\n shape=[\n len(scales),\n image_original.shape[0],\n image_original.shape[1],\n image_original.shape[2]\n ]\n )\n\n # new image with zero channels\n image_mlog = np.zeros(\n shape=[\n len(scales),\n image_original.shape[0],\n image_original.shape[1],\n image_original.shape[2]\n ]\n )\n\n # Do for each channel\n for channel in range(3):\n # Do for each scale distributed\n for scale_count, scale in enumerate(scales):\n\n # If sigma==0, it will be automatically calculated based on scale\n image_blur[scale_count, :, :, channel] = cv2.GaussianBlur(\n image_original[:, :, channel], (0, 0), scale\n )\n image_mlog[scale_count, :, :, channel] = np.log(\n image_original[:, :, channel] + 1.\n ) - np.log(\n image_blur[scale_count, :, :, channel] + 1.\n )\n\n image_retinex = np.mean(image_mlog, 0)\n\n alpha = self.settings.RETINEX_ALPHA\n gain = self.settings.RETINEX_GAIN\n offset = self.settings.RETINEX_OFFSET\n\n image_retinex = ColorRestoration.apply(\n image_original=image_original,\n image_retinex=image_retinex,\n alpha=alpha,\n gain=gain,\n offset=offset\n )\n\n # Average color image retinex whith restoration\n image_mean = np.mean(image_retinex)\n\n # Standard deviation image retinex whith color restoration\n image_std = np.std(image_retinex)\n\n # Tansmission Map\n # The processing consist of to apply, using the \"Transmission Map\" average and\n # standard-deviation, a transformation of the type:\n # * newT = (oldT-mini) / (maxi-mini)\n # * with mini = average - k * standard-deviation\n # * with maxi = average + k * standard-deviation\n # where k = retinex_dynamic = contrast (variance): is decisive in the image rendering:\n # * low values will increase the seeming contrast,\n # * hight values will make the image more natural with less artefacts and hazes.\n k = self.settings.RETINEX_DYNAMIC\n\n image_mini = image_mean - k * image_std\n\n image_maxi = image_mean + k * image_std\n\n image_maxi_mini = image_maxi - image_mini\n\n image_oldT_mini = image_retinex - image_mini\n image_out = np.uint8(np.clip(image_oldT_mini / image_maxi_mini * 255, 0, 255))\n\n image_out = cv2.cvtColor(image_out, cv2.COLOR_RGB2BGR)\n\n self.message.toprint('IMAGE_APPLIED_MSRCR')\n\n 
return image_out", "def z_normalize(self, img, t, semi_median, semi_max):\n (lower, upper) = np.percentile(img[img > 0], [25, 99])\n fg = img >= t\n # perform constrast stretching on foreground pixels\n img[fg] = self.constrast_stretch(img[fg], lower, upper,\n semi_median, semi_max)\n # quantile normalization on background pixels\n img[~fg] = self.quantile_normalization(img[~fg],\n self.n_quantiles)\n # re-scale foreground intensities\n img[fg] *= np.max(img[~fg]) / np.min(img[fg])\n # quantile normalization produces a decent amount of static:\n # suggested to smooth similar to xy smoothing of background\n if self.smooth_quartiles:\n mask = self.smooth_background(img, fg, 'savitzky-galore')\n img[~fg] = mask[~fg]\n return img", "def blur_image(im, n) :\n g = gauss_kern(n)\n improc = signal.convolve(im, g, mode='same')\n return(improc)", "def sharpened(image, n):\n k = [-1 / (n ** 2)] * (n ** 2) # Create negative blur kernel\n k[(n ** 2) // 2] += 2 # Add a positive 2 weight to the blur kernel\n newImage = correlate(image, k) # Apply kernel\n return round_and_clip_image(newImage) # Return a valid image", "def main():\r\n filter = [[-2, 3, -1], [4, -1, 2], [0, 5, 3]]\r\n img = cv.imread('Lenna.png')\r\n b, g, r = cv.split(img)\r\n\r\n b_list = b.tolist()\r\n r_list = r.tolist()\r\n g_list = g.tolist()\r\n\r\n for arr in b_list:\r\n arr.insert(0, arr[0])\r\n arr.append(arr[-1])\r\n b_list.insert(0, b_list[0])\r\n b_list.append(b_list[-1])\r\n\r\n for arr in r_list:\r\n arr.insert(0, arr[0])\r\n arr.append(arr[-1])\r\n r_list.insert(0, r_list[0])\r\n r_list.append(r_list[-1])\r\n\r\n for arr in g_list:\r\n arr.insert(0, arr[0])\r\n arr.append(arr[-1])\r\n g_list.insert(0, g_list[0])\r\n g_list.append(g_list[-1])\r\n\r\n ans_b = corr(b_list, filter)\r\n ans_r = corr(r_list, filter)\r\n ans_g = corr(g_list, filter)\r\n\r\n b_rows = []\r\n g_rows = []\r\n r_rows = []\r\n\r\n new_img = []\r\n\r\n i = 0\r\n while i < len(ans_b):\r\n temp = ans_b[i: i + 512]\r\n b_rows.append(temp)\r\n i += 512\r\n\r\n i = 0\r\n while i < len(ans_g):\r\n temp = ans_g[i: i + 512]\r\n g_rows.append(temp)\r\n i += 512\r\n\r\n i = 0\r\n while i < len(ans_r):\r\n temp = ans_r[i: i + 512]\r\n r_rows.append(temp)\r\n i += 512\r\n\r\n new_img = np.dstack((r_rows, g_rows, b_rows))\r\n\r\n io.imsave('new_lenna.png', new_img)", "def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:\n return img", "def subtract_background(self):\n n = 20\n back = (np.average(self.init_image[:n])+np.average(self.init_image[-n:]))/2\n self.image = np.subtract(self.image,back)", "def fit(image, psf, axis, bg_sigma=0, psf_scale_factor=1):\n psf_data = np.sum(psf, axis=axis)\n image_data = np.sum(image, axis=axis)\n\n # Since we're summing down an axis, we need to also sum the bg sigma\n # This is sqrt(height * sigma**2) = sqrt(height)*sigma\n perp_height = image.shape[axis]\n background = np.sqrt(perp_height)*bg_sigma\n\n # Our \"initial guess\" for the positions of the step are\n # 10% of the way through the data, and 90% of the way through the data\n # since it assumed the input has been cropped to just include the trail with\n # little space around it.\n step_up = int(len(image_data) * 0.1)\n step_down = int(len(image_data) * 0.9)\n tophat_data = make_tophat(len(image_data), step_up, step_down)\n convolved_tophat = np.convolve(psf_data, tophat_data)\n normalized_tophat = np.divide(convolved_tophat, convolved_tophat.max())\n\n half_length = int(len(normalized_tophat) / 2)\n\n halves = {\n 'left': {\n 'half_tophat_data': 
normalized_tophat[0:half_length],\n 'half_image_data': image_data[0:half_length],\n 'zero_point': step_up,\n 'fill_value': (0, 1)\n },\n 'right': {\n 'half_tophat_data': normalized_tophat[half_length:],\n 'half_image_data': image_data[half_length:],\n 'zero_point': step_down - half_length,\n 'fill_value': (1, 0)\n }\n }\n\n for half in halves:\n half_tophat_data, half_image_data, zero_point, fill_value = pluck(\n halves[half], 'half_tophat_data', 'half_image_data', 'zero_point', 'fill_value')\n\n interpolation_x_data = np.linspace(\n - zero_point / psf_scale_factor,\n (len(half_tophat_data) - zero_point) / psf_scale_factor,\n len(half_tophat_data)\n )\n\n interpolated_step = interp1d(\n interpolation_x_data, half_tophat_data,\n kind=\"cubic\",\n fill_value=fill_value,\n bounds_error=False\n )\n\n def tophat_function(x, B, A, x0):\n return B + A*interpolated_step(x - x0)\n\n # [initial_B, initial_A, initial_x0]\n guesses = [\n np.min(half_image_data), np.mean(half_image_data), zero_point\n ]\n image_x_data = range(len(half_image_data))\n opt, cov = curve_fit(tophat_function, image_x_data,\n half_image_data, p0=guesses)\n\n pretty_output(opt, cov, f'Initial Fit - {half}')\n plt.ion()\n plot_result(\n x_data=image_x_data,\n image=half_image_data,\n fit=tophat_function(image_x_data, *opt),\n title=f'Initial Fit - {half}'\n )\n outliers = []\n residuals = []\n for i in range(0, len(image_x_data)):\n residual = np.abs(\n half_image_data[i] - tophat_function(image_x_data[i], *opt))\n residuals.append(residual)\n if residual > 2 * background:\n outliers.append(i)\n sigs = make_sigmas(len(image_x_data), background, np.inf, outliers)\n # Refit with outlier areas masked\n opt2, cov2 = curve_fit(tophat_function, image_x_data, half_image_data, p0=guesses,\n sigma=sigs, absolute_sigma=True)\n pretty_output(opt2, cov2, f'After masking - {half}')\n plot_result(\n x_data=image_x_data,\n image=half_image_data,\n fit=tophat_function(image_x_data, *opt2),\n title=f'After masking - {half}',\n masked=outliers\n )\n plt.show()\n done = input('Press return to close.')\n return opt, cov, opt2, cov2", "def RescaledImage(img: np_.ndarray, block_shape, full_size) -> np_.ndarray:\n\n block_half_shape = (block_shape[0] // 2, block_shape[1] // 2)\n new_size = (full_size[0] - block_shape[0] + 1, full_size[1] - block_shape[1] + 1)\n\n rescaled = np_.zeros((full_size[0], img.shape[1]), dtype=np_.float64)# empty vector (containing 0), x= full size image rows,\n # y= cropped image column\n#===== rows\n old_rows = range(img.shape[0]) # cropped image rows\n flt_rows = np_.linspace(0, old_rows[-1], new_size[0])# array ( start=0, stop=cropped image rows-1, \n # number of samples to generate: new size rows)\n \n new_rows = slice(block_half_shape[0], rescaled.shape[0] - block_half_shape[0]) # object slice (start:block half shape,\n # stop: rescaled rows-block half shape )\n # rescale rows by block half shape rows\n \n for col in range(img.shape[1]):# for each column of the cropped image\n # full rows reconsruction with pchip interpolation\n rescaled[new_rows, col] = in_.pchip_interpolate(old_rows, img[:, col], flt_rows)\n\n# ===== columns\n img = rescaled\n rescaled = np_.zeros(full_size, dtype=np_.float64) # same full size image shape \n\n old_cols = range(img.shape[1]) # old column number\n flt_cols = np_.linspace(0, old_cols[-1], new_size[1]) # array ( start=0, stop=cropped image columns-1, \n # number of samples to generate: new size columns)\n new_cols = slice(block_half_shape[1], rescaled.shape[1] - block_half_shape[1]) # 
object slice (start:block half shape,\n # stop: rescaled columns-block half shape )\n \n # rescale rows by block half shape columns\n for row in range(img.shape[0]): # for each row of the cropped image\n # full columns reconsruction with pchip interpolation\n rescaled[row, new_cols] = in_.pchip_interpolate(old_cols, img[row, :], flt_cols)\n \n return im_.filters.gaussian(rescaled, sigma=9) # return a full background recontructed image, \n # filtred with a gaussian filter to remove noise", "def normalization(image):\r\n image = (image - 128) / 128\r\n return image", "def Reduce(image):\r\n\t# applies gaussian smoothing kernel to image before reducing\r\n\treturn resize_image(smoothed_image(image), 0.5)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a league play event by running round robins for half the divisions. When done, a new ladder file is created.
def run_league_play(working_dir: WorkingDir, odd_week: bool, replay_preference: ReplayPreference, team_size):
    bots = load_all_bots(working_dir)
    ladder = Ladder.read(working_dir.ladder)

    # We need the result of every match to create the next ladder. For each match in each round robin, if a result
    # exist already, it will be parsed, if it doesn't exist, it will be played.
    # When all results have been found, the new ladder can be completed and saved.
    new_ladder = Ladder(ladder.bots)
    event_results = []

    # playing_division_indices contains either even or odd indices.
    # If there is only one division always play that division (division 0, quantum).
    playing_division_indices = range(ladder.division_count())[int(odd_week) % 2::2] if ladder.division_count() > 1 else [0]

    # The divisions play in reverse order, so quantum/overclocked division plays last
    for div_index in playing_division_indices[::-1]:
        print(f'Starting round robin for the {Ladder.DIVISION_NAMES[div_index]} division')

        rr_bots = ladder.round_robin_participants(div_index)
        rr_matches = generate_round_robin_matches(rr_bots)
        rr_results = []

        for match_participants in rr_matches:
            # Check if match has already been play, i.e. the result file already exist
            result_path = working_dir.get_match_result(div_index, match_participants[0], match_participants[1])
            if result_path.exists():
                # Found existing result
                try:
                    print(f'Found existing result {result_path.name}')
                    result = MatchResult.read(result_path)
                    rr_results.append(result)
                except Exception as e:
                    print(f'Error loading result {result_path.name}. Fix/delete the result and run script again.')
                    raise e
            else:
                # Let overlay know which match we are about to start
                overlay_data = OverlayData(div_index, bots[match_participants[0]].config_path, bots[match_participants[1]].config_path)
                overlay_data.write(working_dir.overlay_interface)

                participant_1 = bots[match_participants[0]]
                participant_2 = bots[match_participants[1]]
                match_config = make_match_config(participant_1, participant_2, team_size)
                result = run_match(participant_1.name, participant_2.name, match_config, replay_preference)
                result.write(result_path)
                print(f'Match finished {result.blue_goals}-{result.orange_goals}. Saved result as {result_path}')
                rr_results.append(result)

                # Let the winner celebrate and the scoreboard show for a few seconds.
                # This sleep not required.
                time.sleep(8)

        print(f'{Ladder.DIVISION_NAMES[div_index]} division done')
        event_results.append(rr_results)

        # Find bots' overall score for the round robin
        overall_scores = [CombinedScore.calc_score(bot, rr_results) for bot in rr_bots]
        sorted_overall_scores = sorted(overall_scores)[::-1]
        print(f'Bots\' overall performance in {Ladder.DIVISION_NAMES[div_index]} division:')
        for score in sorted_overall_scores:
            print(f'> {score.bot}: goal_diff={score.goal_diff}, goals={score.goals}, shots={score.shots}, saves={score.saves}, points={score.points}')

        # Rearrange bots in division on the new ladder
        first_bot_index = new_ladder.division_size * div_index
        bots_to_rearrange = len(rr_bots)
        for i in range(bots_to_rearrange):
            new_ladder.bots[first_bot_index + i] = sorted_overall_scores[i].bot

    # Save new ladder
    Ladder.write(new_ladder, working_dir.new_ladder)
    print(f'Done. Saved new ladder as {working_dir.new_ladder.name}')

    # Remove overlay interface file now that we are done
    if working_dir.overlay_interface.exists():
        working_dir.overlay_interface.unlink()

    return new_ladder
[ "def playGolf(playerFullName): \n numberRounds = getValidInteger(NUMBER_ROUNDS_MIN, NUMBER_ROUNDS_MAX, \n \"How many rounds would you like to play? (1-9) \",\n \"Invalid number of rounds.\")\n for roundNumber in range(numberRounds): \n distanceToHole = HOLE_DISTANCE_FROM_TEE\n shotCount = 0\n print(\"\\nRound\", roundNumber + 1)\n print(\"This hole is a\", str(HOLE_DISTANCE_FROM_TEE) + \"m\", \"par\", PAR)\n showClubMenu()\n\n while distanceToHole > 0:\n print(\"You are\", str(distanceToHole) + \"m\", \"from the hole, after\",\n shotCount, \"shot/s\") \n \n clubSelection = (input(\"Choose club: \")).upper()\n if clubSelection in CLUB_DICTIONARY:\n CLUB_DICTIONARY[clubSelection].setSwingStrength(SHOT_STRENGTH_VARIATION_LOWER,\n SHOT_STRENGTH_VARIATION_UPPER)\n if clubSelection == 'P' and distanceToHole < SHORT_PUT_THRESHOLD_METERS: \n CLUB_DICTIONARY[clubSelection].shortPut(distanceToHole, \n MIN_PUT_DISTANCE_METERS)\n else:\n CLUB_DICTIONARY[clubSelection].swing()\n \n shotDistance = CLUB_DICTIONARY[clubSelection].getShot()\n \n else:\n shotDistance = 0\n print(\"Invalid club selection = air swing :(\")\n showClubMenu()\n \n print(\"Your shot went \" + str(shotDistance) + \"m\")\n distanceToHole = abs(distanceToHole - shotDistance)\n shotCount += 1 \n \n print(\"Clunk... After\", shotCount, \"hits, the ball is in the hole!\")\n parScore = abs(shotCount - PAR)\n if shotCount > PAR:\n print(\"Disappointing. You are\", parScore, \"over par.\")\n elif shotCount == PAR:\n print(\"And that's par.\")\n else:\n print(\"Congratulations. You are\", parScore, \"under par.\")\n \n \"\"\"\n Ask the user, by his/her first name, whether to save the score. \n If yes, append the score and player's full name to the scores file.\n \"\"\"\n playerNames = playerFullName.split()\n saveScoreToFile = (input(\"Would you like to save your score, \" + \n playerNames[0] + \"? (Y/N) \")).upper()\n while saveScoreToFile != 'Y' and saveScoreToFile != 'N':\n print(\"Please enter Y or N\")\n saveScoreToFile = (input(\"Would you like to save your score, \" + \n playerNames[0] + \"? (Y/N) \")).upper()\n if saveScoreToFile == 'Y':\n outFile = open(SCORE_OUTPUT_FILE, \"a\")\n outFile.write(str(shotCount) + \", \" + playerFullName + '\\n')\n outFile.close()\n print(\"Score saved. 
New high scores:\")\n viewScores(SCORE_OUTPUT_FILE)", "def run():\n\n clear_shell()\n play(choose_difficulty())", "def main():\n run_game(even)", "def run_simulation(self):\n laplace = .55\n\n date = self.game['game_date'].split('-')\n if date[1] == '03' or date[1] == '04':\n if random() < .45:\n self.standings.add_win(self.away_team, self.home_team)\n self.losing_team = self.home_team\n else:\n self.standings.add_win(self.home_team, self.away_team)\n self.losing_team = self.away_team\n else:\n home_info = self.standings.get_info(self.home_team)\n away_info = self.standings.get_info(self.away_team)\n home_wins = home_info[0] + 1\n home_games = home_info[1] + 2\n away_wins = away_info[0] + 1\n away_games = away_info[1] + 2\n home_laplace = home_wins / home_games\n away_laplace = away_wins / away_games\n\n if home_laplace > away_laplace:\n laplace = math.fabs(laplace - (home_laplace - away_laplace))\n else:\n laplace = math.fabs(laplace + (away_laplace - home_laplace))\n if random() < laplace:\n self.standings.add_win(self.away_team, self.home_team)\n self.losing_team = self.home_team\n else:\n self.standings.add_win(self.home_team, self.away_team)\n self.losing_team = self.away_team\n return self.standings", "def exe_rig4games(self, *args, **kwargs):\n print 'Yay! your game rig is ready!'", "def loadLeague(league, data):\n\tfor game in data:\n\t\tif (game.winner != None): #If the game as finished\n\t\t\tposition = league.positionOfTeam(game.winner)\n\t\t\tleague.teams[position].wins.append(league.teams[league.positionOfTeam(game.loser)])", "def run(filename):\n lap_records = load_data(filename)\n sorted_laps = sort_laps(lap_records)\n race_results = build_results(sorted_laps)\n print_results(race_results)\n return race_results", "def main():\n start = datetime.now()\n parser = argparse.ArgumentParser(description=\"Tracks larvae for thigmotaxis experiment\")\n # add options for argument parser\n parser.add_argument(\"in_path\",\n help=\"Path to the video.\")\n parser.add_argument(\"out_path\",\n help=\"Directory for results. 
Should be empty.\")\n parser.add_argument(\"-x\", \"--keep_temp\", action=\"store_true\",\n help=\"Keep temporary folder after execution.\")\n parser.add_argument(\"-t\", \"--only_tracking\", action=\"store_true\",\n help=\"Only perform tracking step.\")\n parser.add_argument(\"-n\", \"--number\", type=int, default=24,\n help=\"Number of wells to track, default is 24\")\n parser.add_argument(\"-i\", \"--save_track_image\", action=\"store_true\",\n help=\"Save images of tracked paths.\")\n parser.add_argument(\"-m\", \"--manual_crop\", action=\"store_true\",\n help=\"Manually select the wells to be tracked.\")\n parser.add_argument(\"-s\", \"--save_track\", action=\"store_true\",\n help=\"Save track points to file.\")\n parser.add_argument(\"--median\", action=\"store_true\",\n help=\"Use median intensity projection for segmentation.\")\n parser.add_argument(\"-c\", \"--cpu\", type=int, default=1,\n help=\"Set number of threads for multi core machines.\")\n parser.add_argument(\"--big\", action=\"store_true\",\n help=\"Reduces memory usage for very large video files (time intensive, not recommended).\")\n\n # parse arguments from command line\n args = parser.parse_args()\n # get all file names and directories ready\n infile = os.path.abspath(args.in_path)\n video_name = os.path.basename(infile)\n video_name_base = os.path.splitext(video_name)[0]\n out_dir = os.path.abspath(args.out_path)\n with open(os.path.join(out_dir, 'stats.txt'), 'w') as out:\n out.write('well\\t')\n out.write('time in outer region\\t')\n out.write('distance in outer region\\t')\n out.write('time in inner region\\t')\n out.write(' distance in inner region\\t')\n out.write(' % of time in outer region\\t')\n out.write(' % of distance in outer region\\n')\n if not out_dir.endswith('/'):\n out_dir += '/'\n # make directory for temporary results\n temp_dirs = []\n seg_paths = []\n for i in range(args.number):\n temp_dirs.append(os.path.join(out_dir, \"temp_\" + str(i) + \"/\"))\n # segmentation path does not include file extension,\n # it will be appended in FIJI macro\n seg_paths.append(os.path.join(out_dir, \"SEG_\" + str(i) + '_' + video_name_base))\n cropped_video = \"cropped_\" + video_name_base + \".avi\"\n thumb = 'thumb.tiff'\n mask_paths = []\n start_frame = False\n end_frame = False\n for temp_dir in temp_dirs:\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n mask_paths.append(os.path.join(temp_dir, \"mask.tiff\"))\n\n crops = []\n if not args.only_tracking:\n silent_remove(os.path.join(temp_dirs[0], \"thumb.tiff\"))\n ffmpeg = Ffmpeg(infile, os.path.join(temp_dirs[0], thumb))\n ffmpeg.pix_fmt = \"gray8\"\n ffmpeg.vframes = \"1\"\n ffmpeg.ss = \"150\"\n ffmpeg.run()\n\n thumb = os.path.join(temp_dirs[0], thumb)\n if not args.manual_crop and args.number == 24:\n # crop the image into 24 parts\n # let the user choose the region in which the wells are.\n image = Image(thumb)\n crops = image.auto_crop()\n prev_mask = False\n for i in range(len(crops)):\n crop = crops[i]\n temp_dir = temp_dirs[i]\n mask_path = mask_paths[i]\n silent_remove(os.path.join(temp_dir, \"crop.tiff\"))\n ffmpeg = Ffmpeg(infile, os.path.join(temp_dir, \"crop.tiff\"))\n ffmpeg.pix_fmt = \"gray8\"\n ffmpeg.vframes = \"1\"\n ffmpeg.ss = \"150\"\n ffmpeg.filter = \"crop=\" + crop\n ffmpeg.run()\n image = Image(os.path.join(temp_dir, \"crop.tiff\"), prev_mask=prev_mask)\n prev_mask = image.mask(mask_path)\n else:\n m = (0, 0)\n for i in range(len(temp_dirs)):\n # prepare cropping and masking\n temp_dir = temp_dirs[i]\n mask_path = 
mask_paths[i]\n if len(crops) == 0:\n c, m = crop_and_mask(infile, mask_path, temp_dir, thumb)\n crops.append(c)\n else:\n c, m = crop_and_mask(infile, mask_path, temp_dir, thumb, crops[-1], m)\n crops.append(c)\n i = 0\n while i < len(temp_dirs):\n threads = {}\n for thread in range(args.cpu):\n try:\n temp_dir = temp_dirs[i]\n crop = crops[i]\n # prepare the video for segmentation\n threads[thread] = Thread(target=prepare_vid,\n args=[cropped_video, infile, temp_dir, crop])\n threads[thread].start()\n i += 1\n except IndexError:\n break\n for thread in threads:\n threads[thread].join()\n while not start_frame:\n try:\n start_frame = int(input(\"First frame to keep: \"))\n except ValueError:\n start_frame = False\n while not end_frame:\n try:\n end_frame = int(input(\"Last frame to keep: \")) + 1\n except ValueError:\n end_frame = False\n for i in range(len(temp_dirs)):\n # segment the video\n temp_dir = temp_dirs[i]\n mask_path = mask_paths[i]\n seg_path = seg_paths[i]\n # run the segmentation macro\n if args.median:\n fiji = ImageJMacro(\"segmentation_median\")\n else:\n fiji = ImageJMacro(\"segmentation\")\n fiji.run([temp_dir + cropped_video, str(start_frame),\n str(end_frame), seg_path, mask_path])\n\n for i in range(len(seg_paths)):\n # track outer region\n seg_path = seg_paths[i]\n if args.big:\n outer = Video(seg_path + \"_outer.tiff\", big=True)\n else:\n outer = Video(seg_path + \"_outer.tiff\")\n outer_tracks = outer.track()\n del outer\n # track inner region\n if args.big:\n inner = Video(seg_path + \"_inner.tiff\", big=True)\n else:\n inner = Video(seg_path + \"_inner.tiff\")\n inner_tracks = inner.track()\n del inner\n analysis = Analysis(outer_tracks, inner_tracks)\n analysis.analyze(out_dir + 'stats.txt', i)\n if args.save_track_image:\n analysis.save_track_image(temp_dirs[i], out_dir, i)\n if args.save_track:\n # save track points to file\n analysis.save_track(out_dir, i)\n\n if not args.keep_temp:\n for temp_dir in temp_dirs:\n shutil.rmtree(temp_dir)\n for i in range(args.number):\n silent_remove(os.path.join(out_dir,\n \"SEG_\" + str(i) + '_' + video_name_base + \"_outer.tiff\"))\n silent_remove(os.path.join(out_dir,\n \"SEG_\" + str(i) + '_' + video_name_base + \"_inner.tiff\"))\n\n end = datetime.now()\n print(\"Executed in \" + str(end-start))", "def start_game():\n\n save.load_game()\n play_game()", "def printGameRecord(self, board, curPlayer, folder=\"\"):\n dt = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n filename = os.path.join(folder, dt+\".pdn\") \n i=1\n while os.path.isfile(filename):\n filename = os.path.join(folder, dt+\"(\"+str(i)+\").pdn\")\n i += 1\n with open(filename, \"w+\") as f:\n # curPlayer is REQUIRED for correct scoring \n result = curPlayer * self.getGameEnded(board, curPlayer)\n if result!=0:\n result = \"Game over. 
Result:\"+str(result)\n else:\n result = \"Game not ended yet\"\n print(result, \", halfMoves:\", board.halfMoves, file=f)\n board.display(file=f)\n print(\"executed_moves:\", board.executed_moves, file=f)\n f.closed", "def record_gameplay(self, path='file.mp4'):\n frames_dir = 'temp_frames'\n # color transition from black to white\n transition_color_list = ['forestgreen', 'black', 'dimgray', 'dimgrey', 'gray', 'grey',\n 'darkgray', 'darkgrey', 'silver', 'lightgray', \n 'lightgrey', 'gainsboro', 'whitesmoke', 'white']\n frames_per_anim = len(transition_color_list) - 1\n color_array = np.zeros((self._size, self._size), np.uint8)\n alpha_array = np.zeros((self._size, self._size), np.uint8)\n # play a full game\n winner = self.play()\n # use the history object to save to game\n # temp_frames directory is reserved to save intermediate frames\n if(os.path.exists(frames_dir)):\n # clear out the directory\n # shutil.rmtree(frames_dir)\n for _, _, file_list in os.walk(frames_dir):\n pass\n for f in file_list:\n os.remove(os.path.join(frames_dir, f))\n else:\n os.mkdir(frames_dir)\n # plotting\n ####### Begin Template Creation #######\n # n * h + (n+1) * d = 1, where n is no of cells along 1 axis,\n # h is height of one cell and d is the gap between 2 cells\n delta = 0.005\n cell_height = (1 - ((self._size + 1) * delta))/self._size\n cell_height_half = cell_height/2.0\n # plt x axis runs left to right while y runs bottom to top\n # create the full template for the board here, then just change colors\n # in the loop\n fig, axs = plt.subplots(1, 1, figsize=(8, 8), dpi=72)\n axs.axis('off')\n title_line2 = 'white: ' + \\\n str(type(self._p[1])).replace('players.','').replace('<','').replace('>','') + \\\n ' | black: ' + \\\n str(type(self._p[0])).replace('players.','').replace('<','').replace('>','')\n if(winner == 1):\n axs.set_title('winner:white\\n' + title_line2)\n elif(winner == 0):\n axs.set_title('winner:black\\n' + title_line2)\n else:\n axs.set_title('winner:tie\\n' + title_line2)\n\n # add scatter points\n # axs.scatter([0, 1, 0, 1], [0, 1, 1, 0])\n ellipse_patch_list = []\n # add horizontal and vertical lines\n for i in range(self._size):\n # linewidth is dependent on axis size and hence needs\n # to be set manually\n axs.axvline((2 * i + 1)*(delta/2) + i * cell_height, \n color='white', lw=2)\n axs.axhline((2 * i + 1)*(delta/2) + i * cell_height, \n color='white', lw=2)\n for _ in range(self._size):\n ellipse_patch_list.append([0] * self._size)\n # add the large rect determining the board\n rect = Rectangle((delta, delta),\n width=1 - 2 * delta, \n height=1 - 2 * delta,\n color='forestgreen')\n axs.add_patch(rect)\n # add circle patches\n s = self._converter.convert(self._hist[0][0],\n input_format='bitboard',\n output_format='ndarray3d')\n # determine the color and alpha values\n color_array[s[:,:,0] == 1] = transition_color_list.index('black')\n color_array[s[:,:,1] == 1] = transition_color_list.index('white')\n alpha_array = (color_array != 0).astype(np.uint8)\n for i in range(self._size):\n for j in range(self._size):\n # i moves along y axis while j along x\n cell_centre = ((j + 1) * delta + (2*j + 1) * cell_height_half,\\\n (self._size - i) * delta + (2*(self._size - i) - 1) * cell_height_half)\n # a circle will be placed where a coin is\n ellipse = Ellipse(cell_centre,\n width=((cell_height - delta)),\n height=((cell_height - delta)),\n angle=0,\n color=transition_color_list[color_array[i][j]], \n alpha=alpha_array[i][j])\n ellipse_patch_list[i][j] = ellipse\n # add to the figure\n 
axs.add_patch(ellipse_patch_list[i][j])\n # save first figure with some persistence\n fig_file_idx = 0\n for idx in range(frames_per_anim):\n if(idx == 0):\n fig.savefig('{:s}/img_{:05d}.png'.format(frames_dir, fig_file_idx), \n bbox_inches='tight')\n else:\n shutil.copyfile('{:s}/img_{:05d}.png'.format(frames_dir, 0),\n '{:s}/img_{:05d}.png'.format(frames_dir, fig_file_idx))\n fig_file_idx += 1\n ######## End Template Creation ########\n # iterate over the game frames with animation\n for idx in tqdm(range(len(self._hist))):\n # clear figure\n # plt.cla()\n # get the board from history\n s = self._converter.convert(self._hist[idx][self._hist_dict['s']],\n input_format='bitboard',\n output_format='ndarray3d')\n next_s = self._converter.convert(\n self._hist[idx][self._hist_dict['next_s']],\n input_format='bitboard',\n output_format='ndarray3d')\n # prepare a single frame\n for t in range(frames_per_anim):\n # determine the color and alpha values\n # color change from black to white\n color_array[s[:,:,0] * next_s[:,:,1] == 1] = t + 1\n # color change from white to black\n color_array[s[:,:,1] * next_s[:,:,0] == 1] = frames_per_anim - t\n # no coin now and then\n color_array[s[:,:,:2].sum(2) + next_s[:,:,:2].sum(2) == 0] = 0\n # new coin placed\n color_array[(s[:,:,:2].sum(2) == 0) & (next_s[:,:,0] == 1)] = 1\n color_array[(s[:,:,:2].sum(2) == 0) & (next_s[:,:,1] == 1)] = \\\n len(transition_color_list)-1\n # set alpha array\n alpha_array = (color_array != 0).astype(np.uint8)\n for i in range(self._size):\n for j in range(self._size):\n # i moves along y axis while j along x\n # a circle will be placed where a coin is\n ellipse_patch_list[i][j].set_color(\n transition_color_list[color_array[i][j]])\n ellipse_patch_list[i][j].set_alpha(alpha_array[i][j])\n # axs.scatter(5, 5)\n # figure is prepared, save in temp frames directory\n fig.savefig('{:s}/img_{:05d}.png'.format(frames_dir, fig_file_idx), \n bbox_inches='tight')\n fig_file_idx += 1\n # add some persistence before placing another new coin\n fig_file_copy_idx = fig_file_idx - 1\n for _ in range(frames_per_anim if idx== len(self._hist)-1\\\n else frames_per_anim//2):\n shutil.copyfile('{:s}/img_{:05d}.png'.format(frames_dir, fig_file_copy_idx),\n '{:s}/img_{:05d}.png'.format(frames_dir, fig_file_idx))\n fig_file_idx += 1\n \n # all frames have been saved, use ffmpeg to convert to movie\n # output frame rate is different to add some persistence\n os.system('ffmpeg -y -framerate {:d} -pattern_type sequence -i \"{:s}/img_%05d.png\" \\\n -c:v libx264 -r {:d} -pix_fmt yuv420p -vf \"crop=floor(iw/2)*2:floor(ih/2)*2\" {:s}'\\\n .format(int(1.5 * frames_per_anim), frames_dir, int(1.5 * frames_per_anim), path))", "def run_simulated_games(away_team_name, home_team_name, game_count=10):\n print \"Running simulations between %s and %s\" %\\\n (away_team_name, home_team_name)\n if conf.pitcher_sub_model is None:\n conf.pitcher_sub_model = joblib.load(\n './models/%s' % conf.used_pitcher_model_name)\n away_team = Team(away_team_name)\n home_team = Team(home_team_name)\n agg_batters = []\n agg_scores = []\n agg_pitchers = []\n game_no = 0\n while game_no < game_count:\n print \"Running game %d of %d. (%s vs. 
%s)\" %\\\n (game_no + 1, game_count, away_team_name, home_team_name)\n away_team.start_new_game()\n home_team.start_new_game()\n game_log, score, pit_stats, bat_stats = play_game(away_team, home_team)\n agg_scores.append(score)\n agg_pitchers.append(pit_stats)\n agg_batters.append(bat_stats)\n game_no += 1\n\n \"\"\"Convert data to final display format.\"\"\"\n batters_fanduel = {}\n batters_draftkings = {}\n pitchers_fanduel = {}\n pitchers_draftkings = {}\n wins = {\n away_team_name: 0,\n home_team_name: 0\n }\n runs = {\n away_team_name: [],\n home_team_name: []\n }\n for idx, entry in enumerate(agg_scores):\n runs[away_team_name].append(entry[0])\n runs[home_team_name].append(entry[1])\n if entry[0] > entry[1]:\n wins[away_team_name] += 1\n else:\n wins[home_team_name] += 1\n for name in agg_batters[idx].index:\n if name in batters_draftkings:\n batters_draftkings[name].append(\n agg_batters[idx].at[name, 'DKP'])\n batters_fanduel[name].append(agg_batters[idx].at[name, 'FDP'])\n else:\n batters_draftkings[name] = [agg_batters[idx].at[name, 'DKP']]\n batters_fanduel[name] = [agg_batters[idx].at[name, 'FDP']]\n for name in agg_pitchers[idx].index:\n if name in batters_draftkings:\n del batters_draftkings[name]\n del batters_fanduel[name]\n if name in pitchers_draftkings:\n pitchers_draftkings[name].append(\n agg_pitchers[idx].at[name, 'DKP'])\n pitchers_fanduel[name].append(\n agg_pitchers[idx].at[name, 'FDP'])\n else:\n pitchers_draftkings[name] = [agg_pitchers[idx].at[name, 'DKP']]\n pitchers_fanduel[name] = [agg_pitchers[idx].at[name, 'FDP']]\n return {\n 'wins': wins,\n 'runs': runs,\n 'batters_fd': batters_fanduel,\n 'batters_dk': batters_draftkings,\n 'pitchers_fd': pitchers_fanduel,\n 'pitchers_dk': pitchers_draftkings\n }", "def simulate_round(self, course_info):\r\n import numpy as np\r\n\r\n\r\n #McIlroy = Player.player(name = 'Rory McIlroy', year = 2020, df = df_total)\r\n total_strokes = 0\r\n course_lengths = list(course_info.keys())\r\n for i in range(len(course_info)):\r\n self.distance_from_hole = course_lengths[i]\r\n self.par = course_info[course_lengths[i]]\r\n self.location = 'Tee box'\r\n self.number_of_strokes = 0\r\n self.in_hole = False\r\n\r\n while self.in_hole == False:\r\n if self.location == 'Tee box':\r\n print('1st')\r\n if self.par == 4 or self.par == 5:\r\n # use the fir method\r\n tee_shot_probs = self.fairway_in_reg() # this is a list of probabilities\r\n tee_shot_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n tee_shot = np.random.choice(tee_shot_outcomes, size = 1, p = tee_shot_probs)\r\n self.distance_from_hole = self.stroke(tee_shot)\r\n self.location = tee_shot\r\n else:\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n\r\n\r\n elif self.location == 'Fairway':\r\n if (self.distance_from_hole <= 280) and (self.distance_from_hole > 120):\r\n # use the gir method\r\n\r\n print('2nd')\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 280:\r\n # use the fir method\r\n\r\n print('3rd')\r\n 
layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n # use the pitch method\r\n\r\n print('4th')\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n # use the chip method\r\n\r\n print('5th')\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'First Cut':\r\n # The lie will adjust the maximum distance the player can reach the green in two.\r\n # If poorer the lie is, the shorter the maximum distance becomes.\r\n if (self.distance_from_hole <= 260) and (self.distance_from_hole > 120):\r\n\r\n print('6th')\r\n # use the gir method\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 260:\r\n\r\n print('7th')\r\n # use the fir method\r\n layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n\r\n print('8th')\r\n # use the pitch method\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n # use the chip method\r\n\r\n print('9th')\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'Second Cut':\r\n # The lie will adjust the maximum distance the player can reach the green in two.\r\n # If poorer the lie is, the shorter the maximum distance becomes.\r\n if self.distance_from_hole <= 230 and self.distance_from_hole > 120:\r\n\r\n print('10th')\r\n # use the gir method\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 230:\r\n\r\n print('11th')\r\n # use the fir method\r\n layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 
'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n\r\n print('12th')\r\n # use the pitch method\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n\r\n print('13th')\r\n # use the chip method\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'Green':\r\n # use the putt method\r\n\r\n print('14th')\r\n putt_probs = self.putt()\r\n putt_outcomes = ['Make', 'Miss']\r\n #putt = np.mean(np.random.choice([1, 0], size = 10, p = putt_probs)).round()\r\n putt = np.random.choice(putt_outcomes, size = 1, p = putt_probs)\r\n print(putt, putt_probs)\r\n if putt == 'Make':\r\n self.in_hole == True\r\n self.number_of_strokes += 1\r\n break\r\n else:\r\n self.distance_from_hole = self.stroke(putt)\r\n\r\n self.number_of_strokes += 1\r\n print('Number of strokes: ', self.number_of_strokes)\r\n total_strokes += self.number_of_strokes\r\n print('Total Number of Strokes', total_strokes)\r\n pass", "def main():\n log.info('beginning INDRA machine runner')\n\n for subdirectory in sorted(os.listdir(HERE)):\n run_one(os.path.join(HERE, subdirectory))", "def main(args):\r\n # Test for debug execution\r\n if hasattr(args, '_debug'):\r\n xgame = \"DEBUG: \"\r\n else:\r\n args._debug = False\r\n xgame = \"\"\r\n # Build the playing board for a new game - new if no supplied load filename\r\n if not args.load and args.player1:\r\n xgame += \"Player 1 starts a new game.\"\r\n if not args.load and args.player2:\r\n xgame += \"Player 2 starts a new game.\"\r\n raise NotImplementedError(\"Player2 starts new game is unsupported.\")\r\n if args.load and args.player1:\r\n xgame += \"Player 1 continues a game.\"\r\n if args.load and args.player2:\r\n xgame += \"Player 2 continues a game.\"\r\n\r\n print(xgame)\r\n\r\n if not args.load:\r\n board = Board(args.board)\r\n tilebag = TileBag(args.tilebag)\r\n status = None\r\n mode = \"new\"\r\n else:\r\n last_turn = utils.load(args.load)\r\n board = last_turn.board\r\n tilebag = last_turn.tilebag\r\n status = last_turn.status\r\n mode = \"continue\"\r\n\r\n if args.player1:\r\n rack = Rack(args.rack)\r\n coord = None\r\n direction = None\r\n player = 1\r\n next_play = \"Opponent.\"\r\n\r\n if args.player2:\r\n rack = Rack(args.rack, player=2)\r\n coord = tuple(map(int, args.coord.split(\",\")))\r\n direction = args.direction\r\n player = 2\r\n next_play = \"Player1.\"\r\n\r\n # Game is the main app object, invoke take_turn to play.\r\n game = Game(board=board, tilebag=tilebag, status=status, rack=rack,\r\n coord=coord, direction=direction, mode=mode, player=player)\r\n if not args._debug:\r\n game.take_turn()\r\n\r\n # Output results, and exit saving game state.\r\n print(game.print_board())\r\n print(game.print_status())\r\n utils.save(args.save, game)\r\n utils.dump_output(game, next_play)\r\n\r\n return xgame", "def main():\n print(\"Let the games begin!\")\n ev3.Sound.speak(\"Starting Frogger 2.0 Game\").wait()\n\n 
main_follow_the_line()", "def game_thread_handler(event, playoff_data):\n\n if playoff_data:\n playoff_headline(event, playoff_data)\n else:\n generate_title(event)\n\n if event.meta['event_type'] in ['pre', 'game']:\n generate_game_body(event)\n\n print(f\"{os.path.basename(__file__)}: Created headline: {event.summary}\")\n new_thread(event)", "def main():\n os.system(\"clear\")\n level = 0\n print_intro(level)\n\n while level < 3:\n filename = next_level(level)\n map_string = map_reading(filename)\n maze = map_convert_to_list(map_string)\n player_pos = initialization(maze)\n level = move_player(player_pos, maze, level)\n\n win_game()", "def run(self):\n while True:\n if self.phase == 0: self.welcome_run()\n elif self.phase == 1: self.board_negotiation_run()\n elif self.phase == 2: self.turn_order_run()\n elif self.phase == 3: self.initial_placement_run()\n elif self.phase == 4: self.game_play_run()\n else:\n time.sleep(10)\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check image if it has longitude and latitude before upload to media file
def clean(self):
    if self.image:
        try:
            get_data = ImageMetaData(self.image)
        except AttributeError:
            raise ValidationError(_("This image type does not support"))
        lat, lon = get_data.get_lat_lng()
        if not lat and not lon:
            raise ValidationError(_("This image has no GPS details"))
[ "def has_gps(img):\n imagen = open(img, 'rb')\n losTags = exifread.process_file(imagen)\n\n return True if 'GPS GPSLongitude' in losTags.keys() else False", "def imagecheck(tweet):\n\tpass", "def is_spatial_image(image: Any) -> bool:\n if not isinstance(image, xr.DataArray):\n return False\n\n if not set(image.dims).issubset(_supported_dims):\n return False\n\n for dim in _spatial_dims.intersection(image.dims):\n if not image.coords[dim].dtype == np.float64:\n return False\n\n diff = np.diff(image.coords[dim])\n if not np.allclose(diff, diff[0]):\n return False\n\n if \"t\" in image.dims:\n t_coord = image.coords[\"t\"]\n if (\n t_coord.dtype.char not in np.typecodes[\"AllInteger\"]\n and t_coord.dtype.char not in np.typecodes[\"AllFloat\"]\n and t_coord.dtype.char not in np.typecodes[\"Datetime\"]\n ):\n return False\n\n return True", "def _check_thumbnail_uploaded(thumbnail_content):\n return thumbnail_content is not None", "def upload_image(self, image, single=True):\n url = BASE_API_URL % \"UploadImage\"\n data = {\n \"detection_flags\": \"bestface,propoints\" if single else \"propoints\",\n \"url\": self.uploads_url + image\n }\n response = requests.post(url, headers=self.headers, data=data).json()\n if response.get(\"int_response\", 1) == 0:\n return response.get(\"img_uid\")\n return None", "def validate_image(self, picture):\n if picture:\n if picture.size > 2000000:\n raise ValueError('Max size allowed 2MB')\n if picture.image.width < 180:\n raise ValueError('Width should be min 180px')\n if picture.image.height < 180:\n raise ValueError('Height should be min 180px')", "def img_check(img):\n with rasterio.open(img) as src:\n if src.crs.is_valid and src.crs.is_projected and src.crs.is_epsg_code:\n print(\"Input raster is valid and has valid CRS\")\n else:\n print(\"Input raster does not have valid CRS. 
Exiting the script\")\n # exiting from script\n sys.exit()", "def _coords_inside_image(*args, **kwargs): # real signature unknown\n pass", "def test_file_move_location(self):\n image = self.create_filer_image()\n image.is_public = False\n image.save()\n self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT))\n image._move_file(filer_settings.FILER_PRIVATEMEDIA_PREFIX,\n filer_settings.FILER_PUBLICMEDIA_PREFIX)\n image.save()\n self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT))", "def upload_image():\n data = request.body.read()\n file_name = request.headers.get('image_path', 'temp.jpg')\n raw_image_path2save = os.path.join(retina_model.output_path, file_name)\n det_image_path2save = os.path.join(retina_model.output_path_det, file_name)\n tzinfo = request.headers.get('tzinfo', '+08:00')\n\n # read posted image from bytes, and save it.\n image_data_raw = io.BytesIO(bytearray(data))\n image_data = retina_model.readImage(image_data_raw)\n retina_model.saveImage(image_data, raw_image_path2save)\n\n print(\"[upload_image] get post_image with file_name :\", file_name)\n # inser image info into image_info table\n arrive_timestamp = arrow.now(tzinfo).datetime\n retina_model.insert_image_info(file_name, arrive_timestamp)\n\n # detect image with retina\n detected_results = retina_model.image_datect_draw_save(image_data, file_name,\n det_image_path2save)", "def set_gps_location(file_name, lat, lng, alti, time):\n lat_deg = to_deg(lat, [\"S\", \"N\"])\n lng_deg = to_deg(lng, [\"W\", \"E\"])\n \n #print lat_deg\n #print lng_deg\n \n # convert decimal coordinates into degrees, munutes and seconds\n exiv_lat = (pyexiv2.Rational(lat_deg[0], 1), pyexiv2.Rational(lat_deg[1], 1), pyexiv2.Rational(lat_deg[2]*1000, 1000))\n exiv_lng = (pyexiv2.Rational(lng_deg[0], 1), pyexiv2.Rational(lng_deg[1] ,1), pyexiv2.Rational(lng_deg[2]*1000, 1000))\n\n exiv_image = pyexiv2.ImageMetadata(file_name)\n exiv_image.read()\n exif_keys = exiv_image.exif_keys\n \n exiv_image[\"Exif.GPSInfo.GPSLatitude\"] = exiv_lat\n exiv_image[\"Exif.GPSInfo.GPSLatitudeRef\"] = lat_deg[3]\n exiv_image[\"Exif.GPSInfo.GPSLongitude\"] = exiv_lng\n exiv_image[\"Exif.GPSInfo.GPSLongitudeRef\"] = lng_deg[3]\n exiv_image[\"Exif.GPSInfo.GPSAltitude\"] = pyexiv2.Rational(alti*100, 100)\n exiv_image[\"Exif.Image.GPSTag\"] = 654\n exiv_image[\"Exif.GPSInfo.GPSMapDatum\"] = \"WGS-84\"\n exiv_image[\"Exif.GPSInfo.GPSVersionID\"] = '2 0 0 0'\n\n _set_date_time(exiv_image, time)\n _set_model(exiv_image, 'Flir Systems AB', 'A65 Thermal Camera')\n \n exiv_image.write()", "def search_album_image(self) -> bool:\n self._log.debug('search_album_image() self.copy_dst=\"%s\"',\n self.copy_dst)\n\n media_files = []\n try:\n for fp in self.copy_dst.parent.iterdir(): # 'fp' means file path\n if fp.suffix.lower() in AUDIO_TYPES:\n media_files.append(fp)\n except OSError as ose:\n self._log.exception(ose)\n\n if not media_files:\n return False\n\n # for media files, try to extract an embedded image bytes, constitute\n # the bytes with a PIL.Image class instance, store that as `self._image`\n # help from https://stackoverflow.com/a/54773705/471376\n from mutagen.id3 import ID3\n from PIL import Image\n key_apic = 'APIC:'\n for fp in media_files:\n try:\n media = ID3(fp)\n except: # XXX: most likely will be ID3NoHeaderError\n continue\n if key_apic not in media:\n continue\n apic = media.get(key_apic)\n image_data = apic.data\n try:\n image = Image.open(io.BytesIO(image_data))\n except:\n continue\n # the 
PIL.Image will later be PIL.Image.save to the\n # self._image_type type (i.e. it will be format converted by the PIL\n # module)\n self.image_type_PIL = ImageType.ImageFromFormat(image.format)\n if not self.image_type_PIL:\n continue\n self._image = image\n self._image.size_pixels = image.height * image.width\n self._image_src = fp\n return True\n\n return False", "def test_file_upload_public_destination(self):\n image = self.create_filer_image()\n image.is_public = True\n image.save()\n self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT))", "def checkForGeom(dataset):\n \n spatial = False\n if \"Shape\" in [f.name for f in arcpy.ListFields(dataset) if f.required]:\n spatial = True\n return spatial", "def test_existing_location(self):\n cm = get_camera_by_location(self.lat, self.lon)\n self.assertEqual(self.st, cm.start_time)\n self.assertEqual(self.et, cm.end_time)", "def get_exif_location(self, exif_data):\n lat = None\n lon = None\n alt = None\n\n gps_latitude = _get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = _get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = _get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = _get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n gps_altitude = _get_if_exist(exif_data, 'GPS GPSAltitude')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = convert_to_degrees(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = convert_to_degrees(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n if gps_altitude:\n alt = ratio_to_float(gps_altitude.values[0])\n\n return lat, lon, alt", "def clone_image(self, volume, image_location, image_id, image_meta):\n container_format=image_meta.get('container_format')\n if container_format in ['fs_vgw_url','vcloud_vgw_url','aws_vgw_url']:\n return {'provider_location': None}, False\n else:\n return {'provider_location': None}, True", "def test_upload_image_success(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (20, 20))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n\n response = self.client.post(\n url,\n {'image': ntf},\n format='multipart'\n )\n\n self.recipe.refresh_from_db()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('image', response.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "def check_image_positions(self, kwargs_ps, kwargs_lens, tolerance=0.001):\n x_image_list, y_image_list = self.image_position(kwargs_ps, kwargs_lens)\n for i, model in enumerate(self._point_source_list):\n if model in ['LENSED_POSITION', 'SOURCE_POSITION']:\n x_pos = x_image_list[i]\n y_pos = y_image_list[i]\n x_source, y_source = self._lensModel.ray_shooting(x_pos, y_pos, kwargs_lens)\n dist = np.sqrt((x_source - x_source[0]) ** 2 + (y_source - y_source[0]) ** 2)\n if np.max(dist) > tolerance:\n return False\n return True", "def exist(image):\r\n try:\r\n if len(pyautogui.locateCenterOnScreen(image)) == 2:\r\n return True\r\n except Exception:\r\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function creating the trade_dec_model model's graph.
def trade_dec_model(input_shape): ### START CODE HERE ### # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices). features = Input(shape = input_shape, dtype = 'float32') # Propagate features through your batch_layer, you get back the data Normalized # batch_layer = BatchNormalization()(features) batch_layer = features # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state # Be careful, the returned output should be a batch of sequences. X = LSTM(128, return_sequences=True)(batch_layer) # Add dropout with a probability of 0.5 X = Dropout(0.2)(X) # Propagate X trough another LSTM layer with 128-dimensional hidden state # Be careful, the returned output should be a single hidden state, not a batch of sequences. X = LSTM(128,return_sequences=False)(X) # Add dropout with a probability of 0.5 X = Dropout(0.2)(X) # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors. X = Dense(1,activation='sigmoid')(X) # Add a softmax activation # X = Activation('sigmoid')(X) # Create Model instance which Normalize features into X. model = Model(inputs=[features],outputs=X) ### END CODE HERE ### return model
[ "def _create_graph(self, model: dict) -> Graph:\n\n\t\tgraph = Graph()\n\n\t\tkeys = list(model.keys())\n\n\t\tfor idx, pos in enumerate(keys):\n\t\t\tnode = Node(str(pos), name = str(pos), mlayout = pos[0], nlayout = pos[1])\n\t\t\tgraph.add_node(node)\n\n\t\tfor idx1, pos1 in enumerate(keys):\n\t\t\tnode1 = graph.get_node_by_nid(str(pos1))\n\t\t\tfor idx2, tup in enumerate(model[pos1]):\n\t\t\t\tpos2, _, cost = tup\n\t\t\t\tnode2 = graph.get_node_by_nid(str(pos2))\n\t\t\t\tedge = Edge(node1, node2, directed = False, weight = cost, pheromone_level = 0.0)\n\t\t\t\tgraph.add_edge(edge)\n\n\t\treturn graph", "def create_DiGraph(model, remove_rev=True, remove_nonfeas=True):\n\n G = nx.DiGraph()\n graph_reactions = []\n\n if remove_rev or remove_nonfeas:\n for r_i, r in enumerate(model.reactions):\n if remove_rev:\n if r.direction == Direction.reversible() and r.bounds.lb < 0 and r.bounds.ub > 0:\n continue\n if remove_nonfeas:\n if r.bounds.lb == 0 and r.bounds.ub == 0:\n continue\n graph_reactions.append(r)\n else:\n graph_reactions = model.reactions\n\n if len(graph_reactions) == 0:\n raise RuntimeError(\"No reactions to build graph\")\n reaction_names = [r.name for r in graph_reactions]\n\n for r_i, r in enumerate(graph_reactions):\n r_metabolites = []\n p_metabolites = []\n\n for mb in r.reactants:\n if not mb.metabolite.name in reaction_names:\n r_metabolites.append(mb.metabolite.name)\n else:\n raise RuntimeError(\"Metabolites and reaction must have different names: {0}\".format(mb.metabolite.name))\n for mb in r.products:\n if not mb.metabolite.name in reaction_names:\n p_metabolites.append(mb.metabolite.name)\n else:\n raise RuntimeError(\"Metabolites and reaction must have different names: {0}\".format(mb.metabolite.name))\n G.add_nodes_from(r_metabolites, bipartite=0)\n G.add_nodes_from(p_metabolites, bipartite=0)\n G.add_node(r.name, bipartite=1)\n\n for met in r_metabolites:\n G.add_edge(met, r.name)\n for met in p_metabolites:\n G.add_edge(r.name, met)\n if bipartite.is_bipartite(G):\n return G\n else:\n raise TypeError(\"Cannot create bipartite graph\")", "def mfConstructGraphModel(graph_model, baseModel, configFile):\n if (baseModel,graph_model) in model_instances.keys():\n return model_instances[(baseModel,graph_model)]\n elif graph_model == \"complete\":\n log.info(\"Creating complete graph model\")\n inst = CompleteGraphModel(baseModel, configFile)\n elif graph_model == \"empty\":\n log.info(\"Creating disconnected graph model\")\n inst = EmptyGraphModel(baseModel, configFile)\n elif graph_model == \"erdos_renyi\":\n log.info(\"Creating Erdos-Renyi graph model\")\n inst = ErdosRenyiModel(baseModel, configFile)\n elif graph_model == \"symmetric\":\n log.info(\"Creating Symmetric Erdos-Renyi graph model\")\n inst = ErdosRenyiModel(baseModel, configFile)\n elif graph_model == \"sbm\":\n log.info(\"Creating Stochastic Block model\")\n inst = StochasticBlockModel(baseModel, configFile)\n elif graph_model == \"distance\":\n log.info(\"Creating Latent Distance model\")\n inst = LatentDistanceModel(baseModel, configFile)\n elif graph_model == \"coupled_sbm_w\":\n log.info(\"Creating coupled SBM+Weight prior\")\n inst = StochasticBlockModelCoupledWithW(baseModel, configFile)\n else:\n log.error(\"Unrecognized graph model: %s\", graph_model)\n exit()\n \n model_instances[(baseModel,graph_model)] = inst\n return inst", "def _create_network(self):\n self.mu, self.log_var = self._encoder()\n\n self.x_hat = self._decoder()\n self.vae_model = Model(inputs=self.x, 
outputs=self.decoder_model(self.encoder_model(self.x)), name=\"VAE\")", "def create_graph(self, graph_name):", "def create_model():\n ###########################################################################\n # Flowsheet and Property Package #\n ###########################################################################\n m = pyo.ConcreteModel(name=\"Steam Cycle Model\")\n m.fs = FlowsheetBlock(default={\"dynamic\": False})\n m.fs.prop_water = iapws95.Iapws95ParameterBlock(\n default={\"phase_presentation\": iapws95.PhaseType.LG}\n )\n\n m.fs.prop_water2 = iapws95.Iapws95ParameterBlock()\n m.fs.therminol66 = ThermalOilParameterBlock()\n\n m.fs.charge_hx = HeatExchanger(\n default={\"delta_temperature_callback\": delta_temperature_underwood_callback,\n \"shell\": {\"property_package\": m.fs.prop_water2},\n \"tube\": {\"property_package\": m.fs.therminol66},\n \"flow_pattern\": HeatExchangerFlowPattern.countercurrent})\n\n m.fs.hp_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2})\n m.fs.ip_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2})\n\n m.fs.storage_cooler = Heater(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water2,\n \"has_pressure_change\": True})\n \n m.fs.hx_pump = WaterPump(default={\"property_package\": m.fs.prop_water2})\n\n # The enthalpy at the outlet of the cooler is required to be subcooled, that is,\n # below the ehntalpy of saturation. This condition was selected instead of using\n # temperatures, which cause certain difficulty in converging the model.\n # return (m.fs.storage_cooler.control_volume.properties_out[0].temperature <= \n # m.fs.storage_cooler.control_volume.properties_out[0].temperature_sat - 5)\n @m.fs.storage_cooler.Constraint(m.fs.time)\n def constraint_cooler_enth(b, t):\n return (m.fs.storage_cooler.control_volume.properties_out[0].enth_mol <= \n m.fs.storage_cooler.control_volume.properties_out[0].enth_mol_sat_phase['Liq'])\n \n ###########################################################################\n # Turbine declarations #\n ###########################################################################\n\n for i in range(9):\n\n turbine = HelmTurbineStage(\n default={\n \"property_package\": m.fs.prop_water2\n }\n )\n setattr(m.fs, \"turbine_\" + str(i+1), turbine)\n\n ###########################################################################\n # Boiler section declarations: #\n ###########################################################################\n # Boiler section is set up using two heater blocks, as following:\n # 1) For the main steam the heater block is named 'boiler'\n # 2) For the reheated steam the heater block is named 'reheater'\n m.fs.boiler = Heater(\n default={\n \"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"has_pressure_change\": True\n }\n )\n m.fs.reheater = Heater(\n default={\n \"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"has_pressure_change\": True\n }\n )\n\n # Outlet temperature of boiler is set to 866.15 K\n @m.fs.boiler.Constraint(m.fs.time)\n def boiler_temperature_constraint(b, t):\n return b.control_volume.properties_out[t].temperature == 866.15 # K\n\n # Outlet temperature of reheater is set to 866.15 K\n @m.fs.reheater.Constraint(m.fs.time)\n def reheater_temperature_constraint(b, t):\n return b.control_volume.properties_out[t].temperature == 866.15 # K\n\n ###########################################################################\n # Add Condenser Mixer, 
Condenser, and Condensate pump #\n ###########################################################################\n # condenser mix\n m.fs.condenser_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"main\", \"bfpt\", \"drain\", \"makeup\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n\n # The inlet 'main' refers to the main steam coming from the turbine train\n # Inlet 'bfpt' refers to the steam coming from the bolier feed pump turbine\n # Inlet 'drain' refers to the condensed steam from the feed water heater 1\n # Inlet 'makeup' refers to the make up water\n # The outlet pressure of condenser mixer is equal to the minimum pressure\n # Since the turbine (#9) outlet (or, mixer inlet 'main') pressure\n # has the minimum pressure, the following constraint sets the outlet\n # pressure of the condenser mixer to the pressure of the inlet 'main'\n @m.fs.condenser_mix.Constraint(m.fs.time)\n def mixer_pressure_constraint(b, t):\n return b.main_state[t].pressure == b.mixed_state[t].pressure\n\n m.fs.condenser = CondenserHelm(default={\"shell\":{\"has_pressure_change\": False,\n \"property_package\": m.fs.prop_water2},\n \"tube\": {\"has_pressure_change\": False,\n \"property_package\": m.fs.prop_water2}})\n \n iscale.set_scaling_factor(m.fs.condenser.side_1.heat, 1e-9)\n iscale.set_scaling_factor(m.fs.condenser.side_2.heat, 1e-9)\n\n # condensate pump\n m.fs.cond_pump = WaterPump(\n default={\n \"property_package\": m.fs.prop_water2,\n }\n )\n ###########################################################################\n # Feedwater heater declaration #\n ###########################################################################\n # Feed water heaters (FWHs) are declared as 0D heat exchangers\n # Tube side is for feed water & Shell side is for steam condensing\n # Pressure drop on both sides are accounted for by setting the respective\n # outlet pressure based on the following assumptions:\n # (1) Feed water side: A constant 4% pressure drop is assumed\n # on the feedwater side for all FWHs. For this,\n # the outlet pressure is set to 0.96 times the inlet pressure,\n # on the feed water side for all FWHs\n # (2) Steam condensing side: Going from high pressure to\n # low pressure FWHs, the outlet pressure of\n # the condensed steam in assumed to be 10% more than that\n # of the pressure of steam extracted for the immediately\n # next lower pressure feedwater heater.\n # e.g. the outlet condensate pressure of FWH 'n'\n # = 1.1 * pressure of steam extracted for FWH 'n-1'\n # In case of FWH1 the FWH 'n-1' is used for Condenser,\n # and in case of FWH6, FWH 'n-1' is for Deaerator. 
Here,\n # the steam pressure for FWH 'n-1' is known because the\n # pressure ratios for turbines are fixed.\n # The condensing steam is assumed to leave the FWH as saturated liquid\n # Thus, each FWH is accompanied by 3 constraints, 2 for pressure drop\n # and 1 for the enthalpy.\n\n # Scaling factors for area and overall heat transfer coefficients for\n # FWHs have all been set appropriately (user may change these values,\n # if needed) if not set, the scaling factors = 1 (IDAES default)\n\n ###########################################################################\n # DEFINITION OF FEED WATER HEATERS MIXERS\n ###########################################################################\n FWH_Mixers_list = ['fwh1_mix', 'fwh2_mix', 'fwh3_mix', 'fwh6_mix']\n\n for i in FWH_Mixers_list:\n FWH_Mixer = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n setattr(m.fs, i, FWH_Mixer)\n\n m.fs.fwh7_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\", \"from_hx_pump\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n \n m.fs.bfp_mix = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"from_bfp\", \"from_hx_pump\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n # @m.fs.hx_pump.Constraint(m.fs.time)\n # def hx_pump_pressure_out(b, t):\n # return (m.fs.hx_pump.control_volume.properties_out[0.0].pressure == \n # m.fs.fwh7_mix.steam_state[0].pressure*1.15)\n ###########################################################################\n # DEFINITION OF OUTLET PRESSURE OF FEED WATER HEATERS MIXERS\n ###########################################################################\n\n # The outlet pressure of an FWH mixer is equal to the minimum pressure\n # Since the pressure of mixer inlet 'steam' has the minimum pressure,\n # the following constraints set the outlet pressure of FWH mixers to be same\n # as the pressure of the inlet 'steam'\n\n def fwhmixer_pressure_constraint(b, t):\n return b.steam_state[t].pressure == b.mixed_state[t].pressure\n\n for i in FWH_Mixers_list:\n setattr(getattr(m.fs, i), \"mixer_pressure_constraint\", pyo.Constraint(m.fs.config.time, rule=fwhmixer_pressure_constraint))\n\n @m.fs.fwh7_mix.Constraint(m.fs.time)\n def fwh7mixer_pressure_constraint(b, t):\n return b.steam_state[t].pressure == b.mixed_state[t].pressure\n \n @m.fs.bfp_mix.Constraint(m.fs.time)\n def bfp_mix_pressure_constraint(b, t):\n return b.from_bfp_state[t].pressure == b.mixed_state[t].pressure\n ###########################################################################\n # DEFINITION OF FEED WATER HEATERS\n ###########################################################################\n FWH_list = ['fwh1', 'fwh2', 'fwh3', 'fwh4', 'fwh6', 'fwh7', 'fwh8']\n\n for i in FWH_list:\n FWH = HeatExchanger(\n default={\n \"delta_temperature_callback\": delta_temperature_underwood_callback,\n \"shell\": {\n \"property_package\": m.fs.prop_water,\n \"material_balance_type\": MaterialBalanceType.componentTotal,\n \"has_pressure_change\": True,\n },\n \"tube\": {\n \"property_package\": m.fs.prop_water,\n \"material_balance_type\": MaterialBalanceType.componentTotal,\n \"has_pressure_change\": True,\n },\n }\n )\n setattr(m.fs, i, FWH)\n\n ###########################################################################\n # SETTING SCALING FACTORS FOR AREA AND HEAT TRANSFER COEFFICIENT\n 
###########################################################################\n\n for i in FWH_list:\n c = getattr(m.fs, i)\n iscale.set_scaling_factor(getattr(c, \"area\"), 1e-2)\n iscale.set_scaling_factor(getattr(c, \"overall_heat_transfer_coefficient\"), 1e-3)\n\n ###########################################################################\n # Setting the outlet enthalpy of condensate in an FWH to be same as saturated liquid\n ###########################################################################\n def fwh_vaporfrac_constraint(b, t):\n return (\n b.side_1.properties_out[t].enth_mol\n == b.side_1.properties_out[t].enth_mol_sat_phase['Liq'])\n\n for i in FWH_list:\n setattr(getattr(m.fs, i), i + \"_vaporfrac_constraint\", pyo.Constraint(m.fs.time, rule=fwh_vaporfrac_constraint))\n\n ###########################################################################\n # Setting a 4% pressure drop on the feedwater side (P_out = 0.96 * P_in)\n ###########################################################################\n\n def fwh_s2pdrop_constraint(b, t):\n return (\n b.side_2.properties_out[t].pressure\n == 0.96 * b.side_2.properties_in[t].pressure)\n\n for i in FWH_list:\n setattr(getattr(m.fs, i), i + \"_s2pdrop_constraint\", pyo.Constraint(m.fs.time, rule=fwh_s2pdrop_constraint))\n\n ###########################################################################\n # Setting the outlet pressure of condensate to be 10% more than that of\n # steam routed to condenser, as described in FWH description\n ###########################################################################\n # FWH1: 0.5 is the pressure ratio for turbine #9 (see set_inputs)\n # FWH2: 0.64^2 is the pressure ratio for turbine #8 (see set_inputs)\n # FWH3: 0.64^2 is the pressure ratio for turbine #7 (see set_inputs)\n # FWH4: 0.64^2 is the pressure ratio for turbine #6 (see set_inputs)\n # FWH6: 0.79^6 is the pressure ratio for turbine #4 (see set_inputs)\n # FWH7: 0.79^4 is the pressure ratio for turbine #3 (see set_inputs)\n # FWH8: 0.8^2 is the pressure ratio for turbine #2 (see set_inputs)\n \n pressure_ratio_list = { 'fwh1': 0.5,\n 'fwh2': 0.64**2,\n 'fwh3': 0.64**2,\n 'fwh4': 0.64**2,\n 'fwh6': 0.79**6,\n 'fwh7': 0.79**4,\n 'fwh8': 0.8**2}\n \n def fwh_s1pdrop_constraint(b, t):\n return (\n b.side_1.properties_out[t].pressure\n == 1.1 * b.turbine_pressure_ratio * b.side_1.properties_in[t].pressure)\n\n for i in FWH_list:\n b = getattr(m.fs, i)\n b.turbine_pressure_ratio = pyo.Param(initialize = pressure_ratio_list[i])\n setattr(b, i+\"_s1pdrop_constraint\", pyo.Constraint(m.fs.config.time, rule=fwh_s1pdrop_constraint))\n\n ###########################################################################\n # Add deaerator and boiler feed pump (BFP) #\n ###########################################################################\n m.fs.fwh5_da = Mixer(\n default={\n \"momentum_mixing_type\": MomentumMixingType.none,\n \"inlet_list\": [\"steam\", \"drain\", \"feedwater\"],\n \"property_package\": m.fs.prop_water,\n }\n )\n\n # The outlet pressure of deaerator is equal to the minimum pressure\n # Since the pressure of deaerator inlet 'feedwater' has\n # the minimum pressure, the following constraint sets the outlet pressure\n # of deaerator to be same as the pressure of the inlet 'feedwater'\n @m.fs.fwh5_da.Constraint(m.fs.time)\n def fwh5mixer_pressure_constraint(b, t):\n return b.feedwater_state[t].pressure == b.mixed_state[t].pressure\n\n m.fs.bfp = WaterPump(\n default={\n \"property_package\": m.fs.prop_water2,\n }\n )\n m.fs.bfpt = 
HelmTurbineStage(\n default={\n \"property_package\": m.fs.prop_water2,\n }\n )\n\n # The following constraint sets the outlet pressure of steam extracted\n # for boiler feed water turbine to be same as that of condenser\n @m.fs.Constraint(m.fs.time)\n def constraint_out_pressure(b, t):\n return (\n b.bfpt.control_volume.properties_out[t].pressure\n == b.condenser_mix.mixed_state[t].pressure\n )\n\n # The following constraint demands that the work done by the\n # boiler feed water pump is same as that of boiler feed water turbine\n # Essentially, this says that boiler feed water turbine produces just\n # enough power to meet the demand of boiler feed water pump\n @m.fs.Constraint(m.fs.time)\n def constraint_bfp_power(b, t):\n return (\n b.bfp.control_volume.work[t] + b.bfpt.control_volume.work[t]\n == 0\n )\n\n ###########################################################################\n # Turbine outlet splitter constraints #\n ###########################################################################\n # Equality constraints have been written as following to define\n # the split fractions within the turbine train\n\n splitter_list = ['t1_splitter', 't2_splitter', 't3_splitter', 't5_splitter', 't6_splitter', 't7_splitter', 't8_splitter']\n\n for i in splitter_list:\n\n Splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water})\n setattr(m.fs, i, Splitter)\n \n m.fs.t4_splitter = HelmSplitter(default={\"dynamic\": False,\n \"property_package\": m.fs.prop_water,\n \"num_outlets\": 3})\n\n # The power plant with storage for a charge scenario is now ready\n # Declaraing a plant power out variable for easy analysis of various\n # design and operating scenarios\n m.fs.plant_power_out = pyo.Var(\n m.fs.time,\n domain=pyo.Reals,\n initialize=620,\n doc=\"Net Power MWe out from the power plant\"\n )\n\n # Constraint on Plant Power Output\n # Plant Power Out = Turbine Power - Power required for HX Pump\n @m.fs.Constraint(m.fs.time)\n def production_cons(b, t):\n return (\n (-1*(m.fs.turbine_1.work_mechanical[t]\n + m.fs.turbine_2.work_mechanical[t]\n + m.fs.turbine_3.work_mechanical[t]\n + m.fs.turbine_4.work_mechanical[t]\n + m.fs.turbine_5.work_mechanical[t]\n + m.fs.turbine_6.work_mechanical[t]\n + m.fs.turbine_7.work_mechanical[t]\n + m.fs.turbine_8.work_mechanical[t]\n + m.fs.turbine_9.work_mechanical[t])\n ) * 1e-6\n == m.fs.plant_power_out[t]\n )\n\n ###########################################################################\n # Create the stream Arcs and return the model #\n ###########################################################################\n _create_arcs(m)\n pyo.TransformationFactory(\"network.expand_arcs\").apply_to(m.fs)\n return m", "async def make_graph(self, args):\n return self._graph", "def _construct_graph(self, model: torch.nn.Module, model_input: Union[torch.Tensor, Tuple]):\n module_tensor_shapes_map = ConnectedGraph._generate_module_tensor_shapes_lookup_table(model, model_input)\n trace = torch.jit.trace(model, model_input, **jit_trace_args)\n self._parse_top_level_trace(trace, model)\n self._optimize_connected_graph()\n self._transform_ops_and_products_to_connected_graph_convention()\n self._fill_op_and_product_properties(module_tensor_shapes_map)\n\n # In certain models, a 'mangled' version of nodes like Conv or BN appears in the trace which exposes parameters\n # as constant inputs to the node. 
In such cases, remove constant inputs since param products will be created\n # to track them.\n self._remove_inputs_for_ops()\n # Create parameters for ops such as conv, batchnorm, etc.\n self._create_param_products()\n\n # For each split in the model, insert a corresponding split Op in the connected graph.\n ops_list = [op for op in self._ops.values()]\n for op in ops_list:\n self._determine_split_behavior_for_op_and_insert_split_op_in_connected_graph(op)", "def build_graph(config):\n # placeholders\n pholders = build_placeholders(config)\n\n waymark_construction_results = tf_get_waymark_data(config, pholders)\n wmark0_data = waymark_construction_results.waymark0_data\n wmark_data = waymark_construction_results.waymark_data\n\n with tf.variable_scope(\"tre_model\"):\n\n idxs = config.initial_waymark_indices\n max_num_ratios = idxs[-1]\n\n energy_obj = build_energies(config=config,\n bridge_idxs=pholders.bridge_idxs,\n max_num_ratios=max_num_ratios,\n head_multiplier=pholders.head_multiplier\n )\n\n neg_energies = energy_obj.neg_energy(wmark_data, is_train=True, is_wmark_input=True)\n\n # build train loss & optimisation step\n tre_train_loss = build_train_loss(config, neg_energies, pholders.loss_weights)\n tre_optim_op = build_optimisers(tre_train_loss, pholders, config)\n\n # build validation operations\n val_neg_energies = energy_obj.neg_energy(wmark_data, is_train=False, is_wmark_input=True)\n loss_obj, val_loss, loss_terms, nwj_loss_op = build_val_loss(config, val_neg_energies)\n\n neg_energies_of_data = energy_obj.neg_energy(wmark0_data, is_train=False, is_wmark_input=False) # (n_batch, n_ratios)\n av_neg_energies_of_data = tf.reduce_mean(neg_energies_of_data, axis=0) # (n_ratios, )\n\n if \"2d\" in config.dataset_name or \"1d\" in config.dataset_name:\n noise_logprob = waymark_construction_results.noise_dist.log_prob(wmark0_data)\n bridges_and_noise_neg_e_of_data = tf.concat([neg_energies_of_data, tf.expand_dims(noise_logprob, axis=1)], axis=1)\n\n spec_norms = []\n if hasattr(energy_obj, \"model\"):\n for layer in energy_obj.model.layers:\n if hasattr(layer, \"spectral_norm\"):\n spec_norms.append(layer.spectral_norm)\n\n average_metric_ops = [\n loss_obj.acc,\n loss_obj.class1_acc,\n loss_obj.class2_acc,\n loss_obj.dawid_statistic_numerator,\n loss_obj.dawid_statistic_denominator,\n val_loss,\n nwj_loss_op,\n av_neg_energies_of_data\n ]\n\n graph = AttrDict(locals())\n graph.update(pholders)\n return graph # dict whose values can be accessed as attributes i.e. 
val = dict.key", "def build_graph(self):\r\n self._create_placeholders()\r\n self._create_network()\r\n self._create_loss()\r\n self._create_optimizer()\r\n self._create_summaries()\r\n self._show_current_model()", "def _create_model(self):\n\n self.z1 = tf.placeholder(tf.float32, [None, self.n_obs])\n self.z2 = tf.placeholder(tf.float32, [None, self.n_obs])\n self.h1 = tf.placeholder(tf.float32, [None, self.n_prior])\n self.h2 = tf.placeholder(tf.float32, [None, self.n_prior])\n\n # _create_encoder\n #self.mu1, self.log_sigma1_squared, self.mu2, self.log_sigma2_squared, self.mu3, self.log_sigma3_squared, self.mu4, self.log_sigma4_squared = self._create_encoder(tf.concat([self.z1, self.z2], axis = 1))\n # _create_encoder_v2\n # conditional on only z\n # self.mu1, self.log_sigma1_squared, self.mu3, self.log_sigma3_squared = self._create_encoder_v2(self.z1)\n # self.mu2, self.log_sigma2_squared, self.mu4, self.log_sigma4_squared = self._create_encoder_v2(self.z2)\n # conditional on both z and h\n self.mu1, self.log_sigma1_squared, self.mu3, self.log_sigma3_squared = self._create_encoder_v2(tf.concat([self.z1, self.h1], axis = 1))\n if self.pair_or_single == \"pair\":\n self.mu2, self.log_sigma2_squared, self.mu4, self.log_sigma4_squared = self._create_encoder_v2(tf.concat([self.z2, self.h2], axis = 1))\n \n if self.pair_or_single == \"pair\":\n self.mu1_prior, self.log_sigma1_squared_prior, self.mu2_prior, self.log_sigma2_squared_prior, self.mu3_prior, self.log_sigma3_squared_prior, self.mu4_prior, self.log_sigma4_squared_prior = self._compute_prior_parameter(self.h1, self.h2, self.z1, self.z2)\n else:\n self.mu1_prior, self.log_sigma1_squared_prior, self.mu3_prior, self.log_sigma3_squared_prior = self._compute_prior_parameter(self.h1, None, self.z1, None)\n\n self.delta1 = tf.random_normal([self.n_prior], mean = self.mu1, stddev = tf.sqrt(tf.exp(self.log_sigma1_squared)))\n if self.pair_or_single == \"pair\":\n self.delta2 = tf.random_normal([self.n_prior], mean = self.mu2, stddev = tf.sqrt(tf.exp(self.log_sigma2_squared)))\n \n if self.nf_K > 0:\n self.delta1_K = planar_flow(self.delta1, z_dim = self.n_prior, length = self.nf_K)\n if self.pair_or_single == \"pair\":\n self.delta2_K = planar_flow(self.delta2, z_dim = self.n_prior, length = self.nf_K, reuse = True)\n else:\n self.delta1_K = self.delta1\n if self.pair_or_single == \"pair\":\n self.delta2_K = self.delta2\n\n self.sigma_squared_obs1 = tf.exp(tf.random_normal([self.n_obs], mean = self.mu3, stddev = tf.sqrt(tf.exp(self.log_sigma3_squared))))\n if self.pair_or_single == \"pair\":\n self.sigma_squared_obs2 = tf.exp(tf.random_normal([self.n_obs], mean = self.mu4, stddev = tf.sqrt(tf.exp(self.log_sigma4_squared))))\n if self.pair_or_single == \"pair\":\n self.z1_minius_z2, self.new_h1, self.new_h2, self.f1, self.f2 = self._create_decoder(self.h1 + self.delta1_K, self.h2 + self.delta2_K, self.sigma_squared_obs1, self.sigma_squared_obs2)\n else:\n self.generated_z1, self.new_h1, self.f1 = self._create_decoder(self.h1 + self.delta1_K, None, self.sigma_squared_obs1, None)\n\n\n # regularization term\n if self.nf_K > 0:\n regular_term1 = tfd.MultivariateNormalDiag(loc = self.mu1_prior, scale_diag = tf.exp(self.log_sigma1_squared_prior/2.)).log_prob(self.delta1_K) - tfd.MultivariateNormalDiag(loc = self.mu1, scale_diag = tf.exp(self.log_sigma1_squared/2.)).log_prob(self.delta1) + SLDJ(self.delta1, z_dim = self.n_prior, length = self.nf_K, reuse = True)\n if self.pair_or_single == \"pair\":\n regular_term2 = tfd.MultivariateNormalDiag(loc = 
self.mu2_prior, scale_diag = tf.exp(self.log_sigma2_squared_prior/2.)).log_prob(self.delta2_K) - tfd.MultivariateNormalDiag(loc = self.mu2, scale_diag = tf.exp(self.log_sigma2_squared/2.)).log_prob(self.delta2) + SLDJ(self.delta2, z_dim = self.n_prior, length = self.nf_K, reuse = True)\n else:\n regular_term2 = tf.constant(0.0)\n else:\n regular_term1 = 0.5 * tf.reduce_sum(1 + self.log_sigma1_squared - self.log_sigma1_squared_prior - (tf.exp(self.log_sigma1_squared) + tf.square(self.mu1 - self.mu1_prior))/(tf.exp(self.log_sigma1_squared_prior) + self.epsilon), 1)\n if self.pair_or_single == \"pair\":\n regular_term2 = 0.5 * tf.reduce_sum(1 + self.log_sigma2_squared - self.log_sigma2_squared_prior - (tf.exp(self.log_sigma2_squared) + tf.square(self.mu2 - self.mu2_prior))/(tf.exp(self.log_sigma2_squared_prior) + self.epsilon), 1)\n else:\n regular_term2 = tf.constant(0.0)\n \n regular_term3 = 0.5 * tf.reduce_sum(1 + self.log_sigma3_squared - self.log_sigma3_squared_prior - (tf.exp(self.log_sigma3_squared) + tf.square(self.mu3 - self.mu3_prior))/(tf.exp(self.log_sigma3_squared_prior) + self.epsilon), 1)\n if self.pair_or_single == \"pair\":\n regular_term4 = 0.5 * tf.reduce_sum(1 + self.log_sigma4_squared - self.log_sigma4_squared_prior - (tf.exp(self.log_sigma4_squared) + tf.square(self.mu4 - self.mu4_prior))/(tf.exp(self.log_sigma4_squared_prior) + self.epsilon), 1)\n else:\n regular_term4 = tf.constant(0.0)\n\n self.regularizer = regular_term1 + regular_term2 + regular_term3 + regular_term4\n # reconstruction term\n if self.pair_or_single == \"pair\":\n self.recon_error = tf.reduce_sum(-tf.log(self.sigma_squared_obs1 + self.sigma_squared_obs2 + self.epsilon)/2. - tf.square(self.z1 - self.z2 - self.z1_minius_z2)/2./(self.sigma_squared_obs1 + self.sigma_squared_obs2 + self.epsilon), 1)\n else:\n self.recon_error = tf.reduce_sum(-tf.log(self.sigma_squared_obs1 + self.epsilon)/2. 
- tf.square(self.z1 - self.generated_z1)/2./(self.sigma_squared_obs1 + self.epsilon), 1)\n \n # loss\n loss = -tf.reduce_mean(self.regularizer + self.recon_error)\n\n return loss, -tf.reduce_mean(regular_term1), -tf.reduce_mean(regular_term2), -tf.reduce_mean(regular_term3), -tf.reduce_mean(regular_term4)", "def _build_graph(self):\n\n # build simple architecture to multiply two numbers\n w1 = keras.layers.Input(shape=(1,), name=\"w1\")\n w2 = keras.layers.Input(shape=(1,), name=\"w2\")\n\n add = keras.layers.add([w1, w2])\n mult = keras.layers.multiply([w1, w2])\n out = keras.layers.concatenate([add, mult])\n\n return keras.models.Model(inputs=[w1, w2], outputs=out)", "def create_graph(AdjList, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Graph.objects.create(AdjList=AdjList, pub_date=time)", "def _create_decode_layer(self):\n\n with tf.name_scope(\"Decode\"):\n if self.dec_act_func == 'sigmoid':\n _dec_act_func = tf.nn.sigmoid\n\n elif self.dec_act_func == 'tanh':\n _dec_act_func = tf.nn.tanh\n\n else:\n _dec_act_func = lambda x: x\n\n self.decode = _dec_act_func(tf.matmul(self.encode, tf.transpose(self.W_)) + self.bv_)\n\n tf.summary.histogram('weights', tf.transpose(self.W_))\n tf.summary.histogram('bias', self.bv_)\n tf.summary.histogram('decodings', self.decode)", "def build_graph(self):\n return TorchGloVeModel(self.n_words, self.embed_dim)", "def make_cp_graph(model_config):\n\n tf.reset_default_graph()\n\n model = model_config.model\n if model_config.bijectors_fn is not None:\n model = ed_transforms.transform_with_bijectors(\n model, model_config.bijectors_fn)\n\n log_joint_centered = ed.make_log_joint_fn(model)\n\n with ed.tape() as model_tape:\n _ = model(*model_config.model_args)\n\n target_cp_kwargs = {}\n for param in model_tape.keys():\n if param in model_config.observed_data.keys():\n target_cp_kwargs[param] = model_config.observed_data[param]\n\n def target_cp(*param_args):\n i = 0\n for param in model_tape.keys():\n if param not in model_config.observed_data.keys():\n target_cp_kwargs[param] = param_args[i]\n i = i + 1\n\n return log_joint_centered(*model_config.model_args, **target_cp_kwargs)\n\n elbo, variational_parameters = util.get_mean_field_elbo(\n model,\n target_cp,\n num_mc_samples=FLAGS.num_mc_samples,\n model_args=model_config.model_args,\n model_obs_kwargs=model_config.observed_data,\n vi_kwargs=None)\n\n return target_cp, model_config.model, elbo, variational_parameters, None", "def generate_tf_model(graph):\n\n # generate tensorflow model and export to out_file\n\n # with __dict__ we can see the content of the class\n logging.debug(graph.__dict__)\n\n # model_spec contains some info about the model\n for key, value in graph.model_spec.items():\n logging.debug(key)\n logging.debug(value)\n\n network_name = graph.model_spec['name']\n\n filename = get_database( 'benchmark', 'graphs' ,'tf2', network_name+'.pb')\n logging.debug(\"Stored to: %s\" % filename)", "def convert(self, model: \"keras.models.Model\", input_orders: List[Order] = None) -> Graph:\n if not model.built:\n model.build(None)\n\n self._convert_tensors(model.inputs, input_orders)\n\n for depth in sorted(list(model.nodes_by_depth.keys()), reverse=True):\n for node in model.nodes_by_depth[depth]:\n self._convert_operator(node.outbound_layer)\n\n # Check that all output tensors from current layer are converted into WebDNN Variable\n for tensor in node.output_tensors:\n if not self.has_variable(tensor):\n raise AssertionError(\n f\"[KerasConverter] {node.outbound_layer} 
outputs {tensor}, but it was not converted into WebDNN Variable by \"\n f\"{self._handler_map[self.__class__.__name__][self.serialize_operator_type(node.outbound_layer)]}\")\n\n self._input_index_dict[model] -= 1\n self._output_index_dict[model] -= 1\n self._input_tensor_cache = None\n self._output_tensor_cache = None\n\n graph = Graph([self.get_variable(t) for t in self.get_input_tensor(model)],\n [self.get_variable(t) for t in self.get_output_tensor(model)])\n\n self._input_tensor_cache = None\n self._output_tensor_cache = None\n\n for v in graph.inputs:\n v.attributes.add(Input(v))\n\n for v in graph.outputs:\n v.attributes.add(Output(v))\n\n return graph", "def __create_graph(self):\n self.clear() \n self.__ordered_network()\n self.__create_new_random_connections()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build query to request journey data.
def build_journey_query( self, station_id: str, direction_id: Optional[str] = None, max_journeys: int = 20, products: Optional[List[str]] = None, ) -> str: self.station_id = station_id self.direction_id = direction_id self.max_journeys = max_journeys self.products_filter = product_filter(products or ALL_PRODUCTS) params: Dict[str, Union[str, int]] = { "selectDate": "today", "time": "now", "input": self.station_id, "maxJourneys": self.max_journeys, "boardType": "dep", "productsFilter": self.products_filter, "disableEquivs": "discard_nearby", "output": "xml", "start": "yes", } if self.direction_id: params["dirInput"] = self.direction_id return base_url() + urllib.parse.urlencode(params)
[ "def __build_query(self, user: str = None) -> None:\n # Optional params: start_time,end_time,since_id,until_id,max_results,next_token,\n # expansions,tweet.fields,media.fields,poll.fields,place.fields,user.fields\n self.__query = {'query': \"\"}\n\n if self.__twitter_keyword:\n self.__query['query'] = str(self.__twitter_keyword)\n if user is not None:\n if self.__twitter_keyword:\n self.__query['query'] += \" from: \" + str(user)\n else:\n self.__query['query'] += \"from: \" + str(user)\n\n if self.__twitter_lang:\n self.__query['query'] += \" lang:\" + self.__twitter_lang\n if self.__twitter_place:\n self.__query['query'] += \" place:\" + self.__twitter_place\n if self.__twitter_place_country:\n self.__query['query'] += \" place_country:\" + self.__twitter_place_country\n if self.__twitter_all_tweets:\n if self.__twitter_context_annotations:\n self.__query['max_results'] = str(100)\n else:\n self.__query['max_results'] = str(500)\n\n # if is specified a number of result to request\n elif self.__twitter_n_results:\n # if the specified number is greater than 500 set the max_result query field to the max value possible so\n # 500.\n if self.__twitter_context_annotations:\n if self.__twitter_n_results > 100:\n self.__query['max_results'] = str(100)\n elif self.__twitter_n_results > 500:\n self.__query['max_results'] = str(500)\n # if the specified number is less than 10 set the max_result field to the min value possible so 10\n elif self.__twitter_n_results < 10:\n self.__query['max_results'] = str(10)\n # else if the value is between 10 and 500 set the max_result field query to the value given\n else:\n self.__query['max_results'] = str(self.__twitter_n_results)\n\n if self.__twitter_bounding_box:\n self.__query['query'] += \" bounding_box:\" + \"[\" + self.__twitter_bounding_box + \"]\"\n elif self.__twitter_point_radius_longitude:\n self.__query['query'] += \" point_radius:\" + \"[\" + str(self.__twitter_point_radius_longitude) + \" \" + str(\n self.__twitter_point_radius_latitude) + \" \" + self.__twitter_point_radius_radius + \"]\"\n if self.__twitter_filter_retweet is True:\n self.__query['query'] += \" -is:retweet\"\n self.__query['place.fields'] = \"contained_within,country,country_code,full_name,geo,id,name,place_type\"\n self.__query['expansions'] = 'author_id,geo.place_id,referenced_tweets.id'\n self.__query['tweet.fields'] = 'lang,referenced_tweets,public_metrics,entities,created_at,possibly_sensitive'\n self.__query['user.fields'] = 'username,location'\n\n if self.__twitter_context_annotations:\n self.__query['tweet.fields'] += ',context_annotations'\n if self.__twitter_start_time:\n self.__query['start_time'] = str(self.__twitter_start_time)\n if self.__twitter_end_time:\n self.__query['end_time'] = str(self.__twitter_end_time)", "def build_query(ctx: click.core.Context, location: str) -> dict:\n\n query = {\n 'appid': ctx.obj['api_key'],\n 'units': 'imperial',\n }\n\n city_data = get_city_data()\n locations = [city.lower() for city in city_data]\n\n if location.isdigit():\n query['id'] = location\n elif location.lower() in locations:\n query['id'] = city_data[location.title()]['id']\n else:\n query['q'] = location\n\n return query", "def build_query(self):\n\n query = super(FilteredTableSourceMixin, self).build_query()\n\n if self.config.filterlist_available:\n query = self.extend_query_with_filter(query)\n\n if self.config.subject_filter_available:\n SubjectFilter(self.config.context, self.request).update_query(query)\n\n return query", "def _build_db_query(self):\n\n 
base_query = \"select * from \" + self._trim_db_measure_param()\n if all([self.db_params['db_where_jkey'], self.db_params['db_where_comp_id']]):\n self.db_params['db_where_key'] = self.db_params['db_where_jkey'] + \" and \" + \\\n self.db_params['db_where_comp_id']\n elif self.db_params['db_where_jkey']:\n self.db_params['db_where_key'] = self.db_params['db_where_jkey']\n elif self.db_params['db_where_comp_id']:\n self.db_params['db_where_key'] = self.db_params['db_where_comp_id']\n else:\n t.log(level='info', message=base_query)\n base_query = base_query + \" limit \" + str(self.db_params['db_limit']) + \";\"\n return base_query\n base_query = base_query + \" where \" + self.db_params['db_where_key'] + \" limit \" \\\n + str(self.db_params['db_limit']) + \";\"\n t.log(level='info', message=base_query)\n t.log(level='info', message=base_query)\n return base_query", "def build_catalog_query(**kw):\n query = {}\n\n # note: order is important!\n query.update(get_request_query())\n query.update(get_custom_query())\n query.update(get_keyword_query(**kw))\n\n logger.info(\"build_catalog_query::query=%s\" % query)\n return query", "def build_query(self, params, **kwargs):\n\n if params.get('bbox'):\n self.facets['bbox'] = 'bbox.coordinates'\n\n query = super().build_query(params, **kwargs)\n\n # Add sorting\n query['sort'] = [\n {\n \"dataType.keyword\": {\n \"order\": \"asc\"\n }\n },\n {\n \"processingLevel.keyword\": {\n \"order\": \"desc\"\n }\n },\n {\n \"productString.keyword\": {\n \"order\": \"asc\"\n }\n },\n {\n \"productVersion.keyword\": {\n \"order\": \"desc\"\n }\n },\n {\n \"title.keyword\": {\n \"order\": \"asc\"\n }\n }\n ]\n\n pid = params.get('parentIdentifier')\n\n if pid:\n\n query['query']['bool']['must'].append({\n 'term': {\n 'parent_identifier': pid\n }\n })\n\n else:\n query['query']['bool']['must_not'].append({\n 'exists': {\n 'field': 'parent_identifier'\n }\n })\n\n return query", "def _make_query(self, query=None, order_by=None, reverse=None):\n if query or order_by or reverse:\n q = gdata.spreadsheet.service.ListQuery()\n if query:\n q.sq = query\n if order_by:\n q.orderby = order_by\n if reverse:\n q.reverse = reverse\n return q\n else:\n return None", "def generate_query(self):\n self.query = self._add_select_statement() +\\\n self._add_case_statement() +\\\n self._add_from_statement() +\\\n self._add_group_by_statement()\n\n return self.query", "def create_query(self, query_json):\n\n set_name = \"set-none\"\n if 'predicates' in query_json:\n set_name = self.add_set(query_json['predicates'])\n\n is_polar = query_json.get('isPolar', None)\n if is_polar is None:\n tag = \"NLQuery\"\n self.statement_xml = \"<NLQueryStatement>%s</NLQueryStatement>\" % query_json.get(\n 'nlQuery', '')\n\n # Add additional time intervals\n for time_obj in query_json.get('times', []):\n self.add_time(time_obj)\n\n else:\n if is_polar:\n tag = \"Query\"\n else:\n tag = \"NonePolarQuery\"\n self.add_statement(set_name, query_json['isPolar'])\n\n return self.compile_query(tag)", "def build_query(self, query_string: str) -> WikidataQuery:\n return WikidataQuery(query_string)", "def BuildSubQuery(self):\n MultipleValuePairs = ''\n #This is to make sure psycopg2 uses the correct %s values\n sqlidx=0\n for ivaluedict in self.ConditionColumns:\n if MultipleValuePairs:\n MultipleValuePairs += \" OR \"\n MultipleValuePairs += \"({})\".format(self.BuildSubqString(ivaluedict,sqlidx))\n sqlidx += 1\n self.subquery = \"\"\"SELECT align_id FROM {} WHERE {} 
\"\"\".format(Db.searched_table,MultipleValuePairs)", "def build_where_condition(self, query_dict):\n analysis_type = query_dict.get(\"analysis_type\")\n where = (\" WHERE biotype.`biomimic_type`=\\'%s\\'\") % query_dict['biomimic_type']\n if query_dict.get('country') is not None:\n where += \" AND geo.`country`=\\'%s\\'\" % (query_dict['country'])\n if query_dict.get('state_province') is not None:\n where += \" AND geo.`state_province`=\\'%s\\'\" % (query_dict['state_province'])\n if query_dict.get('location') is not None:\n where += \" AND geo.`location`=\\'%s\\'\" % (query_dict['location'])\n if (query_dict.get('zone') is not None) and (query_dict.get('zone') != 'All'):\n where += \" AND prop.`zone`=\\'%s\\'\" % (query_dict.get('zone'))\n if (query_dict.get('sub_zone') is not None) and (query_dict.get('sub_zone') != 'All'):\n if query_dict.get('sub_zone') == 'N/A':\n where += \" AND prop.sub_zone is Null\"\n else:\n where += \" AND prop.`sub_zone`=\\'%s\\'\" % (query_dict.get('sub_zone'))\n if (query_dict.get('wave_exp') is not None) and (query_dict.get('wave_exp') != 'All'):\n if query_dict.get('wave_exp') == 'N/A':\n where += \" AND prop.wave_exp is Null\"\n else:\n where += \" AND prop.`wave_exp`=\\'%s\\' \" % (query_dict.get('wave_exp'))\n if (query_dict.get('start_date') is not None) and (query_dict.get('end_date') is not None):\n where += \"\"\" AND cast(temp.Time_GMT as date) >= \\'%s\\'\n AND cast(temp.Time_GMT as date) <= \\'%s\\'\"\"\" % \\\n (query_dict.get('start_date'), query_dict.get('end_date'))\n if analysis_type == \"Daily\":\n where += \" GROUP BY cast(temp.Time_GMT as date)\"\n elif analysis_type == \"Monthly\":\n where += \"\"\" GROUP BY YEAR(temp.Time_GMT), MONTHNAME(temp.Time_GMT)\n ORDER BY YEAR(temp.Time_GMT), MONTH(temp.Time_GMT) ASC\"\"\"\n elif analysis_type == \"Yearly\":\n where += \" GROUP BY YEAR(temp.Time_GMT)\"\n else:\n pass\n return where", "def build_query(self, query_string: str) -> Query:\n raise EntityMapperNotImplemented", "def _makeRequestForQuery(self, query):\n elem = XACMLAuthzDecisionQueryElementTree.toXML(query)\n soapRequest = SOAPEnvelope()\n soapRequest.create()\n soapRequest.body.elem.append(elem)\n\n request = soapRequest.serialize()\n\n return request", "def get_full_query(self):\n query = self.get_query()\n post_filters = self.get_post_filters()[\"bool\"][\"filter\"]\n query[\"bool\"][\"filter\"].extend(post_filters)\n return query", "def build_parameters(self) -> None:\n if not self.query_string:\n return\n\n query_parser = CardQueryParser()\n try:\n self.root_parameter = query_parser.parse(self.query_string)\n # TODO\n self.sort_params = []\n except (ParseError, ValueError) as error:\n self.error_message = str(error)", "def createQueryString(self, criteria):\n query = self.createQuery(criteria)\n\n #adapter = self.__proof.getAdapter(criteria.getDbName())\n #\n #limit = criteria.getLimit();\n #offset = criteria.getOffset()\n #\n #sql = ''\n #if (limit or offset) and \\\n # adapter.getLimitStyle() == SQLConstants.LIMIT_STYLE_ORACLE:\n # raise ProofException.ProofNotImplementedException( \\\n # \"ORACLE support is not implemented yet!\" )\n #else:\n # if offset and adapter.supportsNativeOffset():\n # # Now set the criteria's limit and offset to return the\n # # full resultset since the results are limited on the\n # # server.\n # criteria.setLimit(-1)\n # criteria.setOffset(0)\n # elif limit and adapter.supportsNativeLimit():\n # # Now set the criteria's limit to return the full\n # # resultset since the results are limited on the 
server.\n # criteria.setLimit(-1)\n\n sql = str(query)\n\n self.log( \"SQL: %s\" % (sql) )\n\n return sql", "def get_filter_query(self, data):\n filters = []\n\n for key, value in self.filter_map.items():\n #if key in data.keys():\n # Yep, that bit is in the request. Go get it and add it to\n # our filters.\n if isinstance(value, dict):\n if \"type\" in value and value.get(\"type\") == \"date\":\n filter_str = self.handle_date_filter(value, data)\n if filter_str:\n filters.append(filter_str)\n elif key in data.keys():\n sub_filters = []\n for sub_key, sub_value in value.items():\n sub_filters.append(f\"{sub_value}:{data.get(sub_key)}\")\n filters.append(f\"({' AND '.join(sub_filters)})\")\n elif key in data.keys():\n filters.append(f\"{str(value)}:{data.get(key)}\")\n return \" AND \".join(filters)", "def prepareQuery( self ):\n self._queryWidget.setQuery(self.query())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch suggestions for the given station name from the backend.
async def _fetch_sugestions( self, name: str, max_results: int ) -> List[Optional[Dict]]: params: Dict[str, Union[str, int]] = { "getstop": 1, "REQ0JourneyStopsS0A": max_results, "REQ0JourneyStopsS0G": name, } url = base_url(GETSTOP_PATH) + urllib.parse.urlencode(params) _LOGGER.debug("URL: %s", url) response = await self._query_rmv_api(url) data = extract_json_data(response) try: json_data = json.loads(data) except (TypeError, json.JSONDecodeError) as err: _LOGGER.debug("Error in JSON: %s...", data[:100]) raise RMVtransportError(err) from err return list(json_data["suggestions"][:max_results])
[ "def _get_suggestions (self, state):\n\t\treturn SATClient().doSuggestStandards (self.nses_id, state, self.band)", "def google_suggestion(band_name):\r\n\r\n try:\r\n google_suggestion_url = (\r\n \"http://suggestqueries.google.com/complete/search?client=chrome&q=\"\r\n + band_name\r\n )\r\n\r\n url_response = requests.get(google_suggestion_url)\r\n\r\n suggestion_list = json.loads(url_response.content.decode(\"utf-8\"))\r\n\r\n if len(suggestion_list) > 1:\r\n suggestion_band_name = suggestion_list[1][0]\r\n return suggestion_band_name\r\n except:\r\n return None", "def search_suggest():\n user_input = request.args.get('text')\n latitude = request.args.get('latitude', DEFAULT_LATITUDE)\n longitude = request.args.get('longitude', DEFAULT_LONGITUDE)\n\n if not user_input:\n return json.dumps({})\n\n yelp_session_obj = YelpAPI(api_key=YELP_API_KEY)\n autocomplete_suggestions = yelp_session_obj.autocomplete_query(\n text=user_input,\n latitude=latitude,\n longitude=longitude,\n )\n\n response = {\n 'businesses': autocomplete_suggestions['businesses'],\n 'categories': autocomplete_suggestions['categories'],\n }\n return json.dumps(response)", "def search(self, description, use_keywords=False, limit=5):\n for name in self._playlist.search(description, use_keywords, limit):\n station = self._playlist.get_station(name)\n print(station)", "def make_autocomplete():\n result = [s[0] for s in db.session.query(Strain.s_name).all()]\n return result", "def autocomplete(request):\n q = request.GET['q']\n\n field = 'title_t'\n clean_params = {\n # TODO: use qt=dismax\n 'q': '*:*',\n 'facet': 'true',\n 'facet.field': field,\n 'facet.limit': '10',\n 'facet.prefix': q,\n 'rows': '0',\n 'wt': 'json',\n }\n \n u = settings.SOLR_SERVER + \"?\" + urlencode(clean_params, \n doseq=1 # Must use this because urlencode doesn't handle Unicode otherwise\n )\n open_req = urllib2.Request(u)\n request = urllib2.urlopen(open_req)\n raw = request.read()\n json = simplejson.loads(raw)\n \n facets = json['facet_counts']['facet_fields'][field]\n # facets is a list ['result1', count1, 'result2', count2...]\n suggestions = \"\"\n for i in range(len(facets) / 2): # We ignore the counts\n suggestions += \"%s\\n\" % facets[i * 2]\n \n return HttpResponse(suggestions, mimetype='text/plain')", "def suggest(self, query, suggester, size=None):\n uri = '/2013-01-01/suggest'\n params = {}\n headers = {}\n query_params = {}\n if query is not None:\n query_params['q'] = query\n if suggester is not None:\n query_params['suggester'] = suggester\n if size is not None:\n query_params['size'] = size\n return self.make_request('GET', uri, expected_status=200,\n data=json.dumps(params), headers=headers,\n params=query_params)", "def station_by_name(self, name):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if name == _[\"properties\"][\"name\"]]\n log.debug(\"searching for station {} found {}\".format(name, station))\n return station[0]\n except:\n log.debug(\"Exception: searching for station {} found None\".format(name))\n return None", "def autocomplete(self, str):\n return list(cognipy_call(self._uid, \"AutoComplete\", str))", "def getPlaces(place):\r\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/US/USD/en-US/\"\r\n querystring = {\"query\": place}\r\n\r\n return requests.request(\"GET\", url, headers = headers, params = querystring).json()['Places']", "def term_suggest(self, querybuilder, callback=None):\n query_params = self._get_params(querybuilder)\n\n 
log.debug('term_suggest with params: %s' % query_params)\n qs = urllib.urlencode(query_params)\n final_url = '?'.join([self._termsuggest_url, qs])\n log.debug('Final suggest URL: %s' % final_url)\n\n self._get(final_url, headers=querybuilder.headers,\n callback=handle_suggest_response(querybuilder, callback))", "def getClosestStation(query):\n return maps.queryVenue(\"train stations near \" + query).partition(\" \")[0] # get just name of station", "def suggest(request,category,field):\n\n\t###EXTRACTION OF SESSION DATA###\n\tstore_data = request.session[category]\n\tfield_list=store_data['field']\n\tqu=store_data['dict']\n\tqu=qu[field]\n\tr=db.ask(qu)\n\n\tuser=request.session['user']\n\tif request.user.is_authenticated():\n\t\t\t\trequest.session['cur_queryset']=r\n\n\t###RENDERING RESPONSE###\n\tif r:\n\t\treturn render_to_response('suggest_result.html', {'query':r, 'field':field_list, 'user': user, 'category':category})\n\telse:\n\t\treturn render_to_response('suggest_result.html', {'query':\"No results\", 'category':CAT, 'user': user})", "def test_suggestions(self):\r\n self.setPWLContents([\"Sazz\",\"Lozz\"])\r\n d = request_pwl_dict(self._path())\r\n self.assertTrue(\"Sazz\" in d.suggest(\"Saz\"))\r\n self.assertTrue(\"Lozz\" in d.suggest(\"laz\"))\r\n self.assertTrue(\"Sazz\" in d.suggest(\"laz\"))\r\n d.add(\"Flagen\")\r\n self.assertTrue(\"Flagen\" in d.suggest(\"Flags\"))\r\n self.assertFalse(\"sazz\" in d.suggest(\"Flags\"))", "def search_suggestions(self):\n suggestions = _get_search_suggestions([self.title])\n return suggestions", "def _get_suggestions_index(name):\n return f'df_suggestions_{name}'", "async def test_search_systems_by_name(galaxy_fx):\n nearest = await galaxy_fx.search_systems_by_name(\"Fualun\")\n assert 'FOLNA' in nearest\n assert 'FEI LIN' in nearest\n assert 'FEI LIAN' in nearest", "def autocomplete_search(name_searched):\n search_results = wikipedia.search(name_searched)\n if len(search_results) < 1:\n return None\n return search_results[0]", "def _fuzzy_match(name, stations):\n st_names = {s['name'].lower(): s for s in stations}\n best_names = difflib.get_close_matches(name.lower(), st_names, n=1)\n if not best_names:\n log.info(\"Didn't find a match for station \\\"%s\\\".\" % name)\n return []\n else:\n log.info('Heard \"%s\", matching with station \"%s\".' %\n (name, best_names[0]))\n return [st_names[best_names[0]]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the product filter.
def product_filter(products) -> str:
    _filter = sum({PRODUCTS[p] for p in products})
    return format(_filter, "b")[::-1]
[ "def processFilter(self, pInputData):\n return _almathswig.DigitalFilter_processFilter(self, pInputData)", "def get_all_filtered(self):\n products = self.__repository.get_all()\n if self.__name_filter != \"\":\n products = [product for product in products if self.__name_filter in product.name]\n if self.__price_filter != \"-1\":\n products = [product for product in products if product.price == self.__price_filter]\n return products, self.__name_filter, self.__price_filter", "def filter(self, filter_params):\n pass", "def calc_filter_properties(self):\n\n # dlambda\n diff = self.ls - np.roll(self.ls, 1)\n # useful integral region\n m = (self.tran_ls > 0) & (diff > 0)\n\n # mean wavelength\n self.mean = np.exp((self.tran_ls[m] * np.log(self.ls[m]) * diff[m] /\n self.ls[m]).sum() /\n (self.tran_ls[m] * diff[m] / self.ls[m]).sum())\n # pivot wavelength\n self.pivot = np.sqrt((self.tran_ls[m] * self.ls[m] * diff[m]).sum() /\n (self.tran_ls[m] * diff[m] / self.ls[m]).sum())\n # average wavelength\n self.average = (self.tran_ls[m] * self.ls[m] * diff[m]).sum() / (\n self.tran_ls[m] * diff[m]).sum()\n # effective dimensionless gaussian width\n self.sig = np.sqrt((self.tran_ls[m] * np.log(self.ls[m] / self.mean)\n **2.0 * diff[m] / self.ls[m]).sum() /\n (self.tran_ls[m] * diff[m] / self.ls[m]).sum())\n # effective width\n self.width = 2.0 * np.sqrt(2.0 * np.log(2.0)) * self.sig * self.mean\n # equivalent width\n self.equivalent_width = (self.tran_ls[m] * diff[m]).sum()\n # rectangular width\n self.rectangular_width = self.equivalent_width / self.tran_ls[m].max()", "def _product_offer(self): # double is private, single for show.\n for k in self.model.clusters:\n for j in self.model.products:\n exp = pyo.quicksum(self.model.x[cluster, customer, product] for cluster, customer, product in self.model.ccp if (cluster==k and product==j))\n self.model.product_offer.add(exp == self.tactical_model.y[k, j].value)", "def get_filter_prod_activity(state, shiftLR): \n pix_shift = int(round(shiftLR / state['downsample'])) #dividing shift by downsample\n min_L_x = max(0,-pix_shift) #starting point for left filtered\n min_R_x = max(0,pix_shift) #starting point for right filtered\n # state.filtered_img.shape[2] is the filtered image width after convolution\n size_x = state['filtered_img'].shape[2] - abs(pix_shift)\n L_filtered_shifted = state['filtered_img'][:,0,min_L_x:min_L_x+size_x,:]\n R_filtered_shifted = state['filtered_img'][:,1,min_R_x:min_R_x+size_x,:]\n filter_prod_activity = L_filtered_shifted * R_filtered_shifted\n return filter_prod_activity", "def filter_pressure(self):\n\n pass", "def get_energy(filtered_signal):\n return sum([x*2 for x in filtered_signal])", "def filter(self):\n # notch filter and bandpass filter\n self.eegs[0].filter(self.config['bandpass'][0], self.config['bandpass'][1])\n self.eegs[1].filter(self.config['bandpass'][0], self.config['bandpass'][1])", "def __compute_utility(self, product_price, ave_product_price, product_quality,\n ave_product_quality, advertise_effect, herd_effect, product_diversity, offline_exp_effect):\n utility = (-self.price_sensitivity**(product_price-ave_product_price)\n + self.social_economic_negative_factor)*product_price\n utility += (-self.quality_sensitivity**(product_quality-ave_product_quality)\n +self.social_economic_positive_factor)*product_quality\n utility += self.advertise_sensitivity * advertise_effect\n utility += self.herd_sensitivity * herd_effect\n utility += self.variety_sensitivity * product_diversity\n utility += 
self.offline_experience_factor * offline_exp_effect\n return utility", "def test_compute_butter_lp_filter(self):\n\n # Configure the filter\n\n parameters = {'passband_frequency': 10,\n 'stopband_frequency': 100,\n 'passband_attenuation': 1,\n 'stopband_attenuation': 80}\n\n self.filter_under_test.filter_class = 'butterworth'\n\n self.filter_under_test.configure_filter(parameters)\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.N, 5)\n self.assertEqual(self.filter_under_test.Wn, 71.92210683023319)\n\n self.filter_under_test.design()\n self.assertAlmostEqual(self.filter_under_test.B[0], 1924473804.6221437)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1.00000000e+00)\n self.assertAlmostEqual(self.filter_under_test.A[1], 232.744826787636)\n self.assertAlmostEqual(self.filter_under_test.A[2], 27085.0771982035)\n self.assertAlmostEqual(self.filter_under_test.A[3], 1948015.8157543)\n self.assertAlmostEqual(self.filter_under_test.A[4], 86589900.200991)\n self.assertAlmostEqual(self.filter_under_test.A[5], 1924473804.6221435)\n\n self.filter_under_test.compute_parameters(target='stopband')\n self.assertEqual(self.filter_under_test.N, 5)\n self.assertAlmostEqual(self.filter_under_test.Wn, 99.5817763027)\n self.filter_under_test.design()\n\n self.assertAlmostEqual(self.filter_under_test.B[0], 9792629962.0921497)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[1], 322.253397435715)\n self.assertAlmostEqual(self.filter_under_test.A[2], 51923.626079522102)\n self.assertAlmostEqual(self.filter_under_test.A[3], 5170646.9170805747)\n self.assertAlmostEqual(self.filter_under_test.A[4], 318227063.34817797)\n self.assertAlmostEqual(self.filter_under_test.A[5], 9792629962.0921497)", "def _smooth_price_data(self, sigma):\n self.High = features.gaussian_filter(self.High_raw, sigma)\n self.Low = features.gaussian_filter(self.Low_raw, sigma)\n self.Close = features.gaussian_filter(self.Close_raw, sigma)\n self.Open = features.gaussian_filter(self.Open_raw, sigma)\n self.Volume = features.gaussian_filter(self.Volume_raw, sigma)", "def filter(self, stack) -> None:\n low_pass = partial(self.low_pass, sigma=self.sigma)\n stack.image.apply(low_pass, is_volume=self.is_volume, verbose=self.verbose)\n\n # apply to aux dict too:\n for auxiliary_image in stack.auxiliary_images.values():\n auxiliary_image.apply(low_pass, is_volume=self.is_volume)", "def productivityMultiplier(self) -> float:\n return self._getMultiplier('productivity')", "def update_filter_2ndOrder(self):\n # Second-Order Filter\n self.xf_dot = self.xf_dot + (self.dt * xf_2dot(self.x,self.xf,self.xf_dot,k_e))\n self.xf = self.xf + (self.dt * self.xf_dot)", "def test_compute_butter_bs_filter(self):\n\n parameters = {'passband_frequency': [1, 25],\n 'stopband_frequency': [2, 15],\n 'passband_attenuation': 1,\n 'stopband_attenuation': 40}\n\n self.filter_under_test.filter_class = 'butterworth'\n self.filter_under_test.configure_filter(parameters)\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.filter_type, 'bandstop')\n self.assertEqual(self.filter_under_test.N, 9)\n self.assertAlmostEqual(self.filter_under_test.Wn[0], 8.0681551)\n self.assertAlmostEqual(self.filter_under_test.Wn[1], 146.79336908)\n self.filter_under_test.design()\n\n target_B_coefs = [1, 0, 10659.165019231212, 0, 50496799.514312126,\n 0, 139547260472.68924, 0, 247909546226676.62,\n 0, 2.9361208478586816e+17, 0, 
2.3182664173133981e+20,\n 0, 1.176704014318369e+23, 0, 3.484078407614217e+25,\n 0, 4.5848600847778207e+27]\n target_A_coefs = [1, 798.886667535808, 329769.118807736,\n 90767049.233665258,\n 18246055317.574032, 2779764493559.9541,\n 323737622832967.31,\n 28439472756901696.0, 1.8160086510982392e+18,\n 7.8893059749655937e+19, 2.1507928764897341e+21,\n 3.9891732058277967e+22, 5.3781783783668304e+23,\n 5.4692869382179803e+24, 4.2517955086597104e+25,\n 2.5050262147266555e+26, 1.0778906831551873e+27,\n 3.092648654056598e+27, 4.5848600847778174e+27]\n\n for pos, B in enumerate(target_B_coefs):\n self.assertAlmostEqual(self.filter_under_test.B[pos], B, places=4)\n self.assertAlmostEqual(self.filter_under_test.A[pos],\n target_A_coefs[pos], places=4)\n\n self.filter_under_test.compute_parameters(target='stopband')\n self.assertEqual(self.filter_under_test.N, 9)\n self.assertAlmostEqual(self.filter_under_test.Wn[0],\n 8.19898674504612, places=4)\n self.assertAlmostEqual(self.filter_under_test.Wn[1],\n 144.451038642691, places=4)\n self.filter_under_test.design()\n\n target_B_coefs = [0.999999999999997, 0, 10659.165019231235,\n 0, 50496799.514312387,\n 0, 139547260472.68756, 0, 247909546226678.84, 0,\n 2.936120847858713e+17, 0, 2.3182664173134024e+20,\n 0, 1.1767040143183764e+23, 0,\n 3.4840784076142471e+25, 0, 4.5848600847778548e+27]\n\n target_A_coefs = [1, 784.644294735911, 318492.53822460608,\n 86261284.909445286, 17072986548.141478,\n 2562592979207.1934, 294287704148591.62,\n 25527354190801672.0, 1.6135947979406876e+18,\n 6.9745276318961222e+19, 1.9110636917136504e+21,\n 3.5806935741770867e+22, 4.8889336791349288e+23,\n 5.0419941479279025e+24, 3.9784406142229909e+25,\n 2.3806742847603819e+26, 1.0410318007203891e+27,\n 3.0375137958120487e+27, 4.5848600847778592e+27]\n\n for pos, B in enumerate(target_B_coefs):\n print(\"pos = \", pos)\n self.assertAlmostEqual(self.filter_under_test.B[pos], B, places=4)\n self.assertAlmostEqual(self.filter_under_test.A[pos],\n target_A_coefs[pos], places=4)", "def test_compute_butter_hp_filter(self):\n\n # Compute a high-pass filter\n\n parameters = {'passband_frequency': 100,\n 'stopband_frequency': 10,\n 'passband_attenuation': 1,\n 'stopband_attenuation': 80}\n\n self.filter_under_test.filter_class = 'butterworth'\n self.filter_under_test.configure_filter(parameters)\n\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.N, 5)\n self.assertAlmostEqual(self.filter_under_test.Wn, 548.90518846372663)\n\n self.filter_under_test.design()\n self.assertAlmostEqual(self.filter_under_test.B[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[1], 1776.29450307095)\n self.assertAlmostEqual(self.filter_under_test.A[2], 1577611.08082004)\n self.assertAlmostEqual(self.filter_under_test.A[3], 865958907.63998842)\n self.assertAlmostEqual(self.filter_under_test.A[4], 293769686363.14844)\n self.assertAlmostEqual(self.filter_under_test.A[5], 49829517234887.664)\n\n self.filter_under_test.compute_parameters(target='stopband')\n self.assertEqual(self.filter_under_test.N, 5)\n self.assertAlmostEqual(self.filter_under_test.Wn, 396.442191233058)\n\n self.filter_under_test.design()\n self.assertAlmostEqual(self.filter_under_test.B[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[1], 1282.91387997915)\n self.assertAlmostEqual(self.filter_under_test.A[2], 822934.011721574)\n 
self.assertAlmostEqual(self.filter_under_test.A[3], 326245762.84711146)\n self.assertAlmostEqual(self.filter_under_test.A[4], 79935023616.862701)\n self.assertAlmostEqual(self.filter_under_test.A[5], 9792629864165.8633)", "def filter_variants(self, min_dp=10, min_gq=0, min_vaf=20, max_vaf=100, min_prct_cells=25, min_mut_prct_cells=0.5, min_mut_num_cells=None, min_std=0, method='mb', min_alt_read = 5):\n \n gt = self.layers[NGT]\n dp = self.layers[DP]\n gq = self.layers[GQ]\n vaf = self.layers[AF]\n\n # @ HZ: filter on alternative reads absolute value\n if min_alt_read > 0 and 'alt_read_count' in self.layers:\n alt = self.layers['alt_read_count']\n alt_keep = alt >= min_alt_read\n elif min_alt_read > 0 and 'alt_read_count' not in self.layers:\n print('alt_read_count not calculated, calculate now')\n alt = (np.multiply(vaf, dp)/100).astype(int)\n self.add_layer('alt_read_count', alt)\n alt_keep = alt >= min_alt_read\n else:\n alt_keep = 1\n\n dp_keep = dp >= min_dp\n gq_keep = gq >= min_gq\n min_vaf_keep = ~np.logical_and(vaf < min_vaf, gt == 1)\n max_vaf_keep = ~np.logical_and(vaf > max_vaf, gt == 1)\n gt = (gt - 3) * dp_keep * gq_keep * min_vaf_keep * max_vaf_keep * alt_keep + 3 # workaround to apply filter in one line\n # ^^^ \n # @HZ: this is dangerous since this will only trim down variants already filtered by the default thresholds\n\n self.add_layer(NGT_FILTERED, gt)\n\n num_cells = len(self.barcodes())\n \n ##############################################################\n # @HZ: different way of filtering based on read depth per cell\n if method == 'mb':\n min_cells_filter = np.isin(gt, [0, 1, 2]).sum(axis=0) > num_cells * min_prct_cells / 100\n elif method == 'hz':\n min_cells_filter = dp_keep.sum(axis=0) > num_cells * min_prct_cells / 100\n else:\n print(\"method should be either 'mb' or 'hz' \")\n raise NotImplementedError\n ########################################\n\n if min_mut_num_cells is not None:\n if min_mut_prct_cells is not None:\n print(\"only one of [min_mut_prct_cells] and [min_mut_num_cells] should be input \")\n raise NotImplementedError\n elif not (0 <= min_mut_num_cells < num_cells):\n print(\"[min_mut_num_cells] should be greater than or equal to zero and smaller than the total number of cells in the sample\")\n raise ValueError\n\n else:\n min_cells_mut_filter = np.isin(gt, [1, 2]).sum(axis=0) > min_mut_num_cells\n else:\n min_cells_mut_filter = np.isin(gt, [1, 2]).sum(axis=0) > round(num_cells * min_mut_prct_cells / 100)\n\n good_variants = min_cells_mut_filter * min_cells_filter\n\n final_filter = (vaf.std(axis=0) >= min_std) * good_variants\n\n # @HZ: add reason for exclusion as a layer \"filter_info\" to each variant-cell pair\n \n # dp_fil = np.char.array(np.where(~dp_keep, 'dp', ''))\n # gq_fil = np.char.array(np.where(~gq_keep, 'gq', ''))\n # min_vaf_fil = np.char.array(np.where(~min_vaf_keep, 'min_vaf', ''))\n # max_vaf_fil = np.char.array(np.where(~max_vaf_keep, 'max_vaf', ''))\n\n # filter_info = dp_fil + ' ' + gq_fil + ' ' + min_vaf_fil + ' ' + max_vaf_fil\n # self.add_layer('filter_info', filter_info)\n\n # min_cells_fil = np.char.array(np.where(~min_cells_filter, 'min_cells_covered', ''))\n # min_cells_mut_fil = np.char.array(np.where(~min_cells_mut_filter, 'min_cells_mut', ''))\n \n # var_filter_info = min_cells_fil + ' ' + min_cells_mut_fil\n\n # var_filter_info_dict = {}\n # for variant, info in zip(self.col_attrs[ID], var_filter_info):\n # var_filter_info_dict[variant] = info\n\n return self.col_attrs[ID][final_filter].astype(str)", "def 
__filtering(data,low,high,freq):\n bplowcut = low/(freq*0.5)\n bphighcut = high/(freq*0.5)\n [b,a] = sig.butter(N=3,Wn=[bplowcut,bphighcut],btype='bandpass')\n filtered = sig.filtfilt(b,a,data)\n\n return filtered" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to fix known issues in XML data.
def fix_xml(data: bytes, err: etree.XMLSyntaxError) -> Any:
    xml_issue = data.decode().split("\n")[err.lineno - 1]
    if xml_issue not in KNOWN_XML_ISSUES.keys():
        _LOGGER.debug("Unknown xml issue in: %s", xml_issue)
        raise RMVtransportError()
    return data.decode().replace(xml_issue, KNOWN_XML_ISSUES[xml_issue]).encode()
[ "def test_invalid_xml_handling(self):\n sample_invalid_xml = textwrap.dedent(\"\"\"\n <problem>\n </proble-oh no my finger broke and I can't close the problem tag properly...\n \"\"\")\n with pytest.raises(etree.XMLSyntaxError):\n self._create_descriptor(sample_invalid_xml, name=\"Invalid XML\")", "def fix_fixmes(self):\n flines = self.fixme_lines()\n tries = 0\n while flines:\n self.try_fixes(flines)\n flines = self.fixme_lines()\n tries += 1\n if tries >= 100:\n raise ParseError(\"Can't parse FIXME lines: %s\" % (flines))", "def _valid_xml(self, xml):\n if not (os.path.exists(xml) and valid_version(xml)):\n rcg, ext = os.path.splitext(xml)\n if os.path.isfile(rcg):\n warnmsg=\"xml was not valid. Recreating from {0}.\".format(rcg)\n logging.warn(warnmsg)\n xml=rcgtoxml(rcg,convert=False)\n if not(os.path.exists(xml) and valid_version(xml)):\n # we still couldn't get the right xml.\n errmsg=\"even after trying to recreate the correct xml file could\"\n errmsg+=\"not be obtained.\"\n raise StatisticsError(errmsg)\n else:\n # print the appropriate error message when we could not have the\n # correct version xml and could not recover.\n if not os.path.exists(xml):\n errmsg=\"{0} not found. Could not find the rcg file to\"\n errmsg+=\"recreate.\"\n elif not valid_version(xml):\n errmsg=\"{0} version was wrong.\"\n errmsg+=\"rcg file to recreate was not found.\"\n raise StatisticsError(errmsg.format(xml))\n # when we get this far without an exception, everything is OK\n return xml", "def xml_validate(self):\n self.tree = Validator.parse(self.content)", "def fix_validation_problems(soup):\n \n # remove the attributes span.c and span.ce used in ast_to_html\n for e in soup.select('span[c]'):\n del e.attrs['c']\n for e in soup.select('span[ce]'):\n del e.attrs['ce']\n\n also_remove = ['figure-id', 'figure-class', 'figure-caption']\n also_remove.extend('make-col%d' % _ for _ in range(1, 12))\n \n for a in also_remove:\n for e in soup.select('[%s]' % a):\n del e.attrs[a]\n \n # add missing type for <style>\n for e in soup.select('style'):\n if not 'type' in e.attrs:\n e.attrs['type'] = 'text/css'\n\n if False:\n for e in soup.select('span.MathJax_SVG'):\n style = e.attrs['style']\n style = style.replace('display: inline-block;' ,'/* decided-to-ignore-inline-block: 0;*/')\n e.attrs['style'] = style\n \n # remove useless <defs id=\"MathJax_SVG_glyphs\"></defs>", "def testXMLWithUknownData(self):\n self.XMLSchemaService.loadSchema('http://queue.amazonaws.com/doc/2008-01-01/QueueService.xsd', self)\n self.runLoop.run()\n assert(self.schema)\n parser = self.schema.newParser()\n parser.feed(message_response_with_uknown_elements)\n result = parser.finish()\n self.assertEqual('8f2770293f9b94ad705d5fd742f5f885', result.ReceiveMessageResult.Message[0].MD5OfBody)", "def fix_data_issues():\n\n for sensor_file in sensor_train_files:\n number_of_corrected_lines = 1\n with sensor_file.open(\"r+\", encoding=\"utf8\") as f:\n\n sensor_text = f.read()\n for sensor_line_index, sensor_data_line in enumerate(sensor_text.splitlines()):\n\n sensor_line_split_delimiter = sensor_data_line.split(\"\\t\")\n\n if len(sensor_line_split_delimiter) <= 10:\n pass\n else:\n sensor_split_lines = line_check(sensor_line_split_delimiter)\n print(\"Error lines found in file: \", sensor_file)\n print(\"# error lines\", len(sensor_split_lines))\n for correct_line_index, sensor_corrected_line in enumerate(sensor_split_lines):\n\n if correct_line_index == 0 and number_of_corrected_lines == 1:\n sensor_corrected_line_joined = (\n \"\\n\" + 
\"\\t\".join(sensor_corrected_line) + \"\\n\"\n )\n else:\n sensor_corrected_line_joined = \"\\t\".join(sensor_corrected_line) + \"\\n\"\n\n f.write(sensor_corrected_line_joined)\n\n print(\"Fixed error line: \", sensor_line_index)\n number_of_corrected_lines += 1", "def fix_feats(node):\n orig_feats = dict(node.feats)\n node.feats = None\n for name, value in sorted(orig_feats.items()):\n name = name.split('/')[1]\n if name == 'inflection_type':\n node.misc['InflectionType'] = value.capitalize()\n continue\n if \"antecedent\" in name and node.upos == 'PRON':\n node.feats[\"PronType\"] = \"Prs\"\n new = FEATS_CHANGE.get(name + '=' + value)\n if new is not None:\n if new != '':\n for new_pair in new.split('|'):\n new_name, new_value = new_pair.split('=')\n node.feats[new_name] = new_value\n elif name[0].isupper():\n node.feats[name] = value\n else:\n node.feats[name.capitalize()] = value.capitalize()\n\n # Don't loose info about proper names which will not have upos=PROPN.\n if node.feats['Proper'] == 'True':\n if node.xpos not in {'NNP', 'NNPS'}:\n node.misc['Proper'] = 'True'\n del node.feats['Proper']", "def fix_deployments(self,mode=\"update\",defaultSchedule=None):\n \n if self.syntax_error:\n raise Exception(\"invalid XML\")\n\n \n if self.has_been_fixed:\n txt = \"XML has already been fixed, doing nothing\"\n logger.warning(txt)\n print(txt)\n return False\n\n Station.__fix_deployments(self.xml_root,mode,defaultSchedule)\n \n try:\n xmlschema.assertValid(self.xml_root)\n logger.debug(\"XML schema sucessfully fixed. Schema valid.\")\n self.has_been_fixed=True\n self.invalid_schema=False\n return True\n except:\n return False", "def is_valid_xml(self):\n try:\n self.soup = BeautifulSoup(self.input_xml, self.features)\n return True\n except Exception as e:\n print(f\"{TAG} | Failed to parse the XML: {self.input_xml} \")\n return False", "def test_01_FindXml(self):\r\n self.assertEqual(self.m_xml.root.tag, 'PyHouse', 'Invalid XML - not a PyHouse XML config file')", "def test_1000(self):\n with pytest.raises(pyxb.PyXBException):\n self.test_files.load_xml_to_pyxb('systemMetadata_v1_0.invalid.xml')", "def fix_missing_locations(node):\r\n def _fix(node, lineno, col_offset):\r\n if 'lineno' in node._attributes:\r\n if getattr(node, 'lineno', None) is None:\r\n node.lineno = lineno\r\n else:\r\n lineno = node.lineno\r\n if 'col_offset' in node._attributes:\r\n if getattr(node, 'col_offset', None) is None:\r\n node.col_offset = col_offset\r\n else:\r\n col_offset = node.col_offset\r\n for child in ast.iter_child_nodes(node):\r\n _fix(child, lineno, col_offset)\r\n _fix(node, 1, 0)\r\n return node", "def fix(self, source: str):\n pass", "def check_and_repair_tag_integrity(self):\n text_content = self.get_text_content().lower()\n for t in self.tags:\n tag_text = t.text.lower()\n text_text = text_content[t.start:t.end]\n if tag_text != text_text:\n repaired = False\n # run backwards trough the document\n for off in range(5, -30, -1):\n if tag_text == text_content[t.start + off:t.end + off]:\n t.start = t.start - off\n t.end = t.end - off\n repaired = True\n if not repaired:\n logging.debug(f'Tag position does not match to string in text ({tag_text} vs {text_text})')", "def _element_check(data):\n if isinstance(data, etree.Element):\n logging.debug(\"attempting to convert to xml string\")\n return etree.tostring(data)\n else:\n return data", "def convertOldNodes(self):\n for node in self.model.root.descendantGen():\n for field in node.nodeFormat().fields():\n text = node.data.get(field.name, '')\n 
if text:\n if field.typeName == 'Text' and not field.oldHasHtml:\n text = text.strip()\n text = xml.sax.saxutils.escape(text)\n text = text.replace('\\n', '<br />\\n')\n node.data[field.name] = text\n elif (field.typeName == 'ExternalLink' and\n field.oldTypeName):\n dispName = node.data.get(field.oldLinkAltField, '')\n if not dispName:\n dispName = text\n if field.oldTypeName == 'URL':\n if not urltools.extractScheme(text):\n text = urltools.replaceScheme('http', text)\n elif field.oldTypeName == 'Path':\n text = urltools.replaceScheme('file', text)\n elif field.oldTypeName == 'ExecuteLink':\n if urltools.isRelative(text):\n fullPath = which(text)\n if fullPath:\n text = fullPath\n text = urltools.replaceScheme('file', text)\n elif field.oldTypeName == 'Email':\n text = urltools.replaceScheme('mailto', text)\n node.data[field.name] = ('<a href=\"{0}\">{1}</a>'.\n format(text, dispName))\n elif field.typeName == 'InternalLink':\n uniqueId = treenode.adjustId(text)\n dispName = node.data.get(field.oldLinkAltField, '')\n if not dispName:\n dispName = uniqueId\n node.data[field.name] = ('<a href=\"#{0}\">{1}</a>'.\n format(uniqueId, dispName))\n elif field.typeName == 'Picture':\n node.data[field.name] = ('<img src=\"{0}\" />'.\n format(text))\n if node.nodeFormat().fields(): # skip for dummy root\n node.updateUniqueId()", "def fix_error(self):\n source = ZipFile(self.file, 'r')\n target = ZipFile(self.file, 'w', ZIP_DEFLATED)\n\n for file in source.filelist:\n if file.filename != 'xl/SharedStrings.xml':\n target.writestr(file.filename, source.read(file.filename))\n else:\n target.writestr('xl/sharedStrings.xml', source.read(name=file.filename))\n target.close()\n source.close()", "def test_upgrade_link_invalid(self):\n document = self.root.document\n editable = document.get_editable()\n editable.content = ParsedXML(\n 'content', u\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<doc>\n <p type=\"normal\">\n <link target=\"_blank\" url=\"Aléatoire\">On me link</link>\n </p>\n</doc>\"\"\".encode('utf-8'))\n self.assertEqual(document_upgrader.upgrade(document), document)\n document_dom = editable.content.documentElement\n links = document_dom.getElementsByTagName('link')\n self.assertEqual(len(links), 1)\n link = links[0]\n self.assertTrue(link.hasAttribute('url'))\n self.assertEqual(link.getAttribute('url'), u'Aléatoire')\n self.assertFalse(link.hasAttribute('anchor'))\n self.assertFalse(link.hasAttribute('reference'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates the DropDowns for Function selection based on the functionLib
def createFunctionDropwDowns(self):

    all_functions = inspect.getmembers(functionLib, inspect.isfunction)

    self.c_functions = []
    self.i_functions = []
    self.r_functions = []
    self.v_functions = []
    self.l_functions = []

    for functionTupel in all_functions:
        if "c_" in functionTupel[0]:
            self.c_functions.append(functionTupel)
        elif "i_" in functionTupel[0]:
            self.i_functions.append(functionTupel)
        elif "r_" in functionTupel[0]:
            self.r_functions.append(functionTupel)
        elif "v_" in functionTupel[0]:
            self.v_functions.append(functionTupel)
        elif "l_" in functionTupel[0]:
            self.l_functions.append(functionTupel)

    self.function_c_DropwDown = QtGui.QComboBox()
    self.function_c_DropwDown.addItem("Choose Function")
    self.function_i_DropwDown = QtGui.QComboBox()
    self.function_i_DropwDownNew = QtGui.QComboBox()
    self.function_i_DropwDown.addItem("Choose Function")
    self.function_i_DropwDownNew.addItem("Choose Function")
    self.function_r_DropwDown = QtGui.QComboBox()
    self.function_r_DropwDown.addItem("Choose Function")
    self.function_v_DropwDown = QtGui.QComboBox()
    self.function_v_DropwDownNew = QtGui.QComboBox()
    self.function_v_DropwDown.addItem("Choose Function")
    self.function_v_DropwDownNew.addItem("Choose Function")
    self.function_l_DropwDown = QtGui.QComboBox()
    self.function_l_DropwDown.addItem("Choose Function")

    for functionTupel in self.c_functions:
        self.function_c_DropwDown.addItem(functionTupel[0])

    for functionTupel in self.i_functions:
        self.function_i_DropwDown.addItem(functionTupel[0])
        self.function_i_DropwDownNew.addItem(functionTupel[0])

    for functionTupel in self.r_functions:
        self.function_r_DropwDown.addItem(functionTupel[0])

    for functionTupel in self.v_functions:
        self.function_v_DropwDown.addItem(functionTupel[0])
        self.function_v_DropwDownNew.addItem(functionTupel[0])

    for functionTupel in self.l_functions:
        self.function_l_DropwDown.addItem(functionTupel[0])

    self.function_c_DropwDown.hide()
    self.function_i_DropwDown.hide()
    #self.function_r_DropwDown.hide()
    self.function_v_DropwDown.hide()
    self.function_l_DropwDown.hide()
[ "def _create_fmat_dropdown(self):\n options_list = list(self.available_plots.keys())\n default_value = options_list[0] # default_value = \"line\"\n dropdown_default_params = dict(options=options_list,\n value=default_value,\n description='Plot type:',\n disabled=False\n )\n dropdown_list = [Dropdown(**dropdown_default_params) for _ in self.tensor_rep.fmat]\n return dropdown_list", "def create_dropdown(self):\n try:\n if self.table_count==4:\n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False,True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [False,True,False,True]}\n elif self.table_count==3 and not (self.bool7_below_th):\n # no below threshold for 7 days per 100k sums\n self.dropdown1 = {'label':'7 day Calculations','visibility': [False,True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [True,False,True]}\n # If there is no below threshold for 14 days then there is also no below threshold table for the 7 days --> else\n elif self.table_count ==3 and not (self.bool14_above_th):\n # no above threshold for 14 days per 100k \n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False,True]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [False,True,False]}\n # If there is no above threshold for 7 days then there is also no above threshold table for the 14 days -> else\n else:\n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [False,True]} \n \n except Exception as e:\n logger.error(e)", "def createDropDowns(self):\n\n self.componentDropwDown = QtGui.QComboBox()\n self.componentDropwDown.addItem(\"Resistor\")\n self.componentDropwDown.addItem(\"Coil\")\n self.componentDropwDown.addItem(\"Capacitator\")\n self.componentDropwDown.addItem(\"V-Source\")\n self.componentDropwDown.addItem(\"I-Source\")\n self.componentDropwDown.currentIndexChanged.connect(self.on_ComponentChanged)\n\n self.potenzialDropDownFrom = QtGui.QComboBox()\n self.potenzialDropDownFrom.addItem(\"---Potencial From---\")\n self.potenzialDropDownFrom.addItem(\"E-Last\")\n self.potenzialDropDownFrom.addItem(\"E-Masse\")\n self.potenzialDropDownFrom.setAutoCompletion(True)\n \n self.potenzialDropDownTo = QtGui.QComboBox()\n self.potenzialDropDownTo.addItem(\"---Potencial To---\")\n self.potenzialDropDownTo.addItem(\"E-Last\")\n self.potenzialDropDownTo.addItem(\"E-Masse\")\n self.potenzialDropDownFrom.setAutoCompletion(True)\n\n self.directionDropwDown = QtGui.QComboBox()\n self.directionDropwDown.addItem(\"left\")\n self.directionDropwDown.addItem(\"right\")\n self.directionDropwDown.addItem(\"up\")\n self.directionDropwDown.addItem(\"down\")\n\n self.potenzialDropDown = QtGui.QComboBox()\n self.potenzialDropDown.setFixedSize(200,20)\n self.potenzialDropDown.hide()\n self.potenzialDropDown.currentIndexChanged.connect(self.onPotencialChanged)", "def initialize(self):\n\n self.title('WA Crash Feature Mapper')\n self.minsize(700, 700)\n\n self.selection0 = tk.StringVar()\n self.selection0.set('Select year to view')\n options0 = ['2013', '2014', '2015', '2016', '2017']\n self.drop0 = tk.OptionMenu(self, self.selection0, *options0)\n self.drop0.pack()\n\n # self.button0\n tk.Button(\n self,\n text='Save year selection',\n command=lambda: enable_next_dropdown(self.drop1)\n ).pack()\n\n self.selection1 = tk.StringVar()\n self.selection1.set('Select county to view')\n options1 = ['Adams',\n 'Asotin',\n 'Benton',\n 'Chelan',\n 'Clallam',\n 
'Clark',\n 'Columbia',\n 'Cowlitz',\n 'Douglas',\n 'Ferry',\n 'Franklin',\n 'Garfield',\n 'Grant',\n 'Grays Harbor',\n 'Island',\n 'Jefferson',\n 'King',\n 'Kitsap',\n 'Kittitas',\n 'Klickitat',\n 'Lewis',\n 'Lincoln',\n 'Mason',\n 'Okanogan',\n 'Pacific',\n 'Pend Oreille',\n 'Pierce',\n 'San Juan',\n 'Skagit',\n 'Skamania',\n 'Snohomish',\n 'Spokane',\n 'Stevens',\n 'Thurston',\n 'Wahkiakum',\n 'Walla Walla',\n 'Whatcom',\n 'Whitman',\n 'Yakima']\n self.drop1 = tk.OptionMenu(self, self.selection1, *options1)\n self.drop1.configure(state='disabled')\n self.drop1.pack()\n # self.button1\n tk.Button(\n self,\n text='Save county selection',\n command=lambda: enable_next_dropdown(self.drop2)\n ).pack()\n\n self.selection2 = tk.StringVar()\n self.selection2.set('Select group feature to view')\n options2 = [\n 'Weather',\n 'Surface Condition',\n 'Lighting Condition',\n 'Day of the week']\n self.drop2 = tk.OptionMenu(self, self.selection2, *options2)\n self.drop2.configure(state='disabled')\n self.drop2.pack()\n\n # self.button2 =\n tk.Button(\n self,\n text='Save group selection',\n command=lambda: self.set_options_init(self.drop3, self.selection3)\n ).pack()\n\n self.selection3 = tk.StringVar()\n self.selection3.set('Select subgroup feature to view')\n options3 = 'Select subgroup to view'\n self.drop3 = tk.OptionMenu(self, self.selection3, options3)\n self.drop3.configure(state='disabled')\n self.drop3.pack()\n # self.button3 =\n tk.Button(\n self,\n text='Save subgroup selection',\n command=lambda: set_map_options(self.drop4, self.selection4)\n ).pack()\n\n self.selection4 = tk.StringVar()\n self.selection4.set('Select type of map to view')\n options4 = ['Select type of map to view']\n self.drop4 = tk.OptionMenu(self, self.selection4, *options4)\n self.drop4.configure(state='disabled')\n self.drop4.pack()\n # show the final map based on selections\n\n # self.button4 =\n tk.Button(\n self,\n text='Show map', command=self.show_map\n ).pack()\n\n # self.button5 =\n tk.Button(\n self,\n text='Generate ML reports',\n command=lambda: generate_ml(self.selection0.get())\n ).pack()", "def dropdown_states():\r\n\r\n states = ['Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Florida','Georgia','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island','South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia','Washington','West Virginia','Wisconsin','Wyoming']\r\n abbreviations = ['AL','AK','AZ','AR','CA','CO','CT','DE','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY']\r\n\r\n output = ['<select>']\r\n\r\n for state, abbreviation in zip(states, abbreviations):\r\n output.append('\\t<option value=\"{0}\">{1}</option>'.format(abbreviation, state))\r\n\r\n output.append('</select>')\r\n\r\n output = '\\n'.join(output) # Glue together the list with a newline between each list item.\r\n\r\n return output", "def add_drop_down(self, col_number, col_label):\n if col_label.endswith('**') or col_label.endswith('^^'):\n col_label = col_label[:-2]\n # add drop-down for experiments\n if col_label == \"experiments\":\n if 
'measurements' in self.contribution.tables:\n meas_table = self.contribution.tables['measurements'].df\n if 'experiment' in meas_table.columns:\n exps = meas_table['experiment'].unique()\n self.choices[col_number] = (sorted(exps), False)\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n return\n #\n if col_label == 'method_codes':\n self.add_method_drop_down(col_number, col_label)\n elif col_label == 'magic_method_codes':\n self.add_method_drop_down(col_number, 'method_codes')\n elif col_label in ['specimens', 'samples', 'sites', 'locations']:\n if col_label in self.contribution.tables:\n item_df = self.contribution.tables[col_label].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n elif col_label in ['specimen', 'sample', 'site', 'location']:\n if col_label + \"s\" in self.contribution.tables:\n item_df = self.contribution.tables[col_label + \"s\"].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n # add vocabularies\n if col_label in self.contribution.vocab.suggested:\n typ = 'suggested'\n elif col_label in self.contribution.vocab.vocabularies:\n typ = 'controlled'\n else:\n return\n\n # add menu, if not already set\n if col_number not in list(self.choices.keys()):\n if typ == 'suggested':\n self.grid.SetColLabelValue(col_number, col_label + \"^^\")\n controlled_vocabulary = self.contribution.vocab.suggested[col_label]\n else:\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n controlled_vocabulary = self.contribution.vocab.vocabularies[col_label]\n #\n stripped_list = []\n for item in controlled_vocabulary:\n try:\n stripped_list.append(str(item))\n except UnicodeEncodeError:\n # skips items with non ASCII characters\n pass\n\n if len(stripped_list) > 100:\n # split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}\n dictionary = {}\n for item in stripped_list:\n letter = item[0].upper()\n if letter not in list(dictionary.keys()):\n dictionary[letter] = []\n dictionary[letter].append(item)\n stripped_list = dictionary\n\n two_tiered = True if isinstance(stripped_list, dict) else False\n self.choices[col_number] = (stripped_list, two_tiered)\n return", "def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)", "def fill_dropdown(data_source, hd5_file, visit_id, static_data, filter_signals):\n # Extract the signals\n try:\n # Add movements from static data\n if data_source == \"static\":\n mvmnt_fields = static_data[\"Movements\"][\"fields\"]\n signal_list = mvmnt_fields[\"department_nm\"]\n # Add all the other signals\n else:\n signal_list = TMapHelper.list_signal(\n hd5_file,\n data_source,\n visit_id,\n filter_signals,\n )\n except KeyError: # Don't crash if the file is in an old format\n signal_list = []\n\n # Rename special signals\n special_signals = LAYOUT[\"options\"][\"special_signals\"]\n for idx, signal in enumerate(signal_list):\n for special_signal in special_signals:\n if special_signal[\"pattern\"] in signal:\n signal_list = np.delete(signal_list, idx)\n for suffix in special_signal[\"suffix\"]:\n signal_list = np.append(signal_list, f\"{signal}_{suffix}\")\n\n # Format the options for the dropdown\n dd_options = [\n {\"label\": sig, \"value\": f\"{data_source}--{sig}\"} 
for sig in signal_list\n ]\n return dd_options", "def add_method_drop_down(self, col_number, col_label):\n if self.data_type == 'ages':\n method_list = self.contribution.vocab.age_methods\n else:\n method_list = self.contribution.vocab.age_methods.copy()\n method_list.update(self.contribution.vocab.methods)\n self.choices[col_number] = (method_list, True)", "def make_choose_control(field_name,\n included_label,\n included_items,\n excluded_label,\n excluded_items,\n item_to_text=str,\n item_to_value=str,\n ordered=0):\n \n # We'll construct an array of buttons. Each element is an HTML\n # input control.\n buttons = []\n # Construct the encoding for the items initially included.\n initial_value = string.join(map(item_to_value, included_items), \",\")\n # The hidden control that will contain the encoded representation of\n # the included items.\n hidden_control = '<input type=\"hidden\" name=\"%s\" value=\"%s\">' \\\n % (field_name, initial_value)\n # Construct names for the two select controls.\n included_select_name = \"_inc_\" + field_name\n excluded_select_name = \"_exc_\" + field_name\n\n # The select control for included items. When the user selects an\n # item in this list, deselect the selected item in the excluded\n # list, if any.\n included_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (included_select_name, excluded_select_name)\n # Build options for items initially selected.\n for item in included_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n included_select = included_select + option\n included_select = included_select + '</select>\\n'\n\n # The select control for excluded items. When the user selects an\n # item in this list, deselect the selected item in the included\n # list, if any.\n excluded_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (excluded_select_name, included_select_name)\n # Build options for items initially excluded.\n for item in excluded_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n excluded_select = excluded_select + option\n excluded_select = excluded_select + '</select>\\n'\n\n # The Add button.\n button = '''\n <input type=\"button\"\n value=\" << Add \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />\n ''' % (excluded_select_name, included_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n # The Remove button.\n button = '''\n &nbsp;<input\n type=\"button\"\n value=\" Remove >> \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />&nbsp;\n ''' % (included_select_name, excluded_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n if ordered:\n # The Move Up button.\n button = '''\n <input type=\"button\"\n value=\" Move Up \"\n onclick=\"swap_option(document.form.%s, -1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n\n buttons.append(button)\n\n # The Move Down button.\n button = '''\n <input type=\"button\"\n value=\" Move Down \"\n onclick=\"swap_option(document.form.%s, 1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % 
(included_select_name, field_name, included_select_name)\n buttons.append(button)\n\n # Arrange everything properly.\n buttons = string.join(buttons, \"\\n<br />\\n\")\n return '''\n %(hidden_control)s\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">\n <tr valign=\"center\">\n <td>\n %(included_label)s:\n <br />\n %(included_select)s\n </td>\n <td align=\"center\">\n %(buttons)s\n </td>\n <td>\n %(excluded_label)s:<br />\n %(excluded_select)s\n </td>\n </tr>\n </table>\n ''' % locals()", "def build_options(slot,buttons):\r\n if slot in ['origin_city','destination_city']:\r\n button_list= []\r\n for btn in buttons:\r\n button_list.append({'text': btn, 'value': btn})\r\n return button_list", "def populateEquationsDropDown(self, tabNo):\n\n if tabNo == 4:\n\n self.regressionTab.regrSelectPane.mlrTab.eqSelect.clear()\n self.regressionTab.regrSelectPane.pcarTab.eqSelect.clear()\n self.regressionTab.regrSelectPane.zscrTab.eqSelect.clear()\n self.regressionTab.regrSelectPane.annTab.eqSelect.clear()\n\n for key in self.forecastDict['EquationPools'].keys():\n self.regressionTab.regrSelectPane.mlrTab.eqSelect.addItem(str(key))\n self.regressionTab.regrSelectPane.zscrTab.eqSelect.addItem(str(key))\n self.regressionTab.regrSelectPane.pcarTab.eqSelect.addItem(str(key))\n self.regressionTab.regrSelectPane.annTab.eqSelect.addItem(str(key))\n \n return", "def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]:", "def setStartingValues(self):\n if self.choosen == 0:\n self.function = self.function_i_DropwDownNew.currentText()\n else:\n self.function = self.function_v_DropwDownNew.currentText()\n self.initParametersDialog.close()", "def InitUI(self):\n if self.data_type in ['orient', 'ages']:\n belongs_to = []\n else:\n parent_table_name = self.parent_type + \"s\"\n if parent_table_name in self.contribution.tables:\n belongs_to = sorted(self.contribution.tables[parent_table_name].df.index.unique())\n else:\n belongs_to = []\n\n self.choices = {}\n if self.data_type in ['specimens', 'samples', 'sites']:\n self.choices = {1: (belongs_to, False)}\n if self.data_type == 'orient':\n self.choices = {1: (['g', 'b'], False)}\n if self.data_type == 'ages':\n for level in ['specimen', 'sample', 'site', 'location']:\n if level in self.grid.col_labels:\n level_names = []\n if level + \"s\" in self.contribution.tables:\n level_names = list(self.contribution.tables[level+\"s\"].df.index.unique())\n num = self.grid.col_labels.index(level)\n self.choices[num] = (level_names, False)\n # Bind left click to drop-down menu popping out\n self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK,\n lambda event: self.on_left_click(event, self.grid, self.choices))\n\n cols = self.grid.GetNumberCols()\n col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)]\n\n # check if any additional columns have controlled vocabularies\n # if so, get the vocabulary list\n for col_number, label in enumerate(col_labels):\n self.add_drop_down(col_number, label)", "def get_classify_method_choices():\n\n queryset = ClassificationMethod.objects.filter(active=True)\n\n return [INITIAL_SELECT_CHOICE] +\\\n [(x.id, x.display_name) for x in queryset]", "def create_edge_layer_select(net):\n\n options = ['Score']\n if 'p_value' in net.columns:\n options.append('P-value')\n if 'hotelling_p_value' in net.columns:\n options.append('Hotelling P-value (phased)')\n if 'Test_Name' in net.columns:\n options.append('Test Name')\n if 'r_squared' in net.columns:\n options.append('R^2')\n options.append('Relationship')\n\n select = 
dcc.Dropdown(\n id = 'edge-layer-select',\n style = {\n 'color' : 'black'\n },\n options = [\n {'label' : col, 'value' : col} for col in options\n ],\n value = 'Score'\n )\n return select", "def generate_autoplot_list(apid):\n s = '<select name=\"q\" class=\"iemselect2\" data-width=\"100%\">\\n'\n for entry in scripts.data[\"plots\"]:\n s += f\"<optgroup label=\\\"{entry['label']}\\\">\\n\"\n for opt in entry[\"options\"]:\n selected = ' selected=\"selected\"' if opt[\"id\"] == apid else \"\"\n s += (\n f\"<option value=\\\"{opt['id']}\\\"{selected}>{opt['label']} \"\n f\"(#{opt['id']})</option>\\n\"\n )\n s += \"</optgroup>\\n\"\n\n s += \"</select>\\n\"\n return s", "def printDropDown(name, nameValList, default, onChange=None):\n addStr = \"\"\n if onChange is not None:\n addStr = \"\"\" onchange=\"%s\" \"\"\" % onChange\n print('<select id=\"dropdown\" name=\"%s\"%s>' % (name, addStr))\n for name, desc in nameValList:\n name = str(name)\n addString = \"\"\n if default is not None and str(name)==str(default):\n addString = ' selected=\"selected\"'\n print(' <option value=\"%s\"%s>%s</option>' % (name, addString, desc))\n print('</select>')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates multiple DropDowns for the GUI. Mostly used for the input of components
def createDropDowns(self):

    self.componentDropwDown = QtGui.QComboBox()
    self.componentDropwDown.addItem("Resistor")
    self.componentDropwDown.addItem("Coil")
    self.componentDropwDown.addItem("Capacitator")
    self.componentDropwDown.addItem("V-Source")
    self.componentDropwDown.addItem("I-Source")
    self.componentDropwDown.currentIndexChanged.connect(self.on_ComponentChanged)

    self.potenzialDropDownFrom = QtGui.QComboBox()
    self.potenzialDropDownFrom.addItem("---Potencial From---")
    self.potenzialDropDownFrom.addItem("E-Last")
    self.potenzialDropDownFrom.addItem("E-Masse")
    self.potenzialDropDownFrom.setAutoCompletion(True)

    self.potenzialDropDownTo = QtGui.QComboBox()
    self.potenzialDropDownTo.addItem("---Potencial To---")
    self.potenzialDropDownTo.addItem("E-Last")
    self.potenzialDropDownTo.addItem("E-Masse")
    self.potenzialDropDownFrom.setAutoCompletion(True)

    self.directionDropwDown = QtGui.QComboBox()
    self.directionDropwDown.addItem("left")
    self.directionDropwDown.addItem("right")
    self.directionDropwDown.addItem("up")
    self.directionDropwDown.addItem("down")

    self.potenzialDropDown = QtGui.QComboBox()
    self.potenzialDropDown.setFixedSize(200,20)
    self.potenzialDropDown.hide()
    self.potenzialDropDown.currentIndexChanged.connect(self.onPotencialChanged)
[ "def createFunctionDropwDowns(self):\n\n all_functions = inspect.getmembers(functionLib, inspect.isfunction) \n\n self.c_functions = []\n self.i_functions = []\n self.r_functions = []\n self.v_functions = []\n self.l_functions = []\n\n for functionTupel in all_functions:\n if \"c_\" in functionTupel[0]:\n self.c_functions.append(functionTupel)\n\n elif \"i_\" in functionTupel[0]:\n self.i_functions.append(functionTupel)\n elif \"r_\" in functionTupel[0]:\n self.r_functions.append(functionTupel)\n elif \"v_\" in functionTupel[0]:\n self.v_functions.append(functionTupel)\n elif \"l_\" in functionTupel[0]:\n self.l_functions.append(functionTupel)\n\n \n self.function_c_DropwDown = QtGui.QComboBox()\n self.function_c_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDown = QtGui.QComboBox()\n self.function_i_DropwDownNew = QtGui.QComboBox()\n self.function_i_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDownNew.addItem(\"Choose Function\")\n self.function_r_DropwDown = QtGui.QComboBox()\n self.function_r_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDown = QtGui.QComboBox()\n self.function_v_DropwDownNew = QtGui.QComboBox()\n self.function_v_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDownNew.addItem(\"Choose Function\")\n self.function_l_DropwDown = QtGui.QComboBox()\n self.function_l_DropwDown.addItem(\"Choose Function\")\n\n for functionTupel in self.c_functions:\n self.function_c_DropwDown.addItem(functionTupel[0])\n\n for functionTupel in self.i_functions:\n self.function_i_DropwDown.addItem(functionTupel[0])\n self.function_i_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.r_functions:\n self.function_r_DropwDown.addItem(functionTupel[0])\n \n for functionTupel in self.v_functions:\n self.function_v_DropwDown.addItem(functionTupel[0])\n self.function_v_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.l_functions:\n self.function_l_DropwDown.addItem(functionTupel[0])\n\n self.function_c_DropwDown.hide()\n self.function_i_DropwDown.hide()\n #self.function_r_DropwDown.hide()\n self.function_v_DropwDown.hide()\n self.function_l_DropwDown.hide()", "def initialize(self):\n\n self.title('WA Crash Feature Mapper')\n self.minsize(700, 700)\n\n self.selection0 = tk.StringVar()\n self.selection0.set('Select year to view')\n options0 = ['2013', '2014', '2015', '2016', '2017']\n self.drop0 = tk.OptionMenu(self, self.selection0, *options0)\n self.drop0.pack()\n\n # self.button0\n tk.Button(\n self,\n text='Save year selection',\n command=lambda: enable_next_dropdown(self.drop1)\n ).pack()\n\n self.selection1 = tk.StringVar()\n self.selection1.set('Select county to view')\n options1 = ['Adams',\n 'Asotin',\n 'Benton',\n 'Chelan',\n 'Clallam',\n 'Clark',\n 'Columbia',\n 'Cowlitz',\n 'Douglas',\n 'Ferry',\n 'Franklin',\n 'Garfield',\n 'Grant',\n 'Grays Harbor',\n 'Island',\n 'Jefferson',\n 'King',\n 'Kitsap',\n 'Kittitas',\n 'Klickitat',\n 'Lewis',\n 'Lincoln',\n 'Mason',\n 'Okanogan',\n 'Pacific',\n 'Pend Oreille',\n 'Pierce',\n 'San Juan',\n 'Skagit',\n 'Skamania',\n 'Snohomish',\n 'Spokane',\n 'Stevens',\n 'Thurston',\n 'Wahkiakum',\n 'Walla Walla',\n 'Whatcom',\n 'Whitman',\n 'Yakima']\n self.drop1 = tk.OptionMenu(self, self.selection1, *options1)\n self.drop1.configure(state='disabled')\n self.drop1.pack()\n # self.button1\n tk.Button(\n self,\n text='Save county selection',\n command=lambda: enable_next_dropdown(self.drop2)\n ).pack()\n\n self.selection2 = tk.StringVar()\n 
self.selection2.set('Select group feature to view')\n options2 = [\n 'Weather',\n 'Surface Condition',\n 'Lighting Condition',\n 'Day of the week']\n self.drop2 = tk.OptionMenu(self, self.selection2, *options2)\n self.drop2.configure(state='disabled')\n self.drop2.pack()\n\n # self.button2 =\n tk.Button(\n self,\n text='Save group selection',\n command=lambda: self.set_options_init(self.drop3, self.selection3)\n ).pack()\n\n self.selection3 = tk.StringVar()\n self.selection3.set('Select subgroup feature to view')\n options3 = 'Select subgroup to view'\n self.drop3 = tk.OptionMenu(self, self.selection3, options3)\n self.drop3.configure(state='disabled')\n self.drop3.pack()\n # self.button3 =\n tk.Button(\n self,\n text='Save subgroup selection',\n command=lambda: set_map_options(self.drop4, self.selection4)\n ).pack()\n\n self.selection4 = tk.StringVar()\n self.selection4.set('Select type of map to view')\n options4 = ['Select type of map to view']\n self.drop4 = tk.OptionMenu(self, self.selection4, *options4)\n self.drop4.configure(state='disabled')\n self.drop4.pack()\n # show the final map based on selections\n\n # self.button4 =\n tk.Button(\n self,\n text='Show map', command=self.show_map\n ).pack()\n\n # self.button5 =\n tk.Button(\n self,\n text='Generate ML reports',\n command=lambda: generate_ml(self.selection0.get())\n ).pack()", "def make_choose_control(field_name,\n included_label,\n included_items,\n excluded_label,\n excluded_items,\n item_to_text=str,\n item_to_value=str,\n ordered=0):\n \n # We'll construct an array of buttons. Each element is an HTML\n # input control.\n buttons = []\n # Construct the encoding for the items initially included.\n initial_value = string.join(map(item_to_value, included_items), \",\")\n # The hidden control that will contain the encoded representation of\n # the included items.\n hidden_control = '<input type=\"hidden\" name=\"%s\" value=\"%s\">' \\\n % (field_name, initial_value)\n # Construct names for the two select controls.\n included_select_name = \"_inc_\" + field_name\n excluded_select_name = \"_exc_\" + field_name\n\n # The select control for included items. When the user selects an\n # item in this list, deselect the selected item in the excluded\n # list, if any.\n included_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (included_select_name, excluded_select_name)\n # Build options for items initially selected.\n for item in included_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n included_select = included_select + option\n included_select = included_select + '</select>\\n'\n\n # The select control for excluded items. 
When the user selects an\n # item in this list, deselect the selected item in the included\n # list, if any.\n excluded_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (excluded_select_name, included_select_name)\n # Build options for items initially excluded.\n for item in excluded_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n excluded_select = excluded_select + option\n excluded_select = excluded_select + '</select>\\n'\n\n # The Add button.\n button = '''\n <input type=\"button\"\n value=\" << Add \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />\n ''' % (excluded_select_name, included_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n # The Remove button.\n button = '''\n &nbsp;<input\n type=\"button\"\n value=\" Remove >> \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />&nbsp;\n ''' % (included_select_name, excluded_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n if ordered:\n # The Move Up button.\n button = '''\n <input type=\"button\"\n value=\" Move Up \"\n onclick=\"swap_option(document.form.%s, -1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n\n buttons.append(button)\n\n # The Move Down button.\n button = '''\n <input type=\"button\"\n value=\" Move Down \"\n onclick=\"swap_option(document.form.%s, 1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n buttons.append(button)\n\n # Arrange everything properly.\n buttons = string.join(buttons, \"\\n<br />\\n\")\n return '''\n %(hidden_control)s\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">\n <tr valign=\"center\">\n <td>\n %(included_label)s:\n <br />\n %(included_select)s\n </td>\n <td align=\"center\">\n %(buttons)s\n </td>\n <td>\n %(excluded_label)s:<br />\n %(excluded_select)s\n </td>\n </tr>\n </table>\n ''' % locals()", "def set_combobox(self, domain:str, option_list:list):\n setting_area = QVBoxLayout()\n rows = QVBoxLayout()\n btnAdd = QPushButton(parameter.add_str)\n btnAdd.clicked.connect(lambda:self.Addbutton_click(domain))\n\n\n for elem in option_list:\n row = self.one_row(elem, domain)\n row.itemAt(0).widget().setEnabled(False)\n rows.addLayout(row)\n\n\n setting_area.addLayout(rows)\n setting_area.addWidget(btnAdd)\n return setting_area", "def create_dropdown(self):\n try:\n if self.table_count==4:\n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False,True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [False,True,False,True]}\n elif self.table_count==3 and not (self.bool7_below_th):\n # no below threshold for 7 days per 100k sums\n self.dropdown1 = {'label':'7 day Calculations','visibility': [False,True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [True,False,True]}\n # If there is no below threshold for 14 days then there is also no below threshold table for the 7 days --> else\n elif self.table_count ==3 and not (self.bool14_above_th):\n # no above threshold for 14 days per 100k \n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False,True]}\n self.dropdown2 
= {'label':'14 day Calculations','visibility': [False,True,False]}\n # If there is no above threshold for 7 days then there is also no above threshold table for the 14 days -> else\n else:\n self.dropdown1 = {'label':'7 day Calculations','visibility': [True,False]}\n self.dropdown2 = {'label':'14 day Calculations','visibility': [False,True]} \n \n except Exception as e:\n logger.error(e)", "def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)", "def InitUI(self):\n if self.data_type in ['orient', 'ages']:\n belongs_to = []\n else:\n parent_table_name = self.parent_type + \"s\"\n if parent_table_name in self.contribution.tables:\n belongs_to = sorted(self.contribution.tables[parent_table_name].df.index.unique())\n else:\n belongs_to = []\n\n self.choices = {}\n if self.data_type in ['specimens', 'samples', 'sites']:\n self.choices = {1: (belongs_to, False)}\n if self.data_type == 'orient':\n self.choices = {1: (['g', 'b'], False)}\n if self.data_type == 'ages':\n for level in ['specimen', 'sample', 'site', 'location']:\n if level in self.grid.col_labels:\n level_names = []\n if level + \"s\" in self.contribution.tables:\n level_names = list(self.contribution.tables[level+\"s\"].df.index.unique())\n num = self.grid.col_labels.index(level)\n self.choices[num] = (level_names, False)\n # Bind left click to drop-down menu popping out\n self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK,\n lambda event: self.on_left_click(event, self.grid, self.choices))\n\n cols = self.grid.GetNumberCols()\n col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)]\n\n # check if any additional columns have controlled vocabularies\n # if so, get the vocabulary list\n for col_number, label in enumerate(col_labels):\n self.add_drop_down(col_number, label)", "def _create_fmat_dropdown(self):\n options_list = list(self.available_plots.keys())\n default_value = options_list[0] # default_value = \"line\"\n dropdown_default_params = dict(options=options_list,\n value=default_value,\n description='Plot type:',\n disabled=False\n )\n dropdown_list = [Dropdown(**dropdown_default_params) for _ in self.tensor_rep.fmat]\n return dropdown_list", "def __init__(self, num_selectors,\n label_text = [],\n label_template = \"Channel\",\n button_text = [],\n button_template = \"Port\",\n buttons = 1,\n title=\"MultiSwitch\"):\n super(MultiSelectorForm, self).__init__()\n self.num_selectors = num_selectors\n self.label_text = label_text\n self.label_template = label_template\n self.button_template = button_template\n if button_text:\n self.button_text = button_text\n else:\n self.button_text = [\"\"]*buttons\n self.title=title\n self.state = {}\n\n self.signal = SignalMaker()", "def make_drop_down_widget(self, default, choices):\n menu_var = StringVar(self)\n self.drop_menu_tkvar = menu_var\n\n f = lambda v: self.update_options_view(v)\n drop_menu = OptionMenu(self, menu_var, default, *choices, command=f)\n drop_menu.grid(sticky=E, column=1, row=self.row)\n self.drop_menu = drop_menu\n self.increment_row()", "def build(self, choices):\n for choice in choices:\n self.addItem(choice)", "def add_drop_down(self, col_number, col_label):\n if col_label.endswith('**') or col_label.endswith('^^'):\n col_label = col_label[:-2]\n # add drop-down for experiments\n if col_label == \"experiments\":\n if 'measurements' in self.contribution.tables:\n meas_table = 
self.contribution.tables['measurements'].df\n if 'experiment' in meas_table.columns:\n exps = meas_table['experiment'].unique()\n self.choices[col_number] = (sorted(exps), False)\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n return\n #\n if col_label == 'method_codes':\n self.add_method_drop_down(col_number, col_label)\n elif col_label == 'magic_method_codes':\n self.add_method_drop_down(col_number, 'method_codes')\n elif col_label in ['specimens', 'samples', 'sites', 'locations']:\n if col_label in self.contribution.tables:\n item_df = self.contribution.tables[col_label].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n elif col_label in ['specimen', 'sample', 'site', 'location']:\n if col_label + \"s\" in self.contribution.tables:\n item_df = self.contribution.tables[col_label + \"s\"].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n # add vocabularies\n if col_label in self.contribution.vocab.suggested:\n typ = 'suggested'\n elif col_label in self.contribution.vocab.vocabularies:\n typ = 'controlled'\n else:\n return\n\n # add menu, if not already set\n if col_number not in list(self.choices.keys()):\n if typ == 'suggested':\n self.grid.SetColLabelValue(col_number, col_label + \"^^\")\n controlled_vocabulary = self.contribution.vocab.suggested[col_label]\n else:\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n controlled_vocabulary = self.contribution.vocab.vocabularies[col_label]\n #\n stripped_list = []\n for item in controlled_vocabulary:\n try:\n stripped_list.append(str(item))\n except UnicodeEncodeError:\n # skips items with non ASCII characters\n pass\n\n if len(stripped_list) > 100:\n # split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}\n dictionary = {}\n for item in stripped_list:\n letter = item[0].upper()\n if letter not in list(dictionary.keys()):\n dictionary[letter] = []\n dictionary[letter].append(item)\n stripped_list = dictionary\n\n two_tiered = True if isinstance(stripped_list, dict) else False\n self.choices[col_number] = (stripped_list, two_tiered)\n return", "def __init__(self,parent, engname, onames, exts=['dat'], ext_descrips = ['data'],ext_multi=[True]):\n wx.Dialog.__init__(self,parent,-1,title='Export multiple objects to file',\n style=wx.DEFAULT_DIALOG_STYLE| wx.RESIZE_BORDER,\n size=(720,420))\n\n #get references to required core tools.\n app = wx.GetApp()\n self.console = app.toolmgr.get_tool('Console')\n\n #attributes\n self.engname = engname\n self.onames = onames\n self.opt_dialog = None\n self.filepaths = []\n self.options = {} #dict of ext string: options dialog\n\n #get extensions\n self.ext_descrips = ext_descrips\n self.exts = exts\n self.ext_multi = ext_multi\n self.multi = True #return multiple filepaths\n\n #controls\n self._InitControls()", "def build_options(slot,buttons):\r\n if slot in ['origin_city','destination_city']:\r\n button_list= []\r\n for btn in buttons:\r\n button_list.append({'text': btn, 'value': btn})\r\n return button_list", "def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]:", "def _set_components(self):\n index = self._ui.data_selector.currentIndex()\n if index < 0:\n return\n data = self._data[index]\n cids = data.components\n\n c_list = self._ui.component_selector\n c_list.clear()\n for c in cids:\n item = QListWidgetItem(c.label)\n c_list.addItem(item)\n 
c_list.set_data(item, c)", "def initControl(self):\n self._obj=[]\n self.init_GridSizer() \n \n for d in self.list_of_controls:\n \n self._gs_add_add_first_text_info(d)\n \n #--- Button\n if d[0] == \"BT\" : self._gs_add_Button(d)\n #--- FlatButton\n elif d[0] == \"FLBT\" : self._gs_add_FlatButton(d)\n #--- CheckButton\n elif d[0] == 'CK' : self._gs_add_CheckBox(d) \n #---ComboBox \n elif d[0] == \"COMBO\": self._gs_add_ComboBox(d)\n #---wx.TextCtrl \n elif d[0] == 'TXT' : self._gs_add_TextCtrl(d)\n #---wx.StaticText \n elif d[0] == 'STXT' : self._gs_add_StaticText(d)\n #--- wx BitmapButton\n elif d[0] == 'BTBMP': self._gs_add_BitmapButton(d)\n \n #--- MIN/MAXSpin Buttons\n elif d[0].startswith('SP'):\n #--- min button \n self._gs_add_min_max_button(\"|<\",\"MIN\")\n #---SpinCrtl \n if d[0] == 'SP' : self._gs_add_SpinCtrl(d)\n #---FloatSpinCrtl \n elif d[0] == \"SPF\": self._gs_add_FloatSpin(d)\n #--- max button \n self._gs_add_min_max_button(\">|\",\"MAX\")\n \n else:\n self._gs_add_empty_cell()\n #self.GS.Add(wx.StaticText(self,-1,label=\"NOT A CONTROLL\"),wx.EXPAND,self.gap)\n #self.gs_add_empty_cell()\n # print(self._obj[-1].GetName())\n return self.GS", "def createDataSelectorWidgets (self,parent,parent_layout):;\n \n #print('in createDataSelectionWidgets')\n self._ds_top = top = QWidget(parent);\n parent_layout.addWidget(top);\n self._ds_lo = lotop = QVBoxLayout(top);\n lotop.setContentsMargins(0,0,0,0);\n self._ds_complex = QWidget(top);\n self._ds_complex.setVisible(False);\n lotop.addWidget(self._ds_complex);\n lo = QVBoxLayout(self._ds_complex);\n lo.setContentsMargins(0,0,0,0);\n lab = QLabel(\"complex:\");\n lab.setAlignment(Qt.AlignHCenter);\n lo.addWidget(lab);\n # add complex selector\n lo0 = QHBoxLayout();\n lo0.setContentsMargins(0,0,0,0);\n lo.addLayout(lo0);\n lo1 = QGridLayout()\n lo1.setContentsMargins(0,0,0,0);\n lo1.setHorizontalSpacing(0);\n lo1.setVerticalSpacing(0);\n# lo0.addStretch(1);\n lo0.addLayout(lo1);\n# lo0.addStretch(1);\n bgrp = QButtonGroup(self._ds_complex);\n# tbdesc = { self.AMP:(u\"\\u007Ca\\u007C\",0,0),self.PHASE:(u\"\\u03D5\",0,1),self.REAL:(\"Re\",1,0),self.IMAG:(\"Im\",1,1) };\n# tbdesc = { self.AMP:(\"\\\\u007Ca\\\\u007C\",0,0),self.PHASE:(\"\\\\u0278\",0,1),self.REAL:(\"Re\",1,0),self.IMAG:(\"Im\",1,1) };\n tbdesc = { self.AMP:(\"Amp\",0,0),self.PHASE:(\"Pha\",0,1),self.REAL:(\"Re\",1,0),self.IMAG:(\"Im\",1,1) };\n for label,qa in list(self._qas_complex.items()):\n tbtext,row,col = tbdesc[label];\n tb = QToolButton(self._ds_complex);\n lo1.addWidget(tb,row,col);\n bgrp.addButton(tb);\n tb.setText(tbtext);\n tb.setToolButtonStyle(Qt.ToolButtonTextOnly);\n tb.setSizePolicy(QSizePolicy.MinimumExpanding,QSizePolicy.Minimum);\n tb.setCheckable(True);\n tb.setChecked(label is self.complex_component);\n tb.setMinimumWidth(32);\n tb.clicked[bool].connect(qa.setChecked)\n tb.clicked[bool].connect(self._change_complex)\n qa.triggered[bool].connect(tb.setChecked)\n self._tbs_complex[label] = tb;", "def _create_command_menu(self):\n f1 = urwid.Button('Jump', on_press=self.button_show_jump)\n f2 = urwid.Button('Sell', on_press=self.button_show_sell)\n f3 = urwid.Button('Buy', on_press=self.button_show_buy)\n f4 = urwid.Button('Upgrade', on_press=self.button_show_equip)\n f5 = urwid.Button('Galaxy', on_press=self.button_show_galaxy)\n f6 = urwid.Button('Locals', on_press=self.button_show_locals)\n f7 = urwid.Button('System', on_press=self.button_show_planet_info)\n f8 = urwid.Button('Market', on_press=self.button_show_market)\n f9 = urwid.Button('Status', 
on_press=self.button_show_status)\n f0 = urwid.Button('Cargo', on_press=self.button_show_cargo)\n buttons = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f0]\n buttons = (urwid.AttrMap(b, 'button') for b in buttons)\n menu = urwid.Columns(buttons)\n menu.focus_position = 8\n return menu" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the chosen potencial for plotting the graph when PotencialDropDown is changed
def onPotencialChanged(self):
    self.potencial = self.potenzialDropDown.currentIndex()
[ "def setStartingValues(self):\n if self.choosen == 0:\n self.function = self.function_i_DropwDownNew.currentText()\n else:\n self.function = self.function_v_DropwDownNew.currentText()\n self.initParametersDialog.close()", "def on_vendor_parameter_changed(self):\n self.vendor_parameter = self.vendor_parameter_combobox.currentText()\n if self.report_parameter:\n self.fill_names()\n\n self.update_costs()", "def on_report_parameter_changed(self):\n self.report_parameter = self.report_parameter_combobox.currentText()\n self.name_label.setText(NAME_FIELD_SWITCHER[self.report_parameter].capitalize())\n if self.vendor_parameter:\n self.fill_names()\n\n self.update_costs()", "def on_option_change(self, event):\n\t\telement = event.GetEventObject()\n\t\t_id = element.GetId()\n\t\tvar_name = self.var_ids[_id]\n\t\tif var_name == 'time_index' or var_name == 'pl_index':\n\t\t\tval = int(element.GetValue().split(\" \")[0])\n\t\telif var_name == 'preset':\n\t\t\tval = element.GetValue()\n\t\t\tself.display_map_preview(val)\n\t\telse:\n\t\t\tval = element.GetValue()\n\t\tself.update_option(var_name, val)\n\t\tevent.Skip()", "def plot_select_callback(self):\n self.current_sparam = vna.SParam(self.plot_select.get())\n self.update_widgets()", "def _on_option_clicked(self, *_):\n self.variable.set(True)", "def on_change(change):\n #If we're changing the site dropdown, we need to replot the site plots and change the specimen options\n if (change.owner==site_wid)&(change.name=='value'):\n specimen_wid.options=thellierData[site_wid.value].specimens.keys()\n fit=thellierData[site_wid.value].fit\n display_site_plot(fit)\n\n #If we're changing the specimen dropdown, we need to update the temperature steps.\n if (change.owner==specimen_wid)&(change.name=='value'):\n lower_temp_wid.options=thellierData[site_wid.value][change.new].temps-273\n upper_temp_wid.options=thellierData[site_wid.value][change.new].temps-273\n checkbox.value= not thellierData[site_wid.value][change.new].active\n\n #Additionally, we need to make sure the plot changes if the temperature steps were the exact same as last time.\n if (lower_temp_wid.value!=thellierData[site_wid.value][change.new].savedLowerTemp-273)&(upper_temp_wid.value!=thellierData[site_wid.value][change.new].savedUpperTemp-273):\n pass\n else:\n display_specimen_plots()\n try:\n display_specimen_ring()\n except:\n pass\n lower_temp_wid.value=thellierData[site_wid.value][change.new].savedLowerTemp-273\n upper_temp_wid.value=thellierData[site_wid.value][change.new].savedUpperTemp-273\n\n #If we're changing the specimen plot, we display a red circle around the currently selected specimen on the site plot\n #if (change.owner==specimen_wid):\n #display_specimen_ring()\n\n if (change.name=='value')&((change.owner==lower_temp_wid)|(change.owner==upper_temp_wid)):\n display_specimen_plots()", "def set_default_commodity(self):\n index = int(self.ctl.get_parameter_value(15)) - 1\n self.set_combo_selection(index, self.gui.cmb_commodity_name)", "def computer_choose(self) -> None:\n self.computer_choice = random.choice(OPTIONS)", "def r_choice(self):\r\n\r\n def enter(t: str):\r\n \"\"\"\r\n Function to set values in entries on radio button command\r\n :param t: hotel type\r\n \"\"\"\r\n for i in range(8):\r\n self.sv[i].set(creds_rtypes['h_type'][t][i])\r\n\r\n if self.v.get() == 0:\r\n enter('small')\r\n elif self.v.get() == 1:\r\n enter('standard')\r\n elif self.v.get() == 2:\r\n enter('resort')\r\n elif self.v.get() == 3:\r\n enter('large')\r\n elif self.v.get() == 4:\r\n 
enter('custom')", "def comboBoxParameter_SelectionChanged(self, event):\n self.SelectedItem.parameter_type = event.GetInt()", "def set_default_market(self):\n index = int(self.ctl.get_parameter_value(14)) - 1\n self.set_combo_selection(index, self.gui.cmb_market_code)", "def configure_boxes_for_design_parameters(self):\n if self.ui.radioButton_NWn.isChecked():\n self.ui.label_opt1.setText(\"N: \")\n self.ui.label_opt2.setText(\"Freq. (Hz): \")\n self.ui.label_opt3.hide()\n self.ui.label_opt4.hide()\n self.ui.plainTextEdit_opt1.setEnabled(True)\n self.ui.plainTextEdit_opt2.setEnabled(True)\n self.ui.plainTextEdit_opt3.hide()\n self.ui.plainTextEdit_opt4.hide()\n\n self.ui.plainTextEdit_opt1.setToolTip(\"The order. \"\n \"It must be an integer bigger than zero.\")\n self.ui.plainTextEdit_opt2.setToolTip(\"The natural frequency(ies). \\n\" + self.BAND_MESSAGE)\n\n self.config_dict['mode'] = \"N_WN\"\n elif self.ui.radioButton_AttSpecs.isChecked():\n self.ui.label_opt1.setText(\"Fpass (Hz): \")\n self.ui.label_opt2.setText(\"Fstop (Hz): \")\n self.ui.label_opt3.setText(\"Apass (dB): \")\n self.ui.label_opt4.setText(\"Astop (dB): \")\n self.ui.label_opt3.show()\n self.ui.label_opt4.show()\n self.ui.plainTextEdit_opt3.show()\n self.ui.plainTextEdit_opt4.show()\n\n self.ui.plainTextEdit_opt1.setToolTip(\"The passband frequency(ies), in hertz. \" + self.BAND_MESSAGE)\n self.ui.plainTextEdit_opt2.setToolTip(\"The stop frequency(ies), in hertz.\" + self.BAND_MESSAGE)\n self.ui.plainTextEdit_opt3.setToolTip(\"The attenuation at passband, in dB.\")\n self.ui.plainTextEdit_opt4.setToolTip(\"The attenuation at stopband, in dB.\")\n self.config_dict['mode'] = \"specs\"\n\n else:\n raise ValueError(\"Somehow we chose something that can't be chosen!\")", "def apply_option_button(self):\n\n # get the correct options\n configs = self.plotting_Object.config\n\n if self.selected_plot_option:\n for path in self.selected_plot_option:\n configs = configs[path]\n\n # Change the plot label from the line edit\n if self.widget.plot_label_lineEdit.text():\n configs[\"PlotLabel\"] = self.widget.plot_label_lineEdit.text()\n self.current_plot_object = relabelPlot(\n self.current_plot_object, configs[\"PlotLabel\"]\n )\n\n # Find the plot options otherwise generate\n if not \"PlotOptions\" in configs:\n configs[\"PlotOptions\"] = {}\n\n # Find index of first colon\n line = self.widget.options_lineEdit.text()\n if line:\n ind = line.find(\":\")\n if ind == -1:\n ind = line.find(\"=\")\n # Try to evaluate\n try:\n value = ast.literal_eval(line[ind + 1 :].strip())\n except:\n value = line[ind + 1 :].strip()\n newItem = {line[:ind].strip(): value}\n else:\n newItem = {} # If no options are passed, generate an empty one\n try:\n apply_success = False\n errors = []\n\n if hasattr(self.current_plot_object, \"children\"):\n childs = len(self.current_plot_object.children)\n else:\n childs = 1\n\n if childs > 1:\n self.log.critical(\n \"Applying options to composite plot objects is currently experimental. 
Unforseen results may occure!\"\n )\n for child in self.current_plot_object.keys():\n plot_object = self.current_plot_object\n for path in child:\n plot_object = getattr(plot_object, path)\n try:\n self.apply_options_to_plot(plot_object, **newItem)\n apply_success = True\n break\n except Exception as err:\n self.log.debug(err)\n errors.append(err)\n if not apply_success:\n for err in errors:\n raise Exception(err)\n else:\n self.apply_options_to_plot(self.current_plot_object, **newItem)\n\n self.replot_and_reload_html(self.current_plot_object)\n configs[\"PlotOptions\"].update(newItem)\n self.update_plot_options_tree(self.current_plot_object)\n self.set_current_plot_object()\n self.save_session(\n self.widget.session_name_lineEdit.text(), self.plotting_Object\n ) # Saves the changes in the session\n except Exception as err:\n self.log.error(\n \"An error happened with the newly passed option with error: {} Option will be removed! \"\n \"Warning: Depending on the error, you may have compromised the plot object and a re-render \"\n \"may be needed!\".format(err)\n )\n\n else:\n # If the plot was altered and no options can be rebuild\n self.log.error(\n \"The plot options for this plot can not be retraced! Maybe the plot was altered during building.\"\n \" Applying options anyway, but no options history can be shown!\"\n )\n try:\n # Change the plot label from the line edit\n if self.widget.plot_label_lineEdit.text():\n configs[\"PlotLabel\"] = self.widget.plot_label_lineEdit.text()\n self.current_plot_object = relabelPlot(\n self.current_plot_object, configs[\"PlotLabel\"]\n )\n\n # Find index of first colon\n line = self.widget.options_lineEdit.text()\n if line:\n ind = line.find(\":\")\n if ind == -1:\n ind = line.find(\"=\")\n # Try to evaluate\n try:\n value = ast.literal_eval(line[ind + 1 :].strip())\n except:\n value = line[ind + 1 :].strip()\n newItem = {line[:ind].strip(): value}\n else:\n newItem = {} # If no options are passed, generate an empty one\n self.apply_options_to_plot(self.current_plot_object, **newItem)\n self.replot_and_reload_html(self.current_plot_object)\n except Exception as err:\n self.log.error(\n \"An error happened with the newly passed option with error: {} Option will be removed! 
\"\n \"Warning: Depending on the error, you may have compromised the plot object and a re-render \"\n \"may be needed!\".format(err)\n )", "def option_widget(self, ):\n pass", "def set(self, **opt):\n for o in opt.keys():\n v = opt[o]\n if o == 'P' or o == 'pressure':\n self.setPressure(v)\n del opt[o]\n elif o == 'energy':\n self.solveEnergyEqn(flag = _onoff[v])\n else:\n self._set(opt)", "def set_prun_type(self, prun_type, CurWindow, Pruning_button):\n if \"Factor\" in prun_type:\n CurWindow.prun_acc.setChecked(False)\n if self.prun_type == None or not \"Factor\" in self.prun_type or Pruning_button == True:\n CurWindow.prun_fac.setChecked(True)\n self.prun_type = prun_type\n if self.prun_factor_dense == None and self.prun_factor_conv == None:\n CurWindow.Pruning_Dense.setText(\"10\")\n CurWindow.Pruning_Conv.setText(\"10\")\n else:\n CurWindow.Pruning_Dense.setText(str(self.prun_factor_dense))\n CurWindow.Pruning_Conv.setText(str(self.prun_factor_conv))\n CurWindow.Pruning_Dense.setVisible(True)\n CurWindow.Pruning_Conv.setVisible(True)\n CurWindow.Pruning_Conv_label.setVisible(True)\n CurWindow.Pruning_Dense_label.setVisible(True)\n\n try:\n self.prun_acc = int(CurWindow.prun_acc_edit.text())\n except:\n self.prun_acc = None\n CurWindow.min_acc.setVisible(False)\n CurWindow.acc_loss.setVisible(False)\n CurWindow.prun_acc_label.setVisible(False)\n CurWindow.prun_acc_edit.setVisible(False) \n else:\n self.prun_type = None\n try:\n self.prun_factor_dense = int(CurWindow.Pruning_Dense.text())\n self.prun_factor_conv = int(CurWindow.Pruning_Conv.text())\n except:\n self.prun_factor_dense = None\n self.prun_factor_conv = None\n CurWindow.Pruning_Dense.setVisible(False)\n CurWindow.Pruning_Conv.setVisible(False)\n CurWindow.Pruning_Conv_label.setVisible(False)\n CurWindow.Pruning_Dense_label.setVisible(False)\n\n elif \"Accuracy\" in prun_type:\n CurWindow.prun_fac.setChecked(False)\n if self.prun_type == None or not \"Accuracy\" in self.prun_type or Pruning_button == True:\n CurWindow.prun_acc.setChecked(True)\n self.prun_type = prun_type\n \n CurWindow.min_acc.setVisible(True)\n CurWindow.acc_loss.setVisible(True)\n\n if self.prun_acc_type != None and \"Minimal accuracy\" in self.prun_acc_type:\n CurWindow.min_acc.setChecked(True)\n CurWindow.acc_loss.setChecked(False)\n CurWindow.prun_acc_label.setText(\"Min accuracy\\nto reach in %\")\n CurWindow.prun_acc_label.setVisible(True)\n CurWindow.prun_acc_edit.setVisible(True)\n if self.prun_acc == None:\n CurWindow.prun_acc_edit.setText(\"\")\n else:\n CurWindow.prun_acc_edit.setText(str(self.prun_acc))\n\n elif self.prun_acc_type != None and \"Accuracy loss\" in self.prun_acc_type:\n CurWindow.min_acc.setChecked(False)\n CurWindow.acc_loss.setChecked(True)\n CurWindow.prun_acc_label.setText(\"Max accuracy\\nloss in %\")\n CurWindow.prun_acc_label.setVisible(True)\n CurWindow.prun_acc_edit.setVisible(True)\n if self.prun_acc == None:\n CurWindow.prun_acc_edit.setText(\"\")\n else:\n CurWindow.prun_acc_edit.setText(str(self.prun_acc))\n\n try:\n self.prun_factor_dense = int(CurWindow.Pruning_Dense.text())\n self.prun_factor_conv = int(CurWindow.Pruning_Conv.text())\n except:\n self.prun_factor_dense = None\n self.prun_factor_conv = None\n CurWindow.Pruning_Dense.setVisible(False)\n CurWindow.Pruning_Conv.setVisible(False)\n CurWindow.Pruning_Conv_label.setVisible(False)\n CurWindow.Pruning_Dense_label.setVisible(False)\n else:\n self.prun_type = None\n\n try:\n self.prun_acc = int(CurWindow.prun_acc_edit.text())\n except:\n self.prun_acc = None 
\n CurWindow.min_acc.setVisible(False)\n CurWindow.acc_loss.setVisible(False)\n CurWindow.prun_acc_label.setVisible(False)\n CurWindow.prun_acc_edit.setVisible(False) \n\n print(self.prun_type)", "def sensor_selected(self):\n sens = self.sensor_select.currentText()\n self.sensor_selected_sig.emit(sens)\n if sens == 'Basic Config':\n self.basic_config.show()\n self.basic_lever.hide()\n elif sens == 'Waterline':\n self.basic_config.hide()\n self.basic_lever.show()\n self.show_waterline.show()\n self.waterline_spacer.show()\n self.refpt_label.hide()\n self.refpt_select.hide()\n self.xlabel.hide()\n self.x.hide()\n self.ylabel.hide()\n self.y.hide()\n self.rlabel.hide()\n self.r.hide()\n self.plabel.hide()\n self.p.hide()\n self.yawlabel.hide()\n self.yaw.hide()\n elif sens == 'Primary Antenna':\n self.basic_config.hide()\n self.basic_lever.show()\n self.show_waterline.hide()\n self.waterline_spacer.hide()\n self.refpt_label.hide()\n self.refpt_select.hide()\n self.xlabel.show()\n self.x.show()\n self.ylabel.show()\n self.y.show()\n self.rlabel.hide()\n self.r.hide()\n self.plabel.hide()\n self.p.hide()\n self.yawlabel.hide()\n self.yaw.hide()\n else:\n self.basic_config.hide()\n self.basic_lever.show()\n self.show_waterline.hide()\n self.waterline_spacer.hide()\n if sens == 'Vessel Reference Point':\n self.refpt_label.show()\n self.refpt_select.show()\n else:\n self.refpt_label.hide()\n self.refpt_select.hide()\n self.xlabel.show()\n self.x.show()\n self.ylabel.show()\n self.y.show()\n self.rlabel.show()\n self.r.show()\n self.plabel.show()\n self.p.show()\n self.yawlabel.show()\n self.yaw.show()\n if sens:\n self.populate_sensor(sens)", "def dialog_expected_marginal_cost_handle_comboBox(self):\n # Find out which plant is currently active\n index = self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_plant_comboBox.currentIndex()\n self.dialog_expected_marginal_cost.plant = self.game_obj.player.getPlant(index)\n # Set label of fuel price\n if self.dialog_expected_marginal_cost.plant.source == \"Coal\":\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_source.setText(\"Coal\")\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_fuel_cost.setText(\"{}+{}Q\".format(self.game_obj.coal_cost_fixed, self.game_obj.coal_cost_variable))\n elif self.dialog_expected_marginal_cost.plant.source == \"Gas\":\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_source.setText(\"Gas\")\n # Set label of fuel price\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_fuel_cost.setText(\n \"{}+{}Q\".format(self.game_obj.gas_cost_fixed, self.game_obj.gas_cost_variable))\n elif self.dialog_expected_marginal_cost.plant.source == \"PV\":\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_source.setText(\"PV\")\n # Set label of fuel price\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_fuel_cost.setText(\"0\")\n else:\n print(\"Error source {} has not been defined in dialog_expected_marginal_cost\".format(self.dialog_expected_marginal_cost.plant.source))\n # Set other plant data to labels\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_variable_cost.setText(number_to_string(self.dialog_expected_marginal_cost.plant.variableCost))\n if self.dialog_expected_marginal_cost.plant.isDispatchable():\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_capacity.setText(\n number_to_string(self.dialog_expected_marginal_cost.plant.capacity))\n else:\n self.dialog_expected_marginal_cost.ui.dialog_marginal_cost_capacity.setText(\n 
number_to_string(self.dialog_expected_marginal_cost.plant.getActualCapacity(self.game_obj.weather_effect)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the function when the function dropdowns are changed. It influences the function used for the starting element
def setStartingValues(self):
    if self.choosen == 0:
        self.function = self.function_i_DropwDownNew.currentText()
    else:
        self.function = self.function_v_DropwDownNew.currentText()
    self.initParametersDialog.close()
[ "def createFunctionDropwDowns(self):\n\n all_functions = inspect.getmembers(functionLib, inspect.isfunction) \n\n self.c_functions = []\n self.i_functions = []\n self.r_functions = []\n self.v_functions = []\n self.l_functions = []\n\n for functionTupel in all_functions:\n if \"c_\" in functionTupel[0]:\n self.c_functions.append(functionTupel)\n\n elif \"i_\" in functionTupel[0]:\n self.i_functions.append(functionTupel)\n elif \"r_\" in functionTupel[0]:\n self.r_functions.append(functionTupel)\n elif \"v_\" in functionTupel[0]:\n self.v_functions.append(functionTupel)\n elif \"l_\" in functionTupel[0]:\n self.l_functions.append(functionTupel)\n\n \n self.function_c_DropwDown = QtGui.QComboBox()\n self.function_c_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDown = QtGui.QComboBox()\n self.function_i_DropwDownNew = QtGui.QComboBox()\n self.function_i_DropwDown.addItem(\"Choose Function\")\n self.function_i_DropwDownNew.addItem(\"Choose Function\")\n self.function_r_DropwDown = QtGui.QComboBox()\n self.function_r_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDown = QtGui.QComboBox()\n self.function_v_DropwDownNew = QtGui.QComboBox()\n self.function_v_DropwDown.addItem(\"Choose Function\")\n self.function_v_DropwDownNew.addItem(\"Choose Function\")\n self.function_l_DropwDown = QtGui.QComboBox()\n self.function_l_DropwDown.addItem(\"Choose Function\")\n\n for functionTupel in self.c_functions:\n self.function_c_DropwDown.addItem(functionTupel[0])\n\n for functionTupel in self.i_functions:\n self.function_i_DropwDown.addItem(functionTupel[0])\n self.function_i_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.r_functions:\n self.function_r_DropwDown.addItem(functionTupel[0])\n \n for functionTupel in self.v_functions:\n self.function_v_DropwDown.addItem(functionTupel[0])\n self.function_v_DropwDownNew.addItem(functionTupel[0])\n\n for functionTupel in self.l_functions:\n self.function_l_DropwDown.addItem(functionTupel[0])\n\n self.function_c_DropwDown.hide()\n self.function_i_DropwDown.hide()\n #self.function_r_DropwDown.hide()\n self.function_v_DropwDown.hide()\n self.function_l_DropwDown.hide()", "def update_func(self):\n for e in self.funcs:\n self.func_set[e.func_name] = e", "def sample_fun_input_modified(attr, old, new):\n # Clear existing function and its wavelet transform plots\n reset()\n \n # Get the new sample function id\n sample_function_id = new #sample_fun_input_f.value\n sample_fun_input_f.label=sample_function_id\n\n # Change the Layout of interactive widgets according to sample functin id\n if (sample_function_id == \"Heaviside function\"):\n T0_input.title = \"Input 0 \"u\"\\u2264 t\"u\"\\u2080 \"u\"\\u2264 5:\"\n controls = [sample_fun_input_f, T0_input, Amp_input]\n controls_box = widgetbox(controls, sizing_mode='scale_width')\n My_Layout.children[0].children[1].children[0].children[0]= controls_box\n \n elif (sample_function_id == \"Rectangular function\"):\n T0_input.title = \"Input 0 \"u\"\\u2264 t\"u\"\\u2080 \"u\"\\u2264 t\"u\"\\u2081:\"\n controls = [sample_fun_input_f, T0_input, T1_input, Amp_input]\n controls_box = widgetbox(controls, sizing_mode='scale_width') # all controls\n My_Layout.children[0].children[1].children[0].children[0]= controls_box\n \n elif (sample_function_id == \"Dirac delta function\"):\n T0_input.title = \"Input 0 \"u\"\\u2264 t\"u\"\\u2080 \"u\"\\u2264 5:\"\n controls = [sample_fun_input_f, T0_input, Amp_input]\n controls_box = widgetbox(controls, sizing_mode='scale_width') # all controls\n 
My_Layout.children[0].children[1].children[0].children[0]= controls_box\n \n elif (sample_function_id == \"Trigonometric function\"):\n controls = [sample_fun_input_f, Trigonometric_radio, Frequency_Slider, Calc_button]\n controls_box = widgetbox(controls, sizing_mode='scale_width') # all controls\n My_Layout.children[0].children[1].children[0].children[0]= controls_box\n Trig_fun_modified('active',0,0) # Plot sin\n\n else:\n controls = [sample_fun_input_f, User_Func]\n controls_box = widgetbox(controls, sizing_mode='scale_width') # all controls\n My_Layout.children[0].children[1].children[0].children[0]= controls_box", "def set_update_function(self, fn_update=None):\n self.fn_update = fn_update", "def set_func(self, func):\n self._func = func", "def setter(self, fn):\n self.cb_set = fn", "def bind_function(self, function):\n if function is not None:\n self.get_widget().config(command=function)", "def set_function(self, func):\n self.com.send(f\"FUNC {func}\")\n return", "def change_dropdown(self, *args):\n self.master.unbind('<Unmap>') # unregister unmap so grid_remove doesn't trip it\n new_light_label = self.lightvar.get()\n if self.current_lightframe is not None:\n self.current_lightframe.stop()\n self.logger.debug('Stopping current frame: {}'.format(self.current_lightframe.get_label()))\n self.current_light = self.lightsdict[new_light_label]\n if new_light_label not in self.framesdict.keys(): # Build a new frame\n self.framesdict[new_light_label] = LightFrame(self, self.current_light)\n self.logger.info(\"Building new frame: {}\".format(self.framesdict[new_light_label].get_label()))\n else: # Frame was found; bring to front\n for frame in self.framesdict.values():\n frame.grid_remove() # remove all other frames; not just the current one (this fixes sync bugs for some reason)\n self.framesdict[new_light_label].grid() # should bring to front\n self.logger.info(\n \"Brought existing frame to front: {}\".format(self.framesdict[new_light_label].get_label()))\n self.current_lightframe = self.framesdict[new_light_label]\n self.current_lightframe.restart()\n if not self.current_lightframe.get_label() == self.lightvar.get():\n self.logger.error(\"Mismatch between LightFrame ({}) and Dropdown ({})\".format(\n self.current_lightframe.get_label(), self.lightvar.get()))\n self.master.bind('<Unmap>', lambda *_: self.master.withdraw()) # reregister callback", "def _on_dropdown_select(self, dropdown_obj, data, *largs):\n\n self.cur_button = [btn for btn in dropdown_obj.children[0].children if btn.text == data][0]\n self.is_open = False\n\n if self.val == data:\n self.val = \"\"\n else:\n self.val = data", "def on_clicked(self):\n self.function()", "def set_tooltip_func(self, func):\n if func != self._get_tooltip:\n self.__get_tooltip_func = func\n self._update_node_tooltips()", "def add_on_change(self, setting, func):\n self._listeners.setdefault(setting, []).append(func)", "def initialize(self):\n\n self.title('WA Crash Feature Mapper')\n self.minsize(700, 700)\n\n self.selection0 = tk.StringVar()\n self.selection0.set('Select year to view')\n options0 = ['2013', '2014', '2015', '2016', '2017']\n self.drop0 = tk.OptionMenu(self, self.selection0, *options0)\n self.drop0.pack()\n\n # self.button0\n tk.Button(\n self,\n text='Save year selection',\n command=lambda: enable_next_dropdown(self.drop1)\n ).pack()\n\n self.selection1 = tk.StringVar()\n self.selection1.set('Select county to view')\n options1 = ['Adams',\n 'Asotin',\n 'Benton',\n 'Chelan',\n 'Clallam',\n 'Clark',\n 'Columbia',\n 'Cowlitz',\n 
'Douglas',\n 'Ferry',\n 'Franklin',\n 'Garfield',\n 'Grant',\n 'Grays Harbor',\n 'Island',\n 'Jefferson',\n 'King',\n 'Kitsap',\n 'Kittitas',\n 'Klickitat',\n 'Lewis',\n 'Lincoln',\n 'Mason',\n 'Okanogan',\n 'Pacific',\n 'Pend Oreille',\n 'Pierce',\n 'San Juan',\n 'Skagit',\n 'Skamania',\n 'Snohomish',\n 'Spokane',\n 'Stevens',\n 'Thurston',\n 'Wahkiakum',\n 'Walla Walla',\n 'Whatcom',\n 'Whitman',\n 'Yakima']\n self.drop1 = tk.OptionMenu(self, self.selection1, *options1)\n self.drop1.configure(state='disabled')\n self.drop1.pack()\n # self.button1\n tk.Button(\n self,\n text='Save county selection',\n command=lambda: enable_next_dropdown(self.drop2)\n ).pack()\n\n self.selection2 = tk.StringVar()\n self.selection2.set('Select group feature to view')\n options2 = [\n 'Weather',\n 'Surface Condition',\n 'Lighting Condition',\n 'Day of the week']\n self.drop2 = tk.OptionMenu(self, self.selection2, *options2)\n self.drop2.configure(state='disabled')\n self.drop2.pack()\n\n # self.button2 =\n tk.Button(\n self,\n text='Save group selection',\n command=lambda: self.set_options_init(self.drop3, self.selection3)\n ).pack()\n\n self.selection3 = tk.StringVar()\n self.selection3.set('Select subgroup feature to view')\n options3 = 'Select subgroup to view'\n self.drop3 = tk.OptionMenu(self, self.selection3, options3)\n self.drop3.configure(state='disabled')\n self.drop3.pack()\n # self.button3 =\n tk.Button(\n self,\n text='Save subgroup selection',\n command=lambda: set_map_options(self.drop4, self.selection4)\n ).pack()\n\n self.selection4 = tk.StringVar()\n self.selection4.set('Select type of map to view')\n options4 = ['Select type of map to view']\n self.drop4 = tk.OptionMenu(self, self.selection4, *options4)\n self.drop4.configure(state='disabled')\n self.drop4.pack()\n # show the final map based on selections\n\n # self.button4 =\n tk.Button(\n self,\n text='Show map', command=self.show_map\n ).pack()\n\n # self.button5 =\n tk.Button(\n self,\n text='Generate ML reports',\n command=lambda: generate_ml(self.selection0.get())\n ).pack()", "def taxa_menu_callback(attr, old, new):\n global live_taxa_val\n live_taxa_val = new\n label = \"Taxon Rank\"\n taxa_menu.label = f\"{label}: {new}\"\n global live_drill_val\n global last_drill_down\n last_drill_down = \"\"\n live_drill_val = None\n updateIfNeeded()", "def _on_option_clicked(self, *_):\n self.variable.set(True)", "def _update_function(self, old_function, new_function):\n \n # fix me: Does this handle closures correctly? Can we?\n # readonly: func_closure, func_globals\n old_function.func_code = new_function.func_code\n old_function.func_defaults = new_function.func_defaults\n old_function.func_dict = new_function.func_dict\n old_function.func_doc = new_function.func_doc", "def on_option_change(self, event):\n\t\telement = event.GetEventObject()\n\t\t_id = element.GetId()\n\t\tvar_name = self.var_ids[_id]\n\t\tif var_name == 'time_index' or var_name == 'pl_index':\n\t\t\tval = int(element.GetValue().split(\" \")[0])\n\t\telif var_name == 'preset':\n\t\t\tval = element.GetValue()\n\t\t\tself.display_map_preview(val)\n\t\telse:\n\t\t\tval = element.GetValue()\n\t\tself.update_option(var_name, val)\n\t\tevent.Skip()", "def SetFunction(self, id, value):\n self.functions[id] = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles the adding of a new component to the circuit
def addComponentToCircuit(self): component = (str(self.componentDropwDown.currentText())) function = "0" if component == "Capacitator": function = self.function_c_DropwDown.currentText() if component == "I-Source": function = self.function_i_DropwDown.currentText() if component == "Resistor": function = self.function_r_DropwDown.currentText() if component == "V-Source": function = self.function_v_DropwDown.currentText() if component == "Coil": function = self.function_l_DropwDown.currentText() direction = (str(self.directionDropwDown.currentText())) name = (str(self.componentNameInput.text())) elabel = self.controler.addComponent(component, direction, name, self.potenzialDropDownFrom.currentIndex(), self.potenzialDropDownTo.currentIndex(), self.componentValueInput.text(), function) if len(elabel) > 0: self.potenzialDropDownFrom.addItem(elabel) self.potenzialDropDownTo.addItem(elabel) self.potenzialDropDownFrom.setCurrentIndex(0) self.potenzialDropDownTo.setCurrentIndex(0) self.componentValueInput.setText("0.0") self.componentValueInput.hide() self.componentValueLabel.hide() self.updateGraph()
[ "def add_component(self, component):\n self.components.append(component)", "def add_component(self, component):\r\n self.subcomponents.append(component)", "def add_component(self, component):\n self.__components.append(copy.deepcopy(component))", "def add(self, cname, **kwargs):\n # Load and immediately instantiate.\n component = get_component(cname)(**kwargs)\n # Part of the contract: we must add ourselves as an entity reference.\n component.entity = self.entity\n # Add for easy iteration as well as easy reference.\n self._index[component._cname] = component\n if component.doprocess:\n self._list.append(component)", "def _circuit_handler(event):\n if not event.build_flags or 'IS_INTERNAL' not in event.build_flags:\n if event.id == self._cid:\n probe.circs.append(event)\n if self._circuit_built.is_set():\n if event.status in ('FAILED', 'CLOSED'):\n self._circuit_finished.set()\n if not self._circuit_built.is_set():\n if event.status in ('FAILED', 'BUILT'):\n self._circuit_built.set()\n elif event.status == 'LAUNCHED' and not self._cid:\n self._cid = event.id\n probe.circs.append(event)\n self._manager.circ_launched.release()", "def __call__(self, circ):\n print \" my circuit is in progress\", circ.id\n self.attacher.waiting_circuits.append((circ.id, self.d,\n self.stream_cc))", "def addOutlet(outlet):", "def add_component(self, entity_number, component):\n self.table[entity_number] = component\n if self.parent is not None:\n self.parent.add_component(entity_number, component)", "def addNetworkResourceForScaleup(self, components):\n try:\n utility.execLog(\"\")\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"addComponent\"))), action=\"CLICK\")\n time.sleep(2)\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"scaleUpNetwork\"))), action=\"CLICK\")\n #self.handleEvent(EC.presence_of_element_located((By.XPATH,self.TemplatesObjects(\"dropdownToggle\"))),action=\"SELECT\",selectBy=\"VISIBLE_TEXT\",setValue=\"Network\")\n time.sleep(5)\n i = 0\n for item in components[\"select_network\"]:\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"ddlAvailableNetworks\"))), action=\"SELECT\", setValue=item,selectBy=\"VISIBLE_TEXT\")\n utility.execLog(\"Selected Value %s for network dropdown on add network resource page\"%str(item))\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID,self.TemplatesObjects(\"btn_addavailablenetwork\"))), action=\"CLICK\")\n utility.execLog(\"Added the item selected\")\n time.sleep(2)\n utility.execLog(\"Selecting port group\")\n try:\n try:\n self.handleEvent(EC.element_to_be_clickable((By.classname, \"form-control ddlPortGroup\")), action=\"SELECT\", setValue=components[\"port_group\"])\n utility.execLog(\"Selected %s value from the Port Group dropdown\"%str(components[\"port_group\"]))\n except:\n utility.execLog(\"port Group %s is not available to select in dropdown\"%str(components[\"port_group\"]))\n self.handleEvent(EC.element_to_be_clickable((By.classname, \"form-control ddlPortGroup\")), action=\"SELECT\", setValue=\"New Port Group\")\n time.sleep(1)\n utility.execLog(\"Selected 'New Port Group' value from the dropdown\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, \"//*[contains(@id,'txtPortGroup')]\")), action=\"SET_TEXT\", setValue=\"NewPortGroup\")\n utility.execLog(\"Entered new port group value to 'NewPortGroup' \")\n except:\n utility.execLog(\"Flow is not VDS flow so Port Group is not available\") \n utility.execLog(\"Selecting resource 
for the added netwrok %s\"%str(item))\n #self.handleEvent(EC.element_to_be_clickable((By.XPATH, \"(//div[@class='dropdown resourcedropdown']/button)[%s]\"%str(i+1))), action=\"CLICK\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH,self.TemplatesObjects(\"selectResource\"))),action=\"CLICK\")\n utility.execLog(\"Select resource button clicked\")\n time.sleep(1)\n if \"all\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding both host and VM to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, +self.TemplatesObjects(\"selctHost\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"Host resource selected\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"selcetVM\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"VM resource selected\")\n elif \"host\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding host to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, +self.TemplatesObjects(\"selctHost\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"Host resource selected\")\n elif \"vm\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding VM to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"selcetVM\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"VM resource selected\")\n else:\n utility.execLog(\"select_resource value entered in json does not match any option\")\n i = i+1\n utility.execLog(\"Save the added networks\")\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"saveNetworkButton\"))), action=\"CLICK\")\n except Exception as e:\n utility.execLog(\"Error generated while adding network component(s) to scaleup service :: Error -> %s\"%(str(), str(e)))", "def add_component(self, component: GameObjectComponent):\n\n self.__component_container.append(component)", "def add_item(self, index: int, quantum_circuit: qcirc.QuantumCircuit) -> None:\n matrix = quantum_circuit.matrix\n vector = numpy.concatenate(\n (numpy.real(matrix).reshape((-1, 1)), numpy.imag(matrix).reshape((-1, 1)))\n )\n self._annoy_index.add_item(index, vector)\n self._scipy_data.append(vector)\n self._quantum_circuits.append(copy.copy(quantum_circuit).compress())", "def test_component_add_ok(self):\n self.execute('component add new_component')\n rv, output = self.execute('component list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def manipulate(self, stored_instance, component_instance):\n pass", "def addSlot(cv, cur, ch):\n s = cv.slot()\n cv.feature(s, ch=ch)", "def create_component(self, comp=None, field_name=None, field_value=None):\n # search in cache based on name\n # if its different, then update cache and ardoq\n if comp['rootWorkspace'] not in self.ws.keys():\n self.ws[comp['rootWorkspace']] = self.get_workspace(ws_id=comp['rootWorkspace'])\n\n # update the find to include field name, but that means create needs that field name\n # find only works on component name. 
comps with same name but different attributes will update rather\n # then creating a 2nd component.\n ind, c = self._find_component(comp=comp, field_name=field_name, field_value=field_value)\n if c:\n if self._is_different(c, comp):\n for k, v in comp.items():\n c[k] = comp[k]\n if not self.simulate:\n res = super().update_component(comp_id=c['_id'], comp=c)\n self.ws[comp['rootWorkspace']]['components'][ind] = res\n self.report['updated_comps'] += 1\n self.report['updated_comps_l'].append({'_id': res['_id'], 'name': res['name'], 'type': res['type']})\n return(res)\n else:\n self.report['updated_comps'] += 1\n return (c)\n else:\n logging.debug('create_component - cache_hit: %s', comp['name'])\n self.report['cache_hit_comps'] += 1\n self.report['cache_hit_comps_l'].append({'_id': c['_id'], 'name': c['name'], 'type': c['type']})\n return c\n if not self.simulate:\n res = super().create_component(comp=comp)\n self.ws[comp['rootWorkspace']]['components'].append(res)\n self.report['new_comps'] += 1\n self.report['new_comps_l'].append({'_id': res['_id'], 'name': res['name'], 'type': res['type']})\n return res\n else:\n self.report['new_comps'] += 1\n comp['_id'] = secrets.token_hex(15) # make a fake _id if when simulating\n self.report['new_comps_l'].append({'_id': comp['_id'], 'name': comp['name'], 'type': comp['typeId']})\n return(comp)", "def circuit(self):\n raise NotImplementedError", "def add_module_event(self, event, data):\n item = data.items[0]\n self.controller.reset_pipeline_view = False\n self.noUpdate = True\n internal_version = -1L\n reg = get_module_registry()\n if reg.is_abstraction(item.descriptor):\n internal_version = item.descriptor.module.internal_version\n adder = self.controller.add_module_from_descriptor\n module = adder(item.descriptor, \n event.scenePos().x(),\n -event.scenePos().y(),\n internal_version)\n self.reset_module_colors()\n graphics_item = self.addModule(module)\n graphics_item.update()\n self.unselect_all()\n # Change selection\n graphics_item.setSelected(True)\n\n # controller changed pipeline: update ids\n self._old_connection_ids = set(self.controller.current_pipeline.connections)\n self._old_module_ids = set(self.controller.current_pipeline.modules)\n\n # We are assuming the first view is the real pipeline view \n self.views()[0].setFocus()\n\n self.noUpdate = False", "def addComponentDetails(self):\n # Skip if we've already updated the reagent\n try:\n int(self.component[\"reagent\"])\n except TypeError:\n return\n\n # Look up the reagent we are adding\n pAddReagent = {}\n if self.component[\"reagent\"] != 0:\n pAddReagent = self.database.GetReagent(self.username, self.component[\"reagent\"])\n\n # Replace the reagent\n del self.component[\"reagent\"]\n self.component[\"reagent\"] = pAddReagent\n\n # Set the default delivery time and pressure\n if self.component[\"deliverytime\"] == 0:\n self.component[\"deliverytime\"] = DEFAULT_ADD_DURATION\n if self.component[\"deliverypressure\"] == 0:\n self.component[\"deliverypressure\"]= DEFAULT_ADD_PRESSURE", "def add_components(self, *components):\n self._store.add_components(self.uuid, *components)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge a copy of the iMIS data with a new, updated iMIS file. The new file is assumed to be the more up-to-date one; typically it will be a report generated by the Girl Guides iMIS system
def update_data(current_file_path=None, new_data_file_path=None, make_backup=True):
    imis_file = ImisFile(current_file_path)
    imis_file.merge(new_data_file_path)
    #print(" ACTIVE MEMBERS")
    #print("--------------------------------------------------------")
    #for member in imis_file.active_member_list:
    #    print(str(member))
    #print("\n\n INACTIVE MEMBERS")
    #print("--------------------------------------------------------")
    #for member in imis_file.inactive_member_list:
    #    print(str(member))
    # TODO verify the correctness of the new file
    if make_backup:
        shutil.copy(current_file_path, current_file_path+".bk")
    imis_file.write()
[ "def update_inp(read,write,n_imperv,n_perv,d_imperv,d_perv,Ks,H_i,IMD):\n\n fin = open(read,'r')\n filedata = fin.read()\n fin.close()\n \n newdata=filedata.replace('S1 0.01 0.1 0 0 0 OUTLET ', \n 'S1 '+ str(n_imperv) + ' '+ str(n_perv) + ' '+ str(d_imperv) +' '+ str(d_perv) + ' 0 OUTLET ')\n newdata2=newdata.replace('S2 0.01 0.1 0 0 0 OUTLET ', \n 'S2 '+ str(n_imperv) + ' '+ str(n_perv) + ' '+ str(d_imperv) +' '+ str(d_perv) + ' 0 OUTLET ')\n newdata3=newdata2.replace('S3 0.01 0.1 0 0 0 OUTLET ', \n 'S3 '+ str(n_imperv) + ' '+ str(n_perv) + ' '+ str(d_imperv) +' '+ str(d_perv) + ' 0 OUTLET ')\n \n newdata4=newdata3.replace('S2 0\t 10 0 ', \n 'S2 '+ str(H_i) + '\t '+ str(Ks) + ' '+ str(IMD) + '') \n \n f = open(write,'w')\n f.write(newdata4)\n f.close()", "def update_compact_files(self, ):\n for file_path, updates in self._updates.items():\n if os.path.exists(file_path):\n with open_temp_copy(file_path, binary=True) as instream, open(file_path, 'wb') as outstream:\n updated_events = self._updated_compact_events(\n yaml.parse(instream),\n updates\n )\n \n yaml.emit(updated_events, outstream)\n else:\n with open(file_path, 'wb') as outstream:\n yaml.emit(self._fresh_content_events(updates.items()), outstream)", "def update(self):\n # ic()\n # self.update_scans()\n self.update_data()", "def _updateinfos(self, filename='', extrainfos=None):\n self['infos'].updateinfos(filename, extrainfos)", "def update_data(current_user_data, filename):\n\n try:\n stored_data = read_data(filename)\n logger.info(\"Merging new data into stored\")\n\n for k, v in current_user_data.items():\n if existing := stored_data.get(k):\n stored_data[k] = list(set(existing) | set(v))\n else:\n stored_data[k] = v\n except:\n logger.info(\"No stored data found\")\n stored_data = current_user_data\n\n logger.info(\"Writing new data to file %s\", filename)\n\n with open(filename, 'w') as f:\n json.dump(stored_data, f)", "def daily_update(debug=False):\n # ---------------------------------------------------\n # Part 1: Main shooting victims data file\n # ---------------------------------------------------\n victims = ShootingVictimsData(debug=debug)\n data = victims.get(fresh=True, update_local=True)\n\n # Value-added info for hot spots and court info\n hotspots = StreetHotSpots(debug=debug)\n courts = CourtInfoByIncident(debug=debug)\n\n # Merge in the value-added info\n data = data.pipe(hotspots.merge).pipe(courts.merge)\n\n # Save victims data to annual files\n victims.save(data)\n\n # -----------------------------------------------------\n # Part 2: Cumulative daily victim totals\n # -----------------------------------------------------\n victims.save_cumulative_totals(data, update_local=True)\n\n # ------------------------------------------------------\n # Part 3: Homicide count scraped from PPD\n # ------------------------------------------------------\n homicide_count = PPDHomicideTotal(debug=debug)\n homicide_count.update()\n\n # Update meta data\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n meta_path = DATA_DIR / \"meta.json\"\n\n # save the download time\n meta = {\"last_updated\": now}\n json.dump(meta, meta_path.open(mode=\"w\"))", "def _mergeISOTXS(self):\n\n # Create an empty ISOTXS library to be filled in with XS data\n lib = xsLibraries.IsotxsLibrary()\n\n neutronVelocities = xsLibraries.mergeXSLibrariesInWorkingDirectory(lib)\n latticePhysicsInterface.setBlockNeutronVelocities(self.r, neutronVelocities)\n\n isotxs.writeBinary(lib, neutronics.ISOTXS)", "def 
merge_modified_section_data(self):\n\n for section in self.sections:\n section_data_start = self.adjust_FileAlignment( section.PointerToRawData,\n self.OPTIONAL_HEADER.FileAlignment )\n section_data_end = section_data_start+section.SizeOfRawData\n if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):\n self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]", "def modify_fits(infile):\n with pyfits.open(infile,mode='update') as pf:\n\n # Add/update a keyword\n pf['PRIMARY'].header['MY_KEYWD'] = 2.0\n\n # Add HISTORY\n pf['PRIMARY'].header['HISTORY'] = 'Multiplied SCI by 2.'\n\n # Modify SCI data\n pf['SCI',1].data *= 2.0\n\n # Recalculate ERR data\n pf['ERR',1].data = numpy.sqrt(pf['SCI',1].data)\n\n # Look at the modified contents using function above\n view_fits(infile)", "def update_file(file_name, table):\r\n with open(file_name, 'wb') as objFile: # this opens the data file to overwrite with data currently in the inventory\r\n pickle.dump(table, objFile)\r\n objFile.close()", "def oneFile():\n\n academia = pd.read_excel('DataPrep/Academia.xlsm', sheet_name = 'Universities - projects')\n academia = academia[[\"University Name\", \"Project Name\", \"Partners\", \"Funding\", \"Classification\", \"Source\"]]\n academia[\"Project Name\"].replace('', np.nan, inplace=True)\n academia.dropna(subset=[\"Project Name\"], inplace=True)\n academia[\"Project Name\"].replace(np.nan, '', inplace=True)\n academia.columns = [\"projLead\", \"projName\", \"projCollab\", \"projFunding\", \"projGrouping\", \"Source\"]\n\n industry = pd.read_excel('DataPrep/Industry.xlsm')\n industry = industry[[\"Company Name\", \"Project Name\", \"UK Partners\", \"Classification\", \"Source\"]]\n industry[\"Project Name\"].replace('', np.nan, inplace=True)\n industry.dropna(subset=[\"Project Name\"], inplace=True)\n industry[\"Project Name\"].replace(np.nan, '', inplace=True)\n industry.columns = [\"projLead\", \"projName\", \"projCollab\", \"projGrouping\", \"Source\"]\n industry[\"projFunding\"] = \"\"\n\n frames = [academia, industry]\n\n df = academia.append(industry, ignore_index=True)\n df.to_pickle('Pickles/excel_merged.pickle')\n df.to_excel('DataPrep/merged.xlsx')", "def test_update_ifc_files(self):\n pass", "def merge_cmd(out_file, airr_files, drop=False, debug=False):\n return airr.interface.merge_rearrangement(out_file, airr_files, drop=drop, debug=debug)", "def update_bank_data(self):\n uis = self.src['b_uis'].read_data()\n sip = self.src['b_sip'].read_data()\n nmv = self.src['nomv'].read_data()\n bcap = self.src['rcap'].read_data()\n totbats = uis*sip\n self.src['bnk_tbats'].write_data(totbats)\n self.src['bnk_cap'].write_data(round(sip*bcap,2))\n self.src['bnk_vo'].write_data(round(uis*nmv,2))", "def cmd_write_bloominp(self):\n if not os.path.exists(self.original_bloominp_path):\n # pdb.set_trace() \n self.log.warning(\"BLOOM not found (%s)! 
Tread carefully\"%self.original_bloominp_path)\n return\n \n dst=os.path.join(self.base_path,'bloominp.d09')\n if os.path.exists(dst) and not self.overwrite:\n raise Exception(\"%s exists, but overwrite is False\"%dst)\n \n shutil.copyfile(self.original_bloominp_path,dst)", "def copy_bias():\n\n #grab the data file from nres_common, make the header\n from astropy.io import fits\n\n bias = nr.dat.astype(float)\n\n prihdr = fits.Header()\n prihdr['MJD'] = nr.mjdc, 'Creation date'\n prihdr['NFRAVGD'] = 1, 'avgd this many frames'\n prihdr['ORIGNAME'] = nr.filename, '1st filename'\n prihdr['SITEID'] = nr.site, ' '\n prihdr['INSTRUME'] = nr.camera, ' '\n prihdr['OBSTYPE'] = 'BIAS', ' '\n prihdr['EXPTIME'] = nr.exptime, ' '\n #prihdu = fits.PrimaryHDU(header=prihdr) Don't think I need.\n\n biaso='BIAS'+str(nr.datestrc)+'.fits'\n biasdir=nr.nresroot+nr.biasdir\n biasout=nr.nresroot+nr.biasdir+biaso\n\n if not os.path.exists(biasdir):\n os.makedirs(biasdir)\n\n fits.writeto(biasout, bias, prihdr)\n\n import stds_addline\n stds_addline.stds_addline('BIAS','bias/'+biaso,1,nr.site,nr.camera,nr.jdc,'0000')\n\n if nr.verbose==1:\n print('*** copy_bias ***')\n print('File In = ', nr.filin0)\n naxes = nr.dathdr, ['NAXIS']\n nx = nr.dathdr, ['NAXIS1']\n ny = nr.dathdr, ['NAXIS2']\n print('Naxes, Nx, Ny = ', naxes, nx, ny)\n print('Wrote file to bias dir:')\n print(biasout)\n print('Added line to reduced/csv/standards.csv')", "def merge_metadata_files(metadata_files, output_file):\n if len(metadata_files) > 1:\n merged_metadata_df = pd.concat(metadata_files)\n \n # If we have duplicate rows we currently are going to drop the last\n # set of rows until update logic is put in place\n merged_metadata_df = merged_metadata_df.drop_duplicates(subset=['Site/Sub/Coll ID', 'data_type'], \n keep='last')\n merged_metadata_df.to_csv(output_file, index=False)\n else:\n output_file = metadata_files[0]\n\n return output_file", "def updateIntoCsv(self,filename,where):\n\t\tpass", "def cmd_write_bloominp(self):\n if not os.path.exists(self.original_bloominp_path):\n # pdb.set_trace()\n self.log.warning(\"BLOOM not found (%s)! Tread carefully\"%self.original_bloominp_path)\n return\n \n dst=os.path.join(self.base_path,'bloominp.d09')\n if not self.overwrite:\n assert not os.path.exists(dst)\n shutil.copyfile(self.original_bloominp_path,dst)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select a set of iMIS numbers from the given file.
def select_numbers(file_path=None, how_many=3, make_backup=False, use_all=False):
    # Read in the iMIS data
    imis_file = ImisFile(file_path)
    # Set-up the random number generator
    random.seed()
    random.randrange(0, len(imis_file.active_member_list))
    # Select the desired number of iMIS numbers
    selected_members = []
    while len(selected_members) < how_many:
        new_idx = random.randint(0, len(imis_file.active_member_list))
        member = imis_file.active_member_list[new_idx]
        if use_all or len(member.dates_selected) < 1:
            # We select this one.
            if len(member.dates_selected) < 1:
                member.dates_selected = time.strftime("%Y%m%d")
            else:
                member.dates_selected += ':' + time.strftime("%Y%m%d")
            selected_members.append(member)
    if make_backup:
        shutil.copy(file_path, file_path+".bk")
    imis_file.write()
    print('Selected Members')
    print('---------------------')
    for member in selected_members:
        print(str(member))
    return selected_members
[ "def read_ints(file_name):\n with open(file_name) as f:\n return [int(x) for x in f.readlines()]", "def read_rad_indsets(fname):\n\n try:\n f = open(fname, \"r\")\n except IOError:\n print(\"Could not open file:\" + fname)\n sys.exit()\n with f:\n risd = f.readlines()\n\n n_line = len(risd)\n\n ris = [ [] for l in range(n_line) ]\n for line in range(n_line):\n ris[line] = (\" \".join(risd[line].split())).split(\" \")\n ris[line] = [ int(r) for r in ris[line] ]\n\n return ris", "def extract_cr_mask(filename):\n dq = get_groupdq(filename)\n cr_hits = flag_map(dq, 'JUMP_DET')\n return cr_hits", "def read_file(file_path):\n numbers = io.read_numbers_from_file(file_path)\n for each in numbers:\n print each", "def select_features(file):\n with open(file + '_data.pickle', 'rb') as data:\n reviews = pickle.load(data)\n X = np.array([\" \".join(review['tokens']) for review in reviews])\n Y = np.array([review['class'] for review in reviews])\n cv = CountVectorizer(max_df=0.95, min_df=2,\n max_features=10000)\n X_vec = cv.fit_transform(X)\n select_count = 1000\n res = sorted(list(zip(cv.get_feature_names(),\n mutual_info_classif(X_vec, Y, discrete_features=True)\n )), key=lambda x: x[1], reverse=True)[0:select_count]\n print(\"Top \" + str(select_count) + \" features according to chi square test:\")\n pprint(res)\n print(len(res))\n\n chi_stats, p_vals = chi2(X_vec, Y)\n\n chi_res = sorted(list(zip(cv.get_feature_names(),\n chi_stats\n )), key=lambda x: x[1], reverse=True)[0:select_count]\n\n print(\"Top \" + str(select_count) + \" features according to chi square test:\")\n pprint(chi_res)\n print(len(chi_res))\n\n selected_features = list(set([x[0] for x in res]) & set([x[0] for x in chi_res]))\n print('The selected features are:')\n pprint(selected_features)\n print(str(len(selected_features)) + \" features have been selected\")\n\n with open(file + \"_selected_features.pickle\", 'wb') as selected_features_pickle:\n pickle.dump(selected_features, selected_features_pickle, protocol=-1)", "def get_int_code_from_file(file_name):\n int_code = []\n\n with open(file_name) as file:\n lines = file.readlines()\n for line in lines:\n int_code.extend([int(integer) for integer in line.rstrip().split(',')])\n\n return int_code", "def load_mask(filename):\n mask = defaultdict(dict)\n header = []\n with tofile.open_file_handle(filename) as fh:\n for line in fh.readlines():\n seqid, start, end, *cols = line.rstrip().split(\"\\t\")\n if cols is None:\n cols = []\n if seqid == \"sequence\" and start == \"start\":\n header = cols\n continue\n mask[seqid].update({int(start): {\"end\": int(end), \"cols\": cols}})\n return mask, header", "def open_init_file(self, filename):\r\n my_file = open(filename, \"r\")\r\n line_list = []\r\n for line in my_file:\r\n a_line = line.strip().split()\r\n line_list.append(a_line)\r\n int_list = self.int_lists(line_list)\r\n return int_list", "def extract_file_ids(file_path):\n with open(file_path, \"r\") as f:\n content = f.read()\n\n ids = ID_RE.finditer(content)\n return [id.group(1) for id in ids]", "def _read_test_ids(self):\n with open(FLAGS.test_set_ids_file, \"r\", encoding=_ENCODING) as f:\n return set(f.read().split(\"\\n\"))", "def load_data(file: Path) -> List[int]:\n with file.open() as f:\n values = f.read().split('\\n')\n return list(map(int, filter(None, values)))", "def readLines(filename):\n if filename!=None and os.path.isfile(filename):\n if filename!=\"stdin\":\n lines = open(filename, \"r\").readlines()\n else:\n lines = stdin.readlines()\n pmids = set()\n for l 
in lines:\n if len(l)>1:\n pmids.add(l.split('\\t')[0])\n return pmids\n else:\n return set()", "def load_SCN_file(self, infiles):\n #TODO: check if infiles is valid entry: single file or a list of files \n if isinstance(infiles, str):\n if os.path.isfile(infiles):\n infile = infiles\n elif isinstance(infiles, list):\n if os.path.isfile(infiles[0]):\n infile = infiles[0]\n #TODO: enable taking several scan files and join in a single record.\n # Just a single file could be loaded at present.\n self.header = read_header(infile, self.verbose)\n self.itint, iampl, self.iprop = read_data(\n infile, self.header)\n self.iampl = iampl.astype(float) * self.header['calfac2']\n self.origin = \"Intervals loaded from SCN file: \" + infile\n self.is_loaded = True\n self._tres = 0.0\n self.rtint, self.rampl, self.rprop = self.itint, self.iampl, self.iprop\n self._set_periods()", "def crosssection_from_file(file, m_med_target):\n logger.debug('Loading xsec list from file {0}'.format(file))\n with open(file ,'r') as f:\n for line in decomment(f):\n m_med, xs = line.split()\n m_med = float(m_med)\n if m_med == m_med_target:\n xs = float(xs)\n logger.debug('Found xs = {0} for m_med = {1}'.format(xs, m_med_target))\n return xs\n raise ValueError(\n 'Could not find cross section for m_med = {0} in {1}'\n .format(m_med_target, file)\n )", "def parser_file(file_in, header=False):\n df = pd.read_csv(file_in, sep=SEPARATOR)\n try:\n df = df.sort_values(by=['score'], ascending=False)\n\n except Exception as e:\n\n print('cannot sort ', file_in)\n\n\n\n try:\n ids = df['node,layer'].values\n except:\n #print('WARNING: cannot select \\\"node,layer\\\" perform a replace operation if needed')\n ids = df['node'].values\n\n return ids", "def read_CIDs():\n fin=open('CIDs_final.txt','r')\n flines=fin.readlines()\n fin.close()\n CIDs=[]\n for line in flines:\n if line[-1]=='\\n':\n CIDs.append(int(line[:-1]))\n else:\n CIDs.append(int(line))\n return CIDs", "def get_number_list_from_file(file):\n with open(file) as f:\n lines = [float(line.rstrip('\\n')) for line in f]\n\n return lines", "def get_values_from_file(file, count):\n for i in range(count):\n try:\n yield int(file.readline().strip().split(',')[1])\n except IndexError:\n # Ignore empty lines\n pass", "def select_all_snps(infile, chrr, start, end, ont_file):\r\n\tf=open(infile)\r\n\tdic={}\r\n\tfor i in f:\r\n\t\tline=i.split(\"\\t\")\r\n\t\tif line[0]==\"chr\"+chrr or line[0]==chrr:\r\n\t\t\tif int(start)<=int(line[1]) and int(end)>=int(line[2]):\r\n\t\t\t\tif float(line[7].strip())<=0.00000005:\r\n\t\t\t\t\tif change_trait_format(line[3]) not in dic:\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])]=[1,[float(line[7].strip())], 0, []]#primeiro os mais significativos, segudo os menos\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])][0]+=1\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])][1].append(float(line[7].strip()))#confirmar que isto funciona, sendo que o p-value esta em notacao cientifica\r\n\t\t\t\telif float(line[7].strip())<0.00001:\r\n\t\t\t\t\tif change_trait_format(line[3]) not in dic:\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])]=[0, [], 1,[float(line[7].strip())]]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])][2]+=1\r\n\t\t\t\t\t\tdic[change_trait_format(line[3])][3].append(float(line[7].strip()))#confirmar que isto funciona, sendo que o p-value esta em notacao cientifica\t\t\t\t\t\r\n\tf.close()\r\n\tnewdic={}\r\n\tfor key,value in dic.items():\r\n\t\tif 
len(value[1])!=0:\r\n\t\t\taa=formatnote(str(min(value[1])))\r\n\t\tif len(value[3])!=0:\r\n\t\t\tbb=formatnote(str(min(value[3])))\r\n\t\tif value[0]!=0 and str(value[0])+\"b\" not in newdic:\r\n\t\t\tnewdic[str(value[0])+\"b\"]=[[key],[\"[\"+aa+\"]\"]]#confirmar que isto funciona, sendo que o p-value esta em notacao cientifica\r\n\t\telif value[0]!=0 and str(value[0])+\"b\" in newdic:\r\n\t\t\tnewdic[str(value[0])+\"b\"][0].append(key)\r\n\t\t\tnewdic[str(value[0])+\"b\"][1].append(\"[\"+aa+\"]\")\r\n\t\tif value[2]!=0 and str(value[2]) not in newdic:\r\n\t\t\tnewdic[str(value[2])]=[[key],[\"[\"+bb+\"]\"]]#confirmar que isto funciona, sendo que o p-value esta em notacao cientifica\r\n\t\telif value[2]!=0 and str(value[2]) in newdic:\r\n\t\t\tnewdic[str(value[2])][0].append(key)\r\n\t\t\tnewdic[str(value[2])][1].append(\"[\"+bb+\"]\")\r\n\tffdic=check_corres(ont_file, newdic)\r\n\treturn ffdic" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform a list of gene symbols to entrez_ids and return a tuple of dataframes with the results
def genesymbols_2_entrezids(genelist):
    # should check that genelist input does not have 'na' values
    probes_file = pd.read_csv('./data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv',
                              usecols=['gene_symbol', 'entrez_id']).drop_duplicates()
    has_entrez = probes_file[probes_file.gene_symbol.isin(genelist)]
    has_entrez = has_entrez.drop_duplicates().dropna(subset=['entrez_id'])
    return has_entrez
[ "def get_entrez_conversion(genes: Tuple[str, ...]) -> Dict[str, str]:\n # Query mapping from gene IDs to entrez IDs\n mg = mygene.MyGeneInfo()\n df = mg.getgenes(genes, fields=\"entrezgene\", as_dataframe=1, species=\"human\")\n df = df[~df.entrezgene.isna()] # only keep genes with entrez IDs\n\n # Only preserve one-to-one mappings\n df = (\n df\n .reset_index()\n .drop_duplicates(subset=\"query\", keep=False)\n .drop_duplicates(subset=\"entrezgene\", keep=False)\n )\n id_to_entrez = dict(zip(df[\"query\"].astype(str), df.entrezgene.astype(str)))\n logger.info(f\"Converting gene IDs to entrez, {df.shape[0]:,} (out of {len(genes):,}) mapped.\")\n\n return id_to_entrez", "def gene_id_list(q, tax):\n return sorted([Gene.objects.get(pk=g).entrez_id for g in flatten_query(parse_gene_abstractquery(q, tax))])", "def genenames_from10x(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[-1]:])\n \n return genesymbol#,ensemblid", "def genenames_from10x_mod(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[0]:])\n \n return genesymbol#,ensemblid", "def find_multiple_by(**kwargs):\n results = post(endpoints['post-query'], params=kwargs)\n genes = []\n for r in results:\n genes.append(Gene(r))\n return genes", "def collect_all_genomes():\n\n def str2num(s,cat=False,force=True):\n \"\"\"\n Converts string to integer\n eg. ensembl92 to 92\n\n :param s: string\n :param cat: Whether to concatenate detected integers. eg. 20,23 to 2023\n :param force: If True, ignores decimal point error. \n \"\"\"\n import re \n if '.' in s and not force:\n raise ValueError(f\"A string can only be converted to integeres, found a '.' in {s}\")\n n=re.findall(r'\\d+',s)\n if len(n)==0:\n raise ValueError(\"No digits found in string {}\".format(s)) \n elif len(n)==1:\n return int(n[0])\n else:\n if cat:\n return int(''.join(n))\n else:\n return n\n\n from glob import glob\n from os.path import dirname,basename,exists\n import numpy as np\n import pandas as pd\n from pyensembl.species import normalize_species_name,Species\n \n # here's how I get the .cache directory eg. 
'/home/user/.cache/pyensembl'\n import datacache\n pyensembl_cache_dir=f\"{dirname(datacache.get_data_dir())}/pyensembl\" #FIXME if genomes are installed at other places than .cache\n\n # all the assemblies\n assemblies=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/*\")]\n # dataframe that contains all the info (and can be exported as a tsv).\n dspecies=pd.DataFrame(columns=['latin name','release','synonymn','assembly'])\n # assempy to release min max dict needed as an input to create Species object\n assembly2releasesminmax={}\n # following loop populates the dataframe \n genomei=0\n for assembly in assemblies:\n releases=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/{assembly}/*\")]\n for release in releases:\n releasei=str2num(release) #FIXME is realease is a float\n genome_dir=f\"{pyensembl_cache_dir}/{assembly}/{release}\"\n genome_files=glob(f\"{genome_dir}/*\")\n is_genome_installed=True if len(genome_files)>4 else False #FIXME need more than 4 (.gz) files to be strict\n if is_genome_installed:\n dspecies.loc[genomei,'assembly']=assembly\n dspecies.loc[genomei,'release']=releasei\n dspecies.loc[genomei,'synonymn']=basename(genome_files[0]).split('.')[0]\n dspecies.loc[genomei,'latin name']=normalize_species_name(dspecies.loc[genomei,'synonymn'])\n genomei+=1\n # following loop generates the Species object\n for spc in dspecies['latin name'].unique():\n assembly2releases={}\n for assembly in dspecies.loc[(dspecies['latin name']==spc),'assembly'].unique():\n d=dspecies.loc[((dspecies['latin name']==spc) & (dspecies['assembly']==assembly)),:]\n assembly2releases[assembly]=d['release'].min(),d['release'].max() #FIXME if MAX_ENSEMBL_RELEASE very important and has to be used\n Species.register(\n latin_name=spc,\n synonyms=dspecies.loc[(dspecies['latin name']==spc),'synonymn'].unique().tolist(),\n reference_assemblies=assembly2releases)\n Species.dspecies=dspecies\n return Species", "def entrez_to_symbol(entrez_id):\n client = ensembl_client.EnsemblRestApiClient()\n url = ''.join([client.server, '/xrefs/name/human/', entrez_id, '?external_db=EntrezGene'])\n results = client.send_request(url)\n for gene in results: # result is an array. 
First element is enough\n return gene['display_id']", "def get_encounter_gids(ibs, eid_list):\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n gids_list = ibs.db.get(EG_RELATION_TABLE, ('image_rowid',), eid_list, id_colname='encounter_rowid', unpack_scalars=False)\n #print('get_encounter_gids')\n #print('eid_list = %r' % (eid_list,))\n #print('gids_list = %r' % (gids_list,))\n return gids_list", "def get_tickers(df: DataFrame) -> DataFrame:\n get_tickers_udf = udf(lambda x: extract_tickers(x))\n df = df.withColumn(\"ticker_ids\", get_tickers_udf(df[\"symbols\"]))\n df = df.drop(\"symbols\")\n return df", "def get_image_eids(ibs, gid_list):\n # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS\n colnames = ('encounter_rowid',)\n eids_list = ibs.db.get(EG_RELATION_TABLE, colnames, gid_list,\n id_colname='image_rowid', unpack_scalars=False)\n return eids_list", "def extract_expression_features(expr_df, gene_model, type):\n expr_df = expr_df[[\"icgc_donor_id\", \"gene_model\", \"gene_id\", type]]\n expr_df = expr_df[expr_df[\"gene_model\"] == gene_model]\n expr_df = expr_df.drop(\"gene_model\", axis=1)\n expr_df[type] = expr_df[type].astype(\"float16\")\n expr_df = expr_df.drop_duplicates()\n\n donors = expr_df[\"icgc_donor_id\"].unique()\n genes = expr_df[\"gene_id\"].unique()\n helper_list = [list(a) for a in\n zip(expr_df[\"icgc_donor_id\"], expr_df[\"gene_id\"], expr_df[type])]\n feature_df = pd.DataFrame(np.nan, index=donors, columns=genes, dtype=\"float16\")\n for expr in helper_list:\n feature_df.at[expr[0], expr[1]] = expr[2]\n\n return feature_df", "def bfs_trrust_database_search_target(list_of_input_genes, trrust_filepath=\"../trrust_rawdata.human.tsv\", column_names=[\"Transcription factor\", \"Target gene\", \"Relationship\", \"PubMED identifier\"], return_all=False):\n\n\n\tdf = pd.read_csv(trrust_filepath, delimiter='\\t', header=None)\n\tdf.columns = column_names\n\n\tmaster_visited = []\n\tmaster_relationships = []\n\n\n\tif not return_all:\n\t\tfor gene in list_of_input_genes:\n\t\t\tprint(gene)\n\n\t\t\tqueue = [gene]\n\t\t\tvisited = []\n\t\t\trelationships = []\n\n\t\t\twhile queue:\n\t\t\t\tcurrent_gene = queue.pop(0)\n\t\t\t\tfor target_gene in df.loc[df[\"Transcription factor\"]==current_gene.upper()][\"Target gene\"].values:\n\t\t\t\t\tif target_gene not in visited:\n\t\t\t\t\t\tvisited.append(target_gene)\n\t\t\t\t\t\tqueue.append(target_gene)\n\t\t\t\t\t\trelationships.append([current_gene,target_gene])\n\n\t\t\tmaster_visited.append(visited)\n\t\t\tmaster_relationships.append(relationships)\n\n\telif return_all:\n\t\tfor index, row in df.iterrows():\n\t\t\tmaster_relationships.append([row[\"Transcription factor\"], row[\"Target gene\"]])\n\t\t\tif index % 1000==0:\n\t\t\t\tprint(index)\n\t\tmaster_relationships = [master_relationships]\n\n\tflat_visited = []\n\tfor sub_list in master_visited:\n\t\tfor gene_name in sub_list:\n\t\t\tflat_visited.append(gene_name)\n\n\tcount_dict = {}\n\tfor gene_name in flat_visited:\n\t\tif (gene_name in count_dict):\n\t\t\tcount_dict[gene_name] += 1\n\t\telse:\n\t\t\tcount_dict[gene_name] = 1\n\n\treturn master_visited, master_relationships, count_dict", "def exportGeneIdProbeIdMapping():\n conn = psycopg2.connect(postgres_uri)\n cursor = conn.cursor()\n cursor.execute(\"select distinct from_id,to_id from stemformatics.feature_mappings where db_id=59\")\n result = cursor.fetchall()\n cursor.close()\n conn.close()\n pandas.DataFrame(result, columns=[\"geneId\",\"probeId\"]).to_csv(\"/mnt/data/portal_data/GeneIdProbeIdMapping.tsv\", 
sep=\"\\t\", index=False)", "def getSequences(self, genelist=None, loc_key='loc', replace_loc_key=True, strand_key=False,\n mask=False, pointify=False, delta=False, **kargs):\n raise NotImplementedError", "def extract_feature_sql(server, seqfeature_ids, type=['CDS', 'rRNA', 'tRNA'], qualifier=['ID','locus_tag'], translate=False, file=sys.stdout):\n for chunk in chunks(seqfeature_ids, 900):\n sql = \"SELECT f.seqfeature_id AS gid, \\\n fl.strand,\\\n substr(s.seq, COALESCE(fl.start_pos, 1), (COALESCE(fl.end_pos, s.length) - COALESCE(fl.start_pos, 1))+1) AS subseq\\\n FROM seqfeature f \\\n JOIN term t ON f.type_term_id=t.term_id \\\n JOIN location fl USING(seqfeature_id) \\\n JOIN biosequence s USING(bioentry_id)\\\n WHERE t.name IN ({}) AND f.seqfeature_id IN ({})\".format(generate_placeholders(len(type)), generate_placeholders(len(chunk)))\n\n features = server.adaptor.execute_and_fetchall(sql, tuple(type + chunk) )\n results = {}\n for sfid, strand, seq in features:\n if seq is None:\n # at the moment partial features are stored with a null at either the start_pos or end_pos\n # in the location table in the database. There isn't a good way at the moment\n print(\"seqfeature {} could not be extracted, problem with the location\".format(sfid), file=sys.stderr)\n else:\n results[sfid] = (strand, seq)\n\n qual_select_sql = 'SELECT seqfeature_id, name, value FROM seqfeature_qualifier_value qv, term t WHERE seqfeature_id IN ({}) AND t.term_id = qv.term_id'.format(generate_placeholders(len(chunk)))\n qv = {}\n for seqfeature_id, name, value in server.adaptor.execute_and_fetchall(qual_select_sql, tuple(chunk)):\n try:\n qv[seqfeature_id][name] = value\n except KeyError:\n qv[seqfeature_id] = {}\n qv[seqfeature_id][name] = value\n\n tax_name_select_sql = 'SELECT seqfeature.seqfeature_id, taxon_name.name FROM seqfeature JOIN bioentry ON seqfeature.bioentry_id = bioentry.bioentry_id JOIN taxon_name ON bioentry.taxon_id = taxon_name.taxon_id WHERE seqfeature.seqfeature_id IN ({}) AND taxon_name.name_class = \\'scientific name\\''.format(generate_placeholders(len(chunk)))\n tax = {}\n for seqfeature_id, name in server.adaptor.execute_and_fetchall(tax_name_select_sql, tuple(chunk)):\n tax[seqfeature_id] = name\n\n for seqfeature_id, (strand, seq) in results.items():\n # remove any pseudo genes\n if 'pseudo' in qv[seqfeature_id]:\n continue\n\n name = str(seqfeature_id)\n for q in qualifier:\n try:\n name += ' ' + qv[seqfeature_id][q]\n\n except KeyError:\n pass\n\n if strand == -1:\n try:\n seq = reverse_complement(results[seqfeature_id][1])\n except TypeError as e:\n raise TypeError(\"failed to retieve sequence for {}\".format(seqfeature_id))\n\n try:\n codon_start = int(qv[seqfeature_id]['codon_start'][0]) - 1\n except KeyError:\n try:\n codon_start = int(qv[seqfeature_id]['phase'][0])\n except KeyError:\n codon_start = 0\n\n seq = seq[codon_start:]\n if translate:\n seq = bio_translate(seq)\n try:\n name += ' ' + qv[seqfeature_id]['product']\n except KeyError:\n pass\n\n try:\n name += ' [' + tax[seqfeature_id] + ']'\n except KeyError:\n pass\n\n print(\">{}\\n{}\".format(name, seq), file=file)", "def relabel_expr(expr: pd.DataFrame) -> pd.Series:\n\n # Separate lists for ease of indexing and replacing columns, though\n # elements in each list directly correspond. 
ensembl_ids_in_entrez\n # will be used to index into `expr`, then entrez_ids will be used\n # as a replacement index\n ensembl_ids_in_entrez = []\n entrez_ids = []\n for ensembl_id_full in expr.index:\n ensembl_id, *junk = ensembl_id_full.split('.')\n if ensembl_id in ensembl_entrez_mapping:\n ensembl_ids_in_entrez.append(ensembl_id_full)\n entrez_ids.append(ensembl_entrez_mapping[ensembl_id])\n\n subset = expr.loc[ensembl_ids_in_entrez, :].iloc[:, 0]\n subset.index = entrez_ids\n\n entrez_id_counts = Counter(entrez_ids)\n single_genes = {gene for (gene, count) in entrez_id_counts.items() if count == 1}\n duplicated_genes = {gene for (gene, count) in entrez_id_counts.items() if count > 1}\n\n s = pd.Series(index=entrez_id_counts)\n s.loc[single_genes] = subset.loc[single_genes]\n for gene in duplicated_genes:\n s.loc[gene] = subset.loc[gene].median()\n\n return s", "def get_ids():", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene = init_gene() ## gene section related tags\n gene['id'] = gene_counter\n gene['name'] = gene_entry[1]\n gene['chr'] = genes_cmpt[gene_entry]['chr']\n gene['source'] = genes_cmpt[gene_entry]['source']\n gene['start'] = genes_cmpt[gene_entry]['start']\n gene['stop'] = genes_cmpt[gene_entry]['stop']\n gene['strand'] = genes_cmpt[gene_entry]['strand']\n if gene['strand'] != '+' and gene['strand'] != '-': gene['strand'] = '.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n general_info = dict()\n ## TODO add more gene related information from attribute column of GFF file based on the reserved key words\n if 'Name' in genes_cmpt[gene_entry]:general_info['Name'] = genes_cmpt[gene_entry]['Name']\n if 'Note' in genes_cmpt[gene_entry]:general_info['Note'] = genes_cmpt[gene_entry]['Note']\n if 'Alias' in genes_cmpt[gene_entry]:general_info['Alias'] = genes_cmpt[gene_entry]['Alias']\n if general_info == {}:general_info['ID'] = gene_entry[1]\n gene['gene_info'] = general_info\n if len(transcripts_cmpt[gene_entry]) > 1:gene['is_alt_spliced'] = 1\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n exon_cod = []\n if len(exons_cmpt) != 0: ## rQuant requires only exon coordinates of the transcripts \n if (gene['chr'], tids['ID']) in exons_cmpt:\n for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]:exon_cod.append([feat_exon['start'], feat_exon['stop']])\n else: ## build exon coordinates from UTR3, UTR5 and CDS\n utr5_pos, cds_pos, utr3_pos = [], [], []\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]:utr5_pos.append([feat_utr5['start'], feat_utr5['stop']])\n if (gene['chr'], tids['ID']) in cds_cmpt:\n for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]:cds_pos.append([feat_cds['start'], feat_cds['stop']])\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]:utr3_pos.append([feat_utr3['start'], feat_utr3['stop']])\n exon_cod = CreateExon(gene['strand'], utr5_pos, cds_pos, utr3_pos) \n ## generalize the coordinate system for exons, GFF file may contain ascending or descending order.\n if gene['strand'] == '-':\n if exon_cod != [] and len(exon_cod) != 1:\n if exon_cod[0][0] > exon_cod[-1][0]: exon_cod.reverse()\n if exon_cod: gene['exons'].append(exon_cod)\n ## 
make a compact form of features in each gene struct to write into .mat format.\n gene = FeatureValueFormat(gene)\n gene_counter += 1\n gene_models.append(gene)\n return gene_models", "def getSpectra2genome(sample_df, expanded_genomes):\n spectra2protein_dic, spectra2genome_dic = {}, {}\n for j, sample_row in sample_df.iterrows():\n proteins = [item.split('(pre')[0] for item in sample_row['Protein'].split(';') if item.startswith('XXX_') == False]\n \n if proteins:\n spectrum = sample_row['SpecID']\n spectra2protein_dic[spectrum] = proteins\n spectra2genome_dic[spectrum] = []\n for protein in proteins:\n genome = proteins2genomes_dic[protein]\n if genome in expanded_genomes:\n if genome not in spectra2genome_dic[spectrum]: #make sure you only mention each genome once per spectrum at most\n spectra2genome_dic[spectrum].append(genome)\n return spectra2genome_dic" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an embedding indexed by probe_ids to one indexed by gene symbols by averaging the probes for the same gene symbol.
def convert_probe_emb_to_gene_emb(probe_emb):
    all_genes = pd.read_csv('./data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv')
    probe2gene = all_genes[all_genes.probeset_name.isin(probe_emb.index)].loc[:, ['probeset_name', 'gene_symbol']]
    # remove probes for 'A_' and 'CUST_' gene_symbols
    probe2gene = probe2gene[~((probe2gene.gene_symbol.str.startswith('A_')) | (probe2gene.gene_symbol.str.startswith('CUST_')))]
    gene_emb = probe_emb.merge(probe2gene, left_index=True, right_on='probeset_name').drop(
        'probeset_name', axis=1).groupby('gene_symbol').mean()
    return gene_emb.drop('na')
[ "def transform_sample(vfdb_tool_result, gene_names):\n out = {'rpkm': {}, 'rpkmg': {}}\n for gene_name in gene_names:\n try:\n vals = vfdb_tool_result['genes'][gene_name]\n rpkm, rpkmg = vals['rpkm'], vals['rpkmg']\n except KeyError:\n rpkm, rpkmg = 0, 0\n out['rpkm'][gene_name] = np.log10(rpkm + 1)\n out['rpkmg'][gene_name] = np.log10(rpkmg + 1)\n return out", "def getSpectra2genome(sample_df, expanded_genomes):\n spectra2protein_dic, spectra2genome_dic = {}, {}\n for j, sample_row in sample_df.iterrows():\n proteins = [item.split('(pre')[0] for item in sample_row['Protein'].split(';') if item.startswith('XXX_') == False]\n \n if proteins:\n spectrum = sample_row['SpecID']\n spectra2protein_dic[spectrum] = proteins\n spectra2genome_dic[spectrum] = []\n for protein in proteins:\n genome = proteins2genomes_dic[protein]\n if genome in expanded_genomes:\n if genome not in spectra2genome_dic[spectrum]: #make sure you only mention each genome once per spectrum at most\n spectra2genome_dic[spectrum].append(genome)\n return spectra2genome_dic", "def encode_glove_average(X_train , X_test , embedding_dim , word_index):\r\n import os\r\n import numpy as np\r\n #Embedding the vector in this step\r\n EMBEDDING_DIM = embedding_dim\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n \r\n #This is very time consuming, do not run again and again\r\n GLOVE_DIR = 'C:/Users/Nipun.Puri/Desktop/wordEmbed/'\r\n\r\n embeddings_index = {} #dictionary keys - words and values are the 100 dimension vector\r\n f = open(os.path.join(GLOVE_DIR, FILE_NAME) , encoding=\"utf8\")\r\n for line in f:\r\n values = line.split() #each line in the glove text file\r\n word = values[0] #The thing on the 0th position is the word\r\n coefs = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = coefs\r\n f.close()\r\n\r\n print('Found %s word vectors.' 
% len(embeddings_index))\r\n \r\n embedding_matrix = np.ranom.rand(len(word_index) + 1, EMBEDDING_DIM) / 100\r\n for word, i in word_index.items():\r\n embedding_vector = embeddings_index.get(word)\r\n if embedding_vector is not None:\r\n # words not found in embedding index will be the same randomly intialized, better than zero\r\n embedding_matrix[i] = embedding_vector\r\n \r\n print(\"The dimention of the embedding Matrix should be : (nx , EMBEDDING_DIM)\")\r\n print(\"And it actually is: \" + str(embedding_matrix.shape))\r\n \r\n import itertools\r\n\r\n zeroList = np.zeros((1,EMBEDDING_DIM)).tolist()[0]\r\n x_train_glove = np.zeros((X_train.shape[0] , EMBEDDING_DIM))\r\n row = 0\r\n for document in X_train:\r\n #Looping over each Tweet\r\n vectorTemp = np.zeros((1,EMBEDDING_DIM)) #initializing the vector representation of the tweet\r\n #This will become a 1 x 300 vector\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp = vectorTemp + np.zeros((1,EMBEDDING_DIM))\r\n else:\r\n vectorTemp = vectorTemp + np.array(embedding_matrix[word , :])\r\n vectorTemp = vectorTemp / 30\r\n #And this helps us in decomposing it\r\n x_train_glove[row] = vectorTemp\r\n row+=1\r\n\r\n x_test_glove = np.zeros((X_test.shape[0] , EMBEDDING_DIM))\r\n row = 0\r\n for document in X_test:\r\n #Looping over each Tweet\r\n vectorTemp = np.zeros((1,EMBEDDING_DIM)) #initializing the vector representation of the tweet\r\n for word in document:\r\n if word == 0:#This is the padded one\r\n vectorTemp = vectorTemp + np.zeros((1,EMBEDDING_DIM))\r\n else:\r\n vectorTemp = vectorTemp + np.array(embedding_matrix[word , :])\r\n vectorTemp = vectorTemp / 30\r\n #And this helps us in decomposing it\r\n x_test_glove[row] = vectorTemp\r\n row+=1\r\n\r\n print(\"Shape of Training set now is: \" + str(x_train_glove.shape))\r\n print(\"Shape of Test set now is: \" + str(x_test_glove.shape))\r\n \r\n return (x_train_glove , x_test_glove)", "def getPhenotye2NormalizedGenomeAbundances(phenotype, samples2phenotypes_dic = samples2phenotypes_dic, sample_quantifications_dir = sample_quantifications_dir):\n print('processing phenotype', phenotype, '...')\n phen_samples = samples2phenotypes_dic[phenotype]\n phen_df = pd.DataFrame(columns = ['genome', 'n_spectra', 'rel_n_spectra_%', 'n_spectra_normalized', 'rel_n_spectra_%_normalized', 'n_samples'])\n phen_dic = {}\n n_samples = len(phen_samples)\n genome2Nsamples_dic = {}\n for sample in phen_samples:\n sample_df = pd.read_csv(sample_quantifications_dir + sample + in_suffix, sep = '\\t')\n for i, row in sample_df.iterrows():\n genome = row['genome']\n n_spectra = row['n_spectra']\n rel_n_spectra = row['rel_n_spectra_%']\n n_spectra_normalized = row['n_spectra_normalized']\n rel_n_spectra_normalized = row['rel_n_spectra_%_normalized']\n if genome not in phen_dic:\n phen_dic[genome] = {'n_spectra' : 0, 'rel_n_spectra_%' : 0, 'n_spectra_normalized' : 0, 'rel_n_spectra_%_normalized' : 0}\n phen_dic[genome]['n_spectra'] += n_spectra\n phen_dic[genome]['rel_n_spectra_%'] += rel_n_spectra\n phen_dic[genome]['n_spectra_normalized'] += n_spectra_normalized\n phen_dic[genome]['rel_n_spectra_%_normalized'] += rel_n_spectra_normalized\n if genome not in genome2Nsamples_dic:\n genome2Nsamples_dic[genome] = 0\n genome2Nsamples_dic[genome] += 1\n \n for i, genome in enumerate(phen_dic):\n phen_df.loc[i] = [genome, phen_dic[genome]['n_spectra'], phen_dic[genome]['rel_n_spectra_%']/n_samples, phen_dic[genome]['n_spectra_normalized'], 
phen_dic[genome]['rel_n_spectra_%_normalized']/n_samples, genome2Nsamples_dic[genome]]\n phen_df.to_csv(out_dir + phenotype + out_suffix, sep = '\\t', index = False)", "def average_by_gene(quad_list_, ratios_df_):\n\n\tfor elm in quad_list_:\n\t\tcell_name = elm[0]\n\t\tgenes_breakdown = elm[1]\n\n\t\tfor sub_elm in genes_breakdown:\n\t\t\tgene = sub_elm[0]\n\t\t\tvals = sub_elm[1]\n\t\t\tv_vals_total = 0\n\t\t\tt_vals_total = 0\n\t\t\t\n\t\t\tfor val in vals:\n\t\t\t\tv_val = int(val.split(':')[0])\n\t\t\t\tt_val = int(val.split(':')[1])\n\t\t\t\tv_vals_total += v_val\n\t\t\t\tt_vals_total += t_val\n\n\t\t\tif t_vals_total != 0:\n\t\t\t\tratio = v_vals_total / t_vals_total\n\t\t\t\tratio = round(ratio, 2) # round to two decimal places\n\t\t\telse:\n\t\t\t\tratio = 0\n\t\t\t\n\t\t\tratios_df_[gene][cell_name] = ratio\n\n\treturn(ratios_df_)", "def get_gene_scores_from_chrom(chrom_array, chrom, all_genes, genes_by_chrom,\n genes_scores):\n try:\n id_list = genes_by_chrom[chrom]\n except KeyError:\n id_list = []\n for id in id_list:\n gene = all_genes[id]\n score = sum(chrom_array[gene.start:gene.end])\n genes_scores[gene.ensembl_id] = score", "def _avg_matches(self, test_target_matches_counts, num):\n avg_total = []\n avg_sdgs = {}\n for i in range(1, 6):\n avg_sdgs[i] = []\n for i in range(num):\n adder, counter = 0, 0\n adder_sdgs = [0, 0, 0, 0, 0]\n counter_sdgs = [0, 0, 0, 0, 0]\n for key in self._matches_by_sent:\n try:\n adder += (self._matches_by_sent[key][i] * test_target_matches_counts[key])\n counter += test_target_matches_counts[key]\n adder_sdgs[int(key[0])-1] += (self._matches_by_sent[key][i] * test_target_matches_counts[key])\n counter_sdgs[int(key[0])-1] += test_target_matches_counts[key]\n except:\n adder += (self._matches_by_sent[key][-1] * test_target_matches_counts[key])\n counter += test_target_matches_counts[key]\n adder_sdgs[int(key[0])-1] += (self._matches_by_sent[key][-1] * test_target_matches_counts[key])\n counter_sdgs[int(key[0])-1] += test_target_matches_counts[key]\n avg_total.append(adder / counter)\n for j in range(1, 6):\n avg_sdgs[j].append(adder_sdgs[j-1]/counter_sdgs[j-1])\n return avg_total, avg_sdgs", "def normalize_index(phyche_index, is_convert_dict=False):\n normalize_phyche_value = []\n for phyche_value in phyche_index:\n average_phyche_value = sum(phyche_value) * 1.0 / len(phyche_value)\n sd_phyche = standard_deviation(phyche_value)\n normalize_phyche_value.append([round((e - average_phyche_value) / sd_phyche, 2) for e in phyche_value])\n\n return normalize_phyche_value", "def generate_embeddings(self):\n for record in self.proteinnet:\n esm_input = [(record.id, \"\".join(record.primary))]\n _, _, batch_tokens = self.batch_converter(esm_input)\n\n if self.use_gpu:\n batch_tokens = batch_tokens.to(device=\"cuda\", non_blocking=True)\n\n with torch.no_grad():\n results = self.model(batch_tokens, repr_layers=[33], return_contacts=True)\n reps = results[\"representations\"][33].to(device=\"cpu\")\n\n for p in range(len(record)):\n x = reps.numpy()[0, 1 + p, :]\n y = record.evolutionary[:, p]\n\n if self.thresh is not None:\n y = (y < self.thresh).astype(float)\n\n yield x, y", "def _sumSupportForGene(self, geneAnnots):\n chrom = geneAnnots.chrom\n return self._sumSupportsByIntron(chrom.name, chrom.start, chrom.end)", "def avg_gpa_by_grades(self):\n\n g = []\n g = list(av_teachers.keys()) #! 
make DRYer\n\n results = {}\n\n for grade in g:\n sum_gpa = 0\n i = 0\n for s in self.students:\n if grade == s.grade_level:\n print('Name:', s.name + ',GPA:', s.gpa)\n sum_gpa += s.gpa\n i += 1\n avg_gpa = sum_gpa/i\n results[grade] = avg_gpa\n return results", "def embed_avg(tgt_embed_list, pred_embed_list):\n Log.info(\"calculate the embed avg start: size = {}\".format(len(tgt_embed_list)))\n embed_avg_list = []\n for tgt_embed, pred_embed in zip(tgt_embed_list, pred_embed_list):\n if len(tgt_embed) == 0 or len(pred_embed) == 0:\n embed_avg_list.append(0.0)\n continue\n tgt_avg_embed = np.divide(np.sum(tgt_embed, 0), np.linalg.norm(np.sum(tgt_embed, 0)))\n pred_avg_embed = np.divide(np.sum(pred_embed, 0), np.linalg.norm(np.sum(pred_embed, 0)))\n embed_avg_score = cal_cosine_similarity(tgt_avg_embed, pred_avg_embed)\n embed_avg_list.append(embed_avg_score)\n embed_avg_score = np.mean(embed_avg_list)\n Log.info(\"calculate the embed avg success!\")\n return embed_avg_score", "def average_grades(grades):\n for key in grades.keys():\n \tgrades[key] = sum(grades[key]) / len(grades[key])\n return grades", "def _embed_index(self) -> None:\n batch = []\n results = []\n for i, example in enumerate(self._index_exs):\n if i % self._log_every == 0:\n logging.info(\"Processed %d / %d examples\", i, len(self._index_exs))\n batch.append(example)\n if len(batch) == self._batch_size:\n results.append(self.embed_batch(batch))\n batch = []\n # Final batch\n if batch:\n results.append(self.embed_batch(batch))\n self._index_embs = np.vstack(results)", "def compute_tf(voc_to_index, file_to_identifiers):\n\n tf = {}\n\n for file_path in file_to_identifiers.keys():\n\n tf[file_path] = [0 for _ in range(len(voc_to_index))]\n\n for word in file_to_identifiers[file_path]:\n\n tf[file_path][voc_to_index[word]] += 1\n\n num_identifiers = len(file_to_identifiers[file_path])\n\n if num_identifiers > 0:\n tf[file_path] = [value / num_identifiers for value in tf[file_path]]\n\n return tf", "def average_one_hots(sent, word_to_ind):\n size = len(word_to_ind)\n sum_one_hots = np.zeros(size)\n for word in sent.text:\n sum_one_hots += get_one_hot(size, word_to_ind[word])\n return sum_one_hots / len(sent.text)", "def aaf(self):\n num_chroms = 0.0\n allele_counts = Counter()\n for s in self.samples:\n if s.gt_type is not None:\n for a in s.gt_alleles:\n allele_counts.update([a])\n num_chroms += 1\n return [allele_counts[str(i)]/num_chroms for i in range(1, len(self.ALT)+1)]", "def encode_probe(self, p):\n\n if isinstance(p, GroupProbe):\n self.encode(p.queries)\n self.encode(p.qry_tot)\n return self", "def calculate_hexa_penta_score(cds_filename, base_freq, output_filename):\n log_scores = {} # key: (hexa-frame), value: log odds score\n hexamer = {0: defaultdict(lambda: 1), 1:defaultdict(lambda: 1), 2:defaultdict(lambda: 1)}\n pentamer = {0: defaultdict(lambda: 4), 1:defaultdict(lambda: 4), 2:defaultdict(lambda: 4)}\n for r in SeqIO.parse(open(cds_filename), 'fasta'):\n seq = str(r.seq).upper()\n seq_len = len(seq)\n #assert seq_len % 3 == 0\n for i in xrange(seq_len-5):\n frame = i % 3\n hexamer[frame][seq[i:i+6]] += 1\n pentamer[frame][seq[i:i+5]] += 1\n i = seq_len - 5\n pentamer[i%3][seq[i:i+5]] += 1\n\n f = open(output_filename, 'w')\n for hexa in itertools.product('ATCG', repeat=6):\n for frame in xrange(3):\n hexa = \"\".join(hexa)\n score = math.log(hexamer[frame][hexa]) - math.log(pentamer[frame][hexa[:5]]) - math.log(base_freq[hexa[-1]])\n f.write(\"{0}-{1}\\t{2}\\n\".format(hexa, frame, score))\n 
log_scores[hexa+'-'+str(frame)] = score\n f.close()\n return log_scores" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a TOML file
def test_toml_load(toml_load: str) -> None:
    results = tomlio.load(toml_load)
    assert results == EXPECTED_TOML
[ "def load_toml(path):\n from toml import loads\n return loads(path.read_text(encoding='utf-8'))", "def _toml(self):\r\n data = {}\r\n with open(self._filename, 'rb') as f:\r\n data = pytoml.load(f)\r\n return self._wrap(data)", "def _toml(self):\r\n return KaoToml(self.filename)", "def __init__(self, toml_file):\n self._toml = toml.load(toml_file)\n self._flat_dict = flatten_dict(self._toml)", "def test_save_and_load(toml_save: str) -> None:\n tomlio.save(toml_save, EXPECTED_TOML)\n\n result = tomlio.load(toml_save)\n\n assert result == EXPECTED_TOML", "def load_from_ftml(self, filename):\n _dirname, _name = os.path.split(filename)\n _fs = fs.open_fs(_dirname)\n xml_content = _fs.readtext(_name)\n name = os.path.basename(filename.replace(\".ftml\", \"\").strip())\n self.load_from_xml_data(xml_content, name)", "def load_from_template(self, db, tag):\n path = os.path.join('templates', tag + '.xml')\n with open(path, 'r') as f:\n text = f.read()\n\n vars_ = db.get_vars()\n self.editor.setText(text.format(**vars_))", "def loadTemplate(self, filename, path):\n # TODO what should \"path\" be relative to? I vote the Template file.\n relPath = os.path.join(self._templatePath, path)\n templateFile = os.path.join(os.path.normpath(relPath), filename)\n self._template, _ = xmlUtils.loadToTree(templateFile)", "def load_yaml(self,infpath,attrpath):\n obj=yaml_manager.readfile(self.render(infpath))\n self.set_nested(attrpath,obj)", "def _load_template(self, template_file):\n pass", "def load_yaml(self, path):\n # TBD\n pass", "def from_graphML(self, in_file):\n pass", "def to_lingo_doc(filepath):\n serif_doc = serifxml.Document(filepath)\n \"\"\":type: serifxml.Document\"\"\"\n\n docid = serif_doc.docid\n lingo_doc = lingoDoc(docid)\n for st_index, sentence in enumerate(serif_doc.sentences):\n st = sentence.sentence_theories[0]\n \"\"\":type: serifxml.SentenceTheory\"\"\"\n if len(st.token_sequence) == 0:\n continue\n st_text, st_start, st_end = get_snippet(serif_doc, st)\n\n tokens = to_tokens(st)\n assert st_start == tokens[0].start_char_offset()\n assert (st_end+1) == tokens[-1].end_char_offset()\n\n s = Sentence(docid, IntPair(st_start, st_end+1), st_text, tokens, st_index)\n add_entity_mentions(st, s, lingo_doc)\n add_value_mentions(st, s, lingo_doc)\n add_names(st, lingo_doc)\n\n lingo_doc.add_sentence(s)\n return lingo_doc", "def _load_translation(self, filename):\n with open(filename) as tfile:\n translations = json.loads(tfile.read())\n for key, value in translations.items():\n if isinstance(value, str):\n self._session.add(Translation(string_id=key, value=value, lang='english'))\n self._session.commit()", "def _load_turicreate_model(self, path):\n return tc.load_model(path)", "def load_template(mol) :\n filename = os.path.join(PROT_INFO_PATH,\"template_%s.txt\"%mol)\n if os.path.isfile(filename) :\n return ProteinTemplate(filename)\n else :\n raise Exception(\"Invalid mol (%s) or file is missing (%s)\"%(mol,filename))", "def load_tdms(self):\n for fname in os.listdir(self.folder):\n ext = os.path.splitext(fname)[1] \n if ext=='.tdms': self.tdms = TdmsFile(self.folder+fname)\n if not self.tdms: print 'Tdms file not found'", "def load_file(self, file_path):\n ...", "def construct_simulation_from_toml(filename: str) -> Simulation:\n with open(filename) as f:\n input_data = toml.load(f)\n\n return Simulation(input_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a TOML file and load it back to confirm.
def test_save_and_load(toml_save: str) -> None:
    tomlio.save(toml_save, EXPECTED_TOML)

    result = tomlio.load(toml_save)

    assert result == EXPECTED_TOML
[ "def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))", "def save(self):\r\n with open(self._filename, 'w') as f:\r\n pytoml.dump(f, self._collapse(self._toml))", "def save(wn):\n wordnet_yaml.save(wn)\n save_all_xml(wn)\n with codecs.open(\"wn.xml\",\"w\",\"utf-8\") as outp:\n wn.to_xml(outp, True)\n pickle.dump(wn, open(\"wn.pickle\", \"wb\"))", "def saveTexts(self):\n with open('saved/readings.pickle', 'wb') as handle:\n pickle.dump(self, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save(title=None, temporary=False):", "def _save_button_clicked(self):\n\n fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self,\"Save File\",UWBsim.BASE_DIR,\"All Files (*);;YAML files (*.yaml)\")\n \n yaml_dump = {}\n for i in range(len(self.anchor_positions)):\n key = str(i)\n yaml_dump[key] = {}\n yaml_dump[key]['x'] = str(self.anchorLineEdits[i][0].text())\n yaml_dump[key]['y'] = str(self.anchorLineEdits[i][1].text())\n yaml_dump[key]['z'] = str(self.anchorLineEdits[i][2].text())\n\n if not fileName.endswith('.yaml'):\n fileName = fileName + '.yaml'\n \n with open(fileName, 'w') as f:\n yaml.safe_dump(yaml_dump, f)", "def save(self, path):", "def save(self) -> None:\n if self.meta.file_path:\n # We are a family root node or the user has decided to make us one\n # Save family information\n with self.meta.file_path.open('w') as of:\n of.write(self.to_json())\n\n # Now for saving language information\n # Sound changes cannot be serialized! So we can only save lexicon\n # information.\n if self.lexicon:\n self.lexicon.save(self.meta.lexicon_file_path)\n if self.lexicon_delta:\n self.lexicon_delta.save(self.meta.lexicon_delta_file_path)", "def load(self):\n print(savePath)\n with open(savePath, 'rb') as font:\n self.data = pk.load(font)", "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "def templateSaver(settings, savename):\n #Create save directory if it does not exist:\n if not os.path.exists(os.path.join(os.getcwd(),'Setting templates\\\\')):\n os.mkdir(os.path.join(os.getcwd(),'Setting templates\\\\'))\n \n with open(savename +'.pkl', 'wb') as savefile:\n dump(settings, savefile, HIGHEST_PROTOCOL)\n daviPrint('Settings file \"' + savename + '.pkl\" was saved.')", "def save():\n with open(_USER_CONFIG, 'w') as f:\n print(\"# Configuartion file for PasteTray.\", file=f)\n settings.write(f)", "def save_as(self, filename: str):\n\n toml.dump(self.to_dict(), filename)", "def test_save_uml(self):\n self.element_factory.create(UML.Package)\n self.element_factory.create(UML.Diagram)\n self.element_factory.create(UML.Comment)\n self.element_factory.create(UML.Class)\n\n out = PseudoFile()\n storage.save(XMLWriter(out), factory=self.element_factory)\n out.close()\n\n assert \"<Package \" in out.data\n assert \"<Diagram \" in out.data\n assert \"<Comment \" in out.data\n assert \"<Class \" in out.data", "def save(self):\n self.trans=open(\"Translation.txt\", \"r+\")\n self.trans.truncate(0)\n written=\"\"\n for word in self.dictionary:\n written+=(word+\"-\"+self.dictionary[word]+\"\\n\")\n #self.trans.write(written.encode('utf8'))\n self.trans.write(written)\n self.trans.close()\n self.trans=open(\"Translation.txt\", \"r+\")", "def save_checkpoint(state, is_best, filename=os.path.join(os.environ.get('USER_PATH'),'/data/checkpoints/checkpoint.pt')):\n\t if is_best:\n\t\t print (\"=> Saving a new best model\")\n\t\t print(f'SAVING TO: {filename}')\n\t\t torch.save(state, filename) # save checkpoint\n\t else:\n\t\t print (\"=> 
Loss did not improve\")", "def save_file(self):\r\n self._main.save_file()", "def save(self, name):\n path_to_vocab = path_to_language_models.joinpath(name)\n path_to_vocab.mkdir(parents=True, exist_ok=True)\n\n with open(path_to_vocab.joinpath(\"vocab_meta.json\"), 'w', encoding='utf-8') as f:\n all_items = {k: v for k, v in self.__dict__.items() if k not in ['_word_vocab', '_char_vocab']}\n json.dump(all_items, f)\n\n self._word_vocab.save(path_to_vocab.joinpath('vocab_word.json'))\n self._char_vocab.save(path_to_vocab.joinpath('vocab_char.json'))\n\n logger.info('text transformer saved successfully at {}'.format(path_to_vocab))", "def saveSettingFile(self):\n self.saveSettingsFilename = asksaveasfilename()\n self.settingsCollector()\n templateSaver(self.settings.settings, self.saveSettingsFilename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a line function from two points
def func_from_line(a: tuple, b: tuple) -> Callable[[int], int]:
    def f(x):
        """ the line function y = f(x)"""
        return a[1] + (b[1]-a[1])/(b[0]-a[0])*x - (b[1]-a[1])/(b[0]-a[0])*a[0]
    return f
[ "def train_linear_two_points(point_1, point_2):\n\n points = [point_1, point_2]\n x_coords, y_coords = zip(*points)\n A = vstack([x_coords, ones(len(x_coords))]).T\n m, c = lstsq(A, y_coords)[0]\n\n output_dict = {\"slope\": m, \"intercept\": c}\n\n return output_dict", "def coefficients_of_line_from_points(point_a, point_b):\n\n points = [point_a, point_b]\n x_coords, y_coords = zip(*points)\n coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T\n m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]\n return m, c", "def coefficients_of_line_from_points(p1, p2):\n\n points = [p1, p2]\n x_coords, y_coords = zip(*points)\n coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T\n m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]\n\n return m, c", "def line(list1, list2, plo=False, pri=False, **kwargs):\n import matplotlib.pyplot as mp\n [x1, y1] = list1\n [x2, y2] = list2\n a = (y2 - y1) / (x2 - x1)\n b = (x2*y1 - x1*y2) / (x2 - x1)\n label = str(a) + 'x + ' + str(b)\n if plo:\n mp.plot([x1, x2], [y1, y2], label=label, **kwargs)\n if pri:\n print label\n return a, b", "def find_linear_function_parameters(p1, p2):\n assert len(p1) == 2, 'first_point needs to have exactly 2 coordinates'\n assert len(p2) == 2, 'second_point needs to have exactly 2 coordinates'\n assert p1[0] <= p2[0] and p1[1] <= p2[1], \\\n 'first_point coordinates need to be smaller or equal to second_point coordinates'\n\n if p2[0] - p1[0] == 0 or p2[1] - p1[1] == 0:\n assert p1 == p2, 'first_point and second_point cannot lie on a horizontal or vertical line'\n alpha = 1 # both points are the same\n beta = 0\n else:\n alpha = (p2[1] - p1[1]) / (p2[0] - p1[0])\n beta = p1[1] - (p1[0] * alpha)\n\n return alpha, beta", "def draw_line(p1, p2, *varargin, **others):\n \n plt.plot([p1[0], p2[0]], [p1[1], p2[1]], *varargin, **others)", "def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines", "def plotline(ax, z1, z2, *args, **kwargs):\n return ax.plot((z1.real, z2.real), (z1.imag, z2.imag), *args, **kwargs)", "def get_line_coefficients(line: Line) -> Optional[tuple[float, float]]:\n (x1, y1), (x2, y2) = line\n\n # Check for vertical line.\n if x2 == x1:\n return None\n\n a = (y2 - y1) / (x2 - x1)\n b = -x1 * (y2 - y1) / (x2 - x1) + y1\n\n return a, b", "def create_line(self, start_point=None, end_point=None):\n selected_points = (start_point, end_point) \n if None in selected_points:\n selected_points = self.get_selected_points()\n if len(selected_points) > 1:\n line = GraphicsLib.GraphicsItemLine(selected_points[0],\n selected_points[1])\n self.add_shape(line)\n return line\n else:\n msg = \"Please select two points (with same kappa and phi) 
\" + \\\n \"to create a helical line\"\n logging.getLogger(\"GUI\").error(msg)", "def line(x1: float, y1: float, x2: float, y2: float) -> None:\n __canvas.drawLine(x1, y1, x2, y2, __stroke_paint())", "def slope(p1,p2):\n return (p2[1] - p1[1])/(p2[0] - p1[0])", "def project_point_to_line(P: list | np.ndarray,\n A: list | np.ndarray,\n B: list | np.ndarray) -> np.ndarray:\n p, a, b = pad_with_zeros(P, A, B)\n n = unit(b - a)\n return a + np.dot(p - a, n) * n", "def line_intersection(line1, line2):\n (x1,y1), (x2,y2) = line1\n (u1,v1), (u2,v2) = line2\n (a,b), (c,d) = (x2-x1, u1-u2), (y2-y1, v1-v2)\n e, f = u1-x1, v1-y1\n # Solve ((a,b), (c,d)) * (t,s) = (e,f)\n denom = float(a*d - b*c)\n if MathHelper.near(denom, 0):\n # parallel\n # If collinear, the equation is solvable with t = 0.\n # When t=0, s would have to equal e/b and f/d\n if MathHelper.near(float(e)/b, float(f)/d):\n # collinear\n px = x1\n py = y1\n else:\n return None\n else:\n t = (e*d - b*f)/denom\n # s = (a*f - e*c)/denom\n px = x1 + t*(x2-x1)\n py = y1 + t*(y2-y1)\n return px, py", "def distance_point_to_line(P: list | np.ndarray,\n A: list | np.ndarray,\n B: list | np.ndarray) -> float:\n return distance(P, project_point_to_line(P, A, B))", "def intersection(line1, line2):\n a = array([[line2[2], -line1[2]],\n [line2[3], -line1[3]]])\n b = array([[line1[0] - line2[0]],\n [line1[1] - line2[1]]])\n co = solve(a, b)\n\n x = line2[0] + co[0][0] * line2[2]\n y = line2[1] + co[0][0] * line2[3]\n return x, y", "def intercept(x1, y1, x2, y2): \n return float(y1 - slope(x1,y1,x2,y2)*x1)", "def _walk_line(p0, p1):\n # unpack the point tuples\n x0, y0 = p0\n x1, y1 = p1\n\n dx, dy = x1 - x0, y1 - y0\n yi = 1\n if dy < 0:\n yi = -1\n dy = -dy\n\n D = 2 * dy - dx\n x = np.arange(x0, x1 + 1, dtype=int).T\n y = np.zeros((len(x),), dtype=int)\n\n yy = y0\n for i in np.arange(len(x)):\n y[i] = yy\n if D > 0:\n yy = yy + yi\n D = D - 2 * dx\n\n D = D + 2 * dy\n\n # sort by major axis, and index the cells\n xI = np.argsort(x)\n x = x[xI]\n y = y[xI]\n\n return x, y", "def point_line_distance(point: Tuple[float, float],\n start_point: Tuple[float, float],\n end_point: Tuple[float, float]) -> float:\n if start_point == end_point:\n return distance(point, start_point)\n else:\n n = abs(\n (end_point[0] - start_point[0]) * (start_point[1] - point[1]) - (start_point[0] - point[0]) * (end_point[1] - start_point[1])\n )\n d = sqrt(\n (end_point[0] - start_point[0]) ** 2 + (end_point[1] - start_point[1]) ** 2\n )\n return n / d" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the point, a tuple such as (x, y), from points_list with the minimal x coordinate. When two points share the minimal x coordinate, the bottom-left point is returned.
def return_left_point(points_list: List[tuple]) -> tuple:
    return min(points_list)
[ "def leftmost(pts):\n return withmin(xcoord, pts)", "def farthestPoint(pointList, p):\r\n return None", "def min_x(self):\n return min(point.x for point in self.points)", "def return_right_point(points_list: List[tuple]) -> tuple:\n return max(points_list)", "def GetMinPoint(self):\n ...", "def _get_minPoint(self) -> \"adsk::core::Ptr< adsk::core::Point2D >\" :\n return _core.BoundingBox2D__get_minPoint(self)", "def closest(reference,points):\n min_dis = float('inf')\n for point in points:\n dis = distance(reference,point)\n if dis < min_dis:\n min_dis = dis\n closest_point = point\n return closest_point, min_dis", "def nearest_point(point, points):\n\n # Note this uses euculidean distances -- so beware possible inaccuracy\n # using it on geographic coordinates at high latitudes. (Not sure how\n # extreme the situation has to be for it to matter -- does it ever?)\n dist_2 = numpy.sum((points.transpose() - point) ** 2, axis=1)\n nearest_point_index = numpy.argmin(dist_2)\n return points.transpose()[nearest_point_index], nearest_point_index", "def brute_force_closest(point, pointlist):\n import sys\n pid, d = -1, sys.maxint\n for i, p in enumerate(pointlist):\n nd = norm(point-p) \n if nd < d:\n d = nd\n pid = i\n return pointlist[pid]", "def get_min_x(sticks: list[Matchstick]) -> float:\n min_x = None\n for stick in sticks:\n if min_x is None or stick.h_pos < min_x:\n min_x = stick.h_pos\n return min_x", "def get_bottom_left_point(self): \n return self.x, self.y", "def min(self):\n return self.x.min(), self.y.min()", "def find_nearest_set_point(self, p):\n #print \"I'm in permutations_by_transpositions.py in find_nearest_set_point\"\n # converting point\n c = [-2 * x for x in p]\n return self.find_min_of_linear_function(c)\n #qres.put_nowait(self.find_min_of_linear_function(c))", "def find_closest_keypoint(point, keypoints):\n \n dx = point[0] - keypoints[0]\n dy = point[1] - keypoints[1]\n \n ds = np.hypot(dx, dy)\n \n minind = np.argmin(ds)\n \n mindx = dx[minind]\n mindy = dy[minind]\n mindist = ds[minind]\n closest_point = keypoints[:, minind]\n \n return minind, mindx, mindy, mindist, closest_point", "def coord_x(self) -> List[float]:\n if len(self.__points) == 0:\n return []\n if len(self.__points[0]) > 0:\n return [p[0] for p in self.points]", "def smallest_x(self):\n return min(map(lambda v: v.x, self.vertices)) # was TODO", "def get_nearest_mesh_value(self, x, points=None):\n \n # Get quotient and remainder wrt grid spacing\n div_x, mod_x = divmod(x, self.grid_spacing)\n \n # Check if x should be rounded up or down\n if mod_x >= self.grid_spacing / 2:\n div_x += 1\n \n # Get new point\n new_x = div_x * self.grid_spacing\n if points is not None:\n i = div_x - 1\n while i < 0:\n i += points\n while i >= points:\n i -= points\n return new_x, int(i)\n else:\n return new_x", "def bottommost(pts):\n return withmin(ycoord, pts)", "def min_x(self):\n return self.origin[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the point, a tuple such as (x, y), from points_list with the maximal x coordinate. When two points share the maximal x coordinate, the upper-right point is returned.
def return_right_point(points_list: List[tuple]) -> tuple: return max(points_list)
[ "def farthestPoint(pointList, p):\r\n return None", "def rightmost(pts):\n return withmax(xcoord, pts)", "def max_x(self):\n return max(point.x for point in self.points)", "def return_left_point(points_list: List[tuple]) -> tuple:\n return min(points_list)", "def find_max(elevation_list):\n maximum_elevation_per_row = max(elevation_list)\n maximum_point = max(maximum_elevation_per_row)\n return maximum_point", "def bottommost(pts):\n return withmin(ycoord, pts)", "def topmost(pts):\n return withmax(ycoord, pts)", "def _get_maxPoint(self) -> \"adsk::core::Ptr< adsk::core::Point2D >\" :\n return _core.BoundingBox2D__get_maxPoint(self)", "def furthest(reference,points):\n max_dis = -float('inf')\n for point in points:\n dis = distance(reference,point)\n if dis > max_dis:\n max_dis = dis\n closest_point = point\n return closest_point, max_dis", "def leftmost(pts):\n return withmin(xcoord, pts)", "def GetMaxPoint(self):\n ...", "def get_maximum_position(self):\n max_position = (0, 0)\n max_prob = 0.0\n\n for x in range(self.width):\n for y in range(self.height):\n if self[y][x] > max_prob:\n max_prob = self[y][x]\n max_position = (y, x)\n\n return max_position", "def findMaxValueOfEquation(self, points: list[list[int]], k: int) -> int:\n heap = []\n rslt = float('-inf')\n for x, y in points:\n while heap and heap[0][1] < x - k:\n heappop(heap)\n\n if heap:\n rslt = max(\n rslt,\n x + y - heap[0][0]\n )\n\n heappush(heap, (x - y, x))\n\n return rslt", "def max_point(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def extreme_points(self, type_):\n if type_ == 'xmin':\n xy = self.x\n max_min_fun = np.amin\n elif type_ == 'xmax':\n xy = self.x\n max_min_fun = np.amax\n elif type_ == 'ymin':\n xy = self.y\n max_min_fun = np.amin\n elif type_ == 'ymax':\n xy = self.y\n max_min_fun = np.amax\n else:\n raise ValueError(\"type_ must be 'xmin', 'xmax', 'ymin' or 'ymax'\")\n points = []\n max_min = max_min_fun(xy)\n max_min_index = np.where(xy == max_min)\n if np.array_equal(xy, self.x):\n for i in max_min_index[0]:\n points.append([max_min, self.y[i]])\n elif np.array_equal(xy, self.y):\n for i in max_min_index[0]:\n points.append([self.x[i], max_min])\n return np.array(points)", "def points_max(self):\n if self._games is None:\n raise TypeError('games has not been set')\n return self._games['points'].max()", "def getLongestLaneExtPoint(self, laneName):\r\n all_ext_points = np.array(self._all_ext_points)\r\n laneList = np.array(self._laneList)\r\n pointsNum = np.array(self._regionPointsList)\r\n\r\n target_index = np.where(laneList == laneName)[0]\r\n target_points_num = pointsNum[target_index]\r\n\r\n maxIndex = np.argmax(target_points_num)\r\n longestIndex = pointsNum.tolist().index(target_points_num[maxIndex])\r\n #print(\"points num: \", pointsNum, longestIndex)\r\n x_value, y_value = all_ext_points[longestIndex]\r\n coord = [x_value, y_value]\r\n return coord", "def get_max_x(sticks: list[Matchstick]) -> float:\n max_x = None\n for stick in sticks:\n if max_x is None or stick.h_pos > max_x:\n max_x = stick.h_pos\n return max_x", "def max_distance(points, return_points=False):\n pts = np.asarray(points)\n shape = pts.shape\n # shape must be two dimensions, d0 = number of points, must be >2, d1 = coordinate components, must be 2D+\n assert len(shape) == 2 and shape[0] >= 2 and shape[1] >= 2\n\n # TODO: this is flawed because it fixes the origin for all distance calculations so it might not be the maximum (works for paths though)\n i0 = 0\n origin = pts[i0]\n distances = 
np.array([distance(origin, pt) for pt in pts[1:]])\n\n i1 = np.argmax(distances)\n if not return_points:\n return distances[i1]\n return distances[i1], np.stack((pts[i0], pts[1 + i1]), axis=0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
classifies a number as positive, negative or zero
def classify(number):
    p = 1
    n = -1
    z = 0
    if number > 0:
        return p
    elif number < 0:
        return n
    else:
        return z
[ "def classify_number(number):\n #YOUR CODE HERE\n return", "def positive(x):\r\n return x > 0", "def is_positive(number):\n if number > 0:\n return True\n return None", "def signe(x):\n if x > 0 : return 1\n elif x < 0 : return -1\n else : return 0", "def print_pos_neg(num):\n \n if num > 0:\n return \"positive\"\n elif num == 0: \n return \"neutral\"\n else:\n return \"negative\"", "def _sign(self, num):\n \n if num > 0:\n return 1\n elif num < 0:\n return -1\n else:\n return 0", "def sign(x: float) -> float:\n return 1.0 if x > 0.0 else -1.0", "def classify(svm, point):\n if positiveness(svm, point) >0:\n return 1\n elif positiveness(svm, point) < 0:\n return -1\n else:\n return 0", "def classify_value(self, value):\n if value >= self.bisect_value:\n classification = 1\n else:\n classification = -1\n return classification * self.direction", "def temp_classifier(temp_celsius): \n if temp_celsius<-2:\n number=0\n elif -2<=temp_celsius<2:\n number=1\n elif 2<=temp_celsius<15:\n number=2\n else:\n number=3\n return number", "def is_positive(x: int) -> bool:\n return x > 0", "def asNumeral(value):", "def JudgeNumber(number):\n return f\"Good number {number:.1f}.\"", "def is_Negative(x):\n if x < 0:\n return True\n return False", "def get_GTto0(x):\n\n if x==\"GT\": return 0\n else: return 1", "def replace_zero(bn):\n if (str(nb) == '0'):\n return '-'\n else:\n return(group(nb/1000))", "def is_int_not_neg(x):\n return True if is_int(x) and x >= 0 else False", "def _sign(x):\n if _copysign(1.0, x) == -1.0:\n return \"-\"\n else:\n return \"+\"", "def is_int_neg(x):\n return True if is_int(x) and x < 0 else False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Proposal distribution for modifying a single particle's Cartesian coordinates.
def move_particle(current_sample: Chem.rdchem.Mol, args: Args) -> Chem.rdchem.Mol:
    # Initialize proposed sample
    proposed_sample = copy.deepcopy(current_sample)
    proposed_conf = proposed_sample.GetConformer()

    # Select a particle at random
    num_atoms = proposed_sample.GetNumAtoms()
    particle_index = random.randint(0, num_atoms - 1)

    # Modify the particle's Cartesian coordinates
    pos = proposed_conf.GetPositions()
    epsilons = [random.uniform(0, 1) for _ in range(3)]
    for i in range(3):
        pos[particle_index][i] += (1./(math.sqrt(3)))*(epsilons[i] - 0.5)*args.delta

    # Save updated atomic coordinates to the conformation object
    proposed_conf.SetAtomPosition(particle_index, Point3D(pos[particle_index][0], pos[particle_index][1], pos[particle_index][2]))

    return proposed_sample
[ "def particle(upperRight=\"string\", particleId=int, perParticleDouble=bool, inherit=float, vectorValue=float, order=int, conserve=float, name=\"string\", numJitters=int, dynamicAttrList=bool, deleteCache=bool, jitterRadius=\"string\", shapeName=\"string\", attribute=\"string\", jitterBasePoint=\"string\", count=bool, cache=bool, lowerLeft=\"string\", perParticleVector=bool, position=\"string\", floatValue=float, gridSpacing=\"string\"):\n pass", "def nParticle(upperRight=\"string\", particleId=int, perParticleDouble=bool, inherit=float, vectorValue=float, order=int, conserve=float, name=\"string\", numJitters=int, dynamicAttrList=bool, deleteCache=bool, jitterRadius=\"string\", shapeName=\"string\", attribute=\"string\", jitterBasePoint=\"string\", count=bool, cache=bool, lowerLeft=\"string\", perParticleVector=bool, position=\"string\", floatValue=float, gridSpacing=\"string\"):\n pass", "def particleDistribution(latticeList):\n\n\n particlesRemaining = particleNumber\n \n # the following while loop attempts to place a particle each iteration, if the spot\n # picked is full, it finds a new random spot on the next iteration. It consumes\n # particles as it goes, until it runs out. If a spot is full, nothing is consumed.\n\n while particlesRemaining != 0:\n \n # these randomly select a row and column\n randomColumn = random.randint(0, containerSize-1)\n randomRow = random.randint(0, containerSize-1)\n\n # this statement finds the number of spots left in the target, and which index\n # values they have\n spotsRemaining, openSpots = latticeList.spots_remaining(randomColumn, randomRow)\n \n\n # this if statement ensures that the only time a particle is added is when there\n # are spots remaining.\n if spotsRemaining > 0:\n positionChosen = random.choice(openSpots)\n\n latticeList.array[randomRow][randomColumn][positionChosen] = 1\n\n particlesRemaining -= 1\n\n return latticeList", "def update_particles_with_odom(self, msg):\n new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)\n # compute the change in x,y,theta since our last update\n if self.current_odom_xy_theta:\n old_odom_xy_theta = self.current_odom_xy_theta\n delta = (new_odom_xy_theta[0] - self.current_odom_xy_theta[0],\n new_odom_xy_theta[1] - self.current_odom_xy_theta[1],\n new_odom_xy_theta[2] - self.current_odom_xy_theta[2])\n\n self.current_odom_xy_theta = new_odom_xy_theta\n else:\n self.current_odom_xy_theta = new_odom_xy_theta\n return\n\n odom_noise = .3 # level of noise put into particles after update from odom to introduce variability\n\n # updates the particles based on r1, d, and r2. 
For more information on this, consult the website\n \tfor particle in self.particle_cloud:\n # calculates r1, d, and r2\n r1 = np.arctan2(float(delta[1]),float(delta[0])) - old_odom_xy_theta[2]\n d = np.sqrt(np.square(delta[0])+np.square(delta[1]))\n r2 = delta[2] - r1\n\n # updates the particles with the above variables, while also adding in some noise\n particle.theta = particle.theta + r1*(random_sample()*odom_noise+(1-odom_noise/2.0))\n particle.x = particle.x + d*np.cos(particle.theta)*(random_sample()*odom_noise+(1-odom_noise/2.0))\n particle.y = particle.y + d*np.sin(particle.theta)*(random_sample()*odom_noise+(1-odom_noise/2.0))\n particle.theta = particle.theta + r2*(random_sample()*odom_noise+(1-odom_noise/2.0))", "def uniform_probability(self, args = []):\n\t\tself.probability = 1", "def __init__(self, dim, distribution, proposal_distribution): \n self.dim = dim\n self.distribution = distribution\n self.proposal_distribution = proposal_distribution\n self.x = None\n self.init_x()", "def generate_pore_diameters(self):\n self._logger.info(\"generate_pore_diameters: Generate pore diameter from \"+self._psd_dist+\" distribution\")\n prob_fn = getattr(spst,self._psd_dist)\n P = prob_fn(self._psd_shape,loc=self._psd_loc,scale=self._psd_scale)\n self._net.pore_properties['diameter'] = P.ppf(self._net.pore_properties['seed'])\n #Set boundadry pores to size 0\n self._net.pore_properties['diameter'][self._net.pore_properties['type']>0] = 0\n self._logger.debug(\"generate_pore_diameters: End of method\")", "def __position_cartesian_local_desired_cb(self, data):\n self.__position_cartesian_local_desired = posemath.fromMsg(data.pose)", "def odom_update(self):\n x_d, y_d, theta_d = self.pose_delta\n for i in range(len(self.particle_cloud)):\n self.particle_cloud[i].x -= x_d\n self.particle_cloud[i].y -= y_d\n self.particle_cloud[i].theta += theta_d", "def generate_particle(info):\n\tdim = info['dimension']\n\tsol = info['lower'] + rand(dim) * (info['upper'] - info['lower'])\n\treturn sol", "def point_mutation(self, clone, mutation_rate):\r\n for i in range(0, len(clone.paratopes)):\r\n if random() < mutation_rate:\r\n clone.paratopes[i] = self.rand_paratope()\r\n return clone", "def propose(self):\n runenv.stepblockind=self.blockind\n if self.proposal_distribution == \"Normal\":\n self.stochastic.value = rnormal(self.stochastic.value, self.adaptive_scale_factor * self.proposal_sd, size=self.stochastic.value.shape)\n elif self.proposal_distribution == \"Prior\":\n self.stochastic.random()", "def mutate(self, probability, per_gene=False):\n for i in range(0, self.cnf.num_variables):\n if random.uniform(0,1) < (probability[i] if per_gene else probability):\n self.assignments[:,i] = 1-self.assignments[:,i]", "def particle (self):\n return self.__particle", "def setParticleAttr(randomVector=float, object=\"string\", relative=bool, floatValue=float, randomFloat=float, attribute=\"string\", vectorValue=float):\n pass", "def move_particle_with_mouse(self, particle, mouse_pos):\n env_mouse_pos = mouse_pos\n if env_mouse_pos.x - particle.radius >= 0 and env_mouse_pos.x + particle.radius<= self.width:\n particle.pos.x = env_mouse_pos.x\n else:\n if env_mouse_pos.x - particle.radius > 0:\n particle.pos.x = self.width - particle.radius\n else:\n particle.pos.x = particle.radius\n if env_mouse_pos.y - particle.radius >= 0 and env_mouse_pos.y + particle.radius<= self.height:\n particle.pos.y = env_mouse_pos.y\n else:\n if env_mouse_pos.y - particle.radius > 0:\n particle.pos.y = self.height - 
particle.radius\n else:\n particle.pos.y = particle.radius", "def apply_distortion(self, dist):\n if self.agg_density is None:\n logger.warning(f'You must update before applying a distortion ')\n return\n\n S = self.density_df.S\n # some dist return np others don't this converts to numpy...\n gS = np.array(dist.g(S))\n\n self.density_df['gS'] = gS\n self.density_df['exag'] = np.hstack((0, gS[:-1])).cumsum() * self.bs", "def getAtmosphericNucleus(position):\n atomSeed = random_sample()\n if atomSeed>=1-.78: #78% nitrogen\n return Particle(\"nitrogen\",pos=position)\n elif atomSeed>=.01: #21% oxygen\n return Particle(\"oxygen\",pos=position)\n else: #1% argon\n return Particle(\"argon\",pos=position)", "def update_robot_pose(self):\n # first make sure that the particle weights are normalized\n self.normalize_particles()\n\n # for the pose, calculate the particle's mean location\n \tmean_particle = Particle(0, 0, 0, 0)\n mean_particle_theta_x = 0\n mean_particle_theta_y = 0\n for particle in self.particle_cloud:\n mean_particle.x += particle.x * particle.w\n mean_particle.y += particle.y * particle.w\n\n # angle is calculated using trig to account for angle runover\n distance_vector = np.sqrt(np.square(particle.y)+np.square(particle.x))\n mean_particle_theta_x += distance_vector * np.cos(particle.theta) * particle.w\n mean_particle_theta_y += distance_vector * np.sin(particle.theta) * particle.w\n\n mean_particle.theta = np.arctan2(float(mean_particle_theta_y),float(mean_particle_theta_x))\n\n self.robot_pose = mean_particle.as_pose()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Metropolis-Hastings conformational search using RDKit.
def rdkit_metropolis(args: Args, logger: Logger) -> None:
    # Set up logger
    debug, info = logger.debug, logger.info

    # Define constants
    k_b = 3.297e-24  # Boltzmann constant in cal/K
    avogadro = 6.022e23

    # Molecule conformation list
    # conformation_molecules = []
    all_conformation_molecules = []

    # Load molecule
    # noinspection PyUnresolvedReferences
    mol = Chem.Mol(open(args.bin_path, "rb").read())
    mol.RemoveAllConformers()

    debug(f'Starting search...')

    # Discover the rotatable bonds
    rotatable_bonds = mol.GetSubstructMatches(RotatableBondSmarts)
    debug(f'Num rotatable bonds: {len(rotatable_bonds)}')

    # Generate initial conformation and minimize it
    current_sample = copy.deepcopy(mol)
    AllChem.EmbedMolecule(current_sample, maxAttempts=args.max_attempts)
    if args.init_minimize:
        res = AllChem.MMFFOptimizeMoleculeConfs(current_sample)
    else:
        res = AllChem.MMFFOptimizeMoleculeConfs(current_sample, maxIters=0)
    current_energy = res[0][1] * 1000.0 / avogadro
    all_conformation_molecules.append(current_sample)

    # Run MC steps
    debug(f'Running MC steps...')
    num_accepted = 0
    start_time = time.time()
    for step in tqdm(range(args.num_steps)):
        if args.cartesian:
            proposed_sample = move_particle(current_sample, args)
        else:
            proposed_sample = rotate_bonds(current_sample, rotatable_bonds, args)

        # Compute the energy of the proposed sample
        if args.minimize:
            res = AllChem.MMFFOptimizeMoleculeConfs(proposed_sample)
        else:
            res = AllChem.MMFFOptimizeMoleculeConfs(proposed_sample, maxIters=0)
        proposed_energy = res[0][1] * 1000.0 / avogadro

        # Probability ratio
        prob_ratio = math.exp((current_energy - proposed_energy) / (k_b * args.temp))
        mu = random.uniform(0, 1)
        if mu <= prob_ratio:
            # Update the energy of the current sample to that of the proposed sample
            current_sample = proposed_sample
            current_energy = proposed_energy
            num_accepted += 1

        if step % args.subsample_frequency == 0:
            all_conformation_molecules.append(current_sample)

        if step % args.log_frequency == 0:
            if num_accepted == 0:
                acceptance_percentage = 0.0
            else:
                acceptance_percentage = float(num_accepted)/float(step + 1)*100.0
            debug(f'Steps completed: {step}, acceptance percentage: {acceptance_percentage}')
    end_time = time.time()
    debug(f'Total Time (s): {end_time - start_time}')
    debug(f'% Moves accepted: {float(num_accepted)/float(args.num_steps)*100.0}')

    # Save all sub sampled conformations in molecule object
    # noinspection PyUnresolvedReferences
    all_mol = Chem.Mol(open(args.bin_path, "rb").read())
    all_mol.RemoveAllConformers()
    for i in range(len(all_conformation_molecules)):
        c = all_conformation_molecules[i].GetConformer()
        c.SetId(i)
        all_mol.AddConformer(c)

    # Save molecule to binary file
    bin_str = all_mol.ToBinary()
    with open(os.path.join(args.save_dir, "conformations.bin"), "wb") as b:
        b.write(bin_str)
[ "def PHOENIX_model_search(s_met, s_grav, s_teff, s_vturb):\n\n if not os.path.exists(rootdir + '/phoenix_models'):\n os.mkdir(rootdir + '/phoenix_models')\n os.mkdir(rootdir + '/phoenix_models/raw_models')\n\n # Path to the PHOENIX models\n model_path = rootdir + '/phoenix_models/raw_models/'\n\n # In PHOENIX models, all of them are computed with vturb = 2 km/2\n if(s_vturb==-1):\n print('\\t + No known turbulent velocity. Setting it to 2 km/s.')\n s_vturb = 2.0\n\n possible_mets = np.array([0.0, -0.5, -1.0, 1.0, -1.5, -2.0, -3.0, -4.0])\n\n if s_met not in possible_mets:\n # Now check closest metallicity model for input star:\n m_diff = np.inf\n chosen_met = np.inf\n for met in possible_mets:\n # Estimate distance between current and input metallicity:\n c_m_diff = np.abs(met-s_met)\n if(c_m_diff<m_diff):\n chosen_met = met\n m_diff = copy(c_m_diff)\n\n print('\\t + For input metallicity {}, closest value is {}.'.\n format(s_met, chosen_met))\n else:\n chosen_met = s_met\n\n # Generate the folder name:\n if chosen_met == 0.0:\n met_folder = 'm00'\n model = 'Z-0.0'\n else:\n abs_met = str(np.abs(chosen_met)).split('.')\n if chosen_met<0:\n met_folder = 'm'+abs_met[0]+abs_met[1]\n model = 'Z-'+abs_met[0]+abs_met[1]\n else:\n met_folder = 'p'+abs_met[0]+abs_met[1]\n model = 'Z+'+abs_met[0]+abs_met[1]\n\n chosen_met_folder = model_path + met_folder\n\n # Check if folder exists. If it does not, create it and download the\n # PHOENIX models that are closer in temperature and gravity to the\n # user input values:\n if not os.path.exists(chosen_met_folder):\n os.mkdir(chosen_met_folder)\n cwd = os.getcwd()\n os.chdir(chosen_met_folder)\n\n # See if in a past call the file list for the given metallicity was\n # saved; if not, retrieve it from the PHOENIX website:\n if os.path.exists('file_list.dat'):\n with open('file_list.dat') as f:\n all_files = f.readlines()\n for i in np.arange(len(all_files)):\n all_files[i] = all_files[i].strip()\n else:\n response = urlopen('ftp://phoenix.astro.physik.uni-goettingen.de/SpecIntFITS/PHOENIX-ACES-AGSS-COND-SPECINT-2011/'+model+'/')\n html = str(response.read())\n all_files = []\n while True:\n idx = html.find('lte')\n if(idx==-1):\n break\n else:\n idx2 = html.find('.fits')\n all_files.append(html[idx:idx2+5])\n html = html[idx2+5:]\n f = open('file_list.dat','w')\n for file in all_files:\n f.write(file+'\\n')\n f.close()\n # Now check closest Teff for input star:\n t_diff = np.inf\n chosen_teff = np.inf\n for file in all_files:\n teff = np.double(file[3:8])\n c_t_diff = abs(teff-s_teff)\n if(c_t_diff<t_diff):\n chosen_teff = teff\n t_diff = c_t_diff\n\n print('\\t + For input effective temperature {:.1f} K, closest '\n 'value is {:.0f} K.'.format(s_teff, chosen_teff))\n\n teff_files = []\n teff_string = \"{:05.0f}\".format(chosen_teff)\n for file in all_files:\n if teff_string in file:\n teff_files.append(file)\n\n # Now check closest gravity:\n grav_diff = np.inf\n chosen_grav = np.inf\n chosen_fname = ''\n for file in teff_files:\n grav = np.double(file[9:13])\n c_g_diff = abs(grav-s_grav)\n if(c_g_diff<grav_diff):\n chosen_grav = grav\n grav_diff = c_g_diff\n chosen_fname = file\n\n print('\\t + Checking if PHOENIX model file is on the system...')\n # Check if file is already downloaded. 
If not, download it from the PHOENIX website:\n if not os.path.exists(chosen_fname):\n print('\\t + Model file not found.')\n downloader('ftp://phoenix.astro.physik.uni-goettingen.de/SpecIntFITS/PHOENIX-ACES-AGSS-COND-SPECINT-2011/'+model+'/'+chosen_fname)\n else:\n print('\\t + Model file found.')\n\n os.chdir(cwd)\n chosen_path = chosen_met_folder + '/' + chosen_fname\n\n # Summary:\n print('\\t + For input metallicity {}, effective temperature {} K, and\\n'\n '\\t log-gravity {}, closest combination is metallicity: {},\\n'\n '\\t effective temperature: {} K, and log-gravity {}\\n\\n'\n '\\t + Chosen model file to be used:\\n\\t\\t{:s}\\n'.format(s_met, s_teff,\n s_grav, chosen_met, chosen_teff, chosen_grav, chosen_fname))\n\n return chosen_path, chosen_teff, chosen_grav, chosen_met, s_vturb", "def _search(self):", "def searchSim(self, SCx=None, pcentSimRange=(.5, 1), tolarance=0, \n tniTog=1, cardRange=(1,12)):\n if SCx == None: # use tonic if no scTriple is provided\n SCx = self.tonic.get('sc')\n\n scToCompare = self.scObj.getAllScTriples(cardRange, tniTog)\n if len(scToCompare) == 0: # in_case something goes wrong\n print _MOD, 'bad cardRange', cardRange,\n return None\n\n analysisValues = [] # enter as pairs: (chord, value) \n for scTriple in scToCompare:\n # egines that have tn/tni distinction\n if self.srcKey in ['ATMEMB','REL','TpRel']: \n value = self.compareSet(SCx, scTriple, tniTog)\n else:\n value = self.compareSet(SCx, scTriple)\n if value == None:\n print _MOD, 'failed set comparison:', SCx, scTriple\n continue\n\n analysisValues.append((scTriple, value))\n \n # get ranges from_usr percentage for_particular path_engine\n if self.min > self.max: # 1.0 > 0.0 max similarity \n high = self.min\n low = self.max\n else:\n high = self.max # 0.0 < 1.0 max similarity \n low = self.min\n usrMin = pcentSimRange[0] \n usrMax = pcentSimRange[1]\n if usrMin < 0.00: usrMin = 0.00 # 0%\n if usrMax > 1.00: usrMax = 1.00 # 100%\n \n totRange = abs(high - low) # in_scale of analysis algorithm\n initMin = totRange * usrMin # multiply by percentage to find actual val\n initMax = totRange * usrMax\n \n incrMult = 0 # multiple tolarance to increase selection range\n lastTry = 0\n matchingScTriples = []\n \n numFirstR = 0 # track number of matches within initial range\n while 1:\n curMin = initMin - ((tolarance * incrMult) * totRange)\n if curMin < low:\n curMin = low\n curMax = initMax + ((tolarance * incrMult) * totRange)\n if curMax > high:\n curMax = high\n if curMin <= low and curMax >= high:\n lastTry = 1 # range already expanded, must end\n\n for scTriple, analValue in analysisValues: \n if analValue >= curMin and analValue <= curMax:\n matchingScTriples.append((scTriple, analValue)) \n\n if incrMult == 0:\n numFirstR = len(matchingScTriples)\n if tolarance == 0:\n break\n if len(matchingScTriples) > 0: lastTry = 1 \n if lastTry: break\n incrMult = incrMult + 1 # increase to increase range\n\n return matchingScTriples, numFirstR, len(matchingScTriples)", "def search_space() -> Mapping[str, Any]:\n\n return {\n \"model_type\": \"BayesianRidge\",\n \"n_iter\": tune.choice(np.arange(50, 1000)),\n \"fit_intercept\": tune.choice([True, False]),\n }", "def search(self, type=None):\n\t\tprint('search()')\n\t\t#\n\t\tthresholdDist = 10\n\t\t#\n\t\ttimer = bimpy.util.bTimer(name='search')\n\t\ttheDictList = []\n\t\tnumRows = 0\n\t\tnNodes = len(self.nodeDictList)\n\t\tdistanceMatrix = np.ndarray((nNodes,nNodes))\n\t\tdistanceMatrix[:] = np.nan\n\t\tfor i, iDict in 
enumerate(self.nodeDictList):\n\t\t\tiDict = self.getNode(i)\n\t\t\tx1 = iDict['x']\n\t\t\ty1 = iDict['y']\n\t\t\tz1 = iDict['z']\n\t\t\tfor j, jDict in enumerate(self.nodeDictList):\n\t\t\t\tjDict = self.getNode(j)\n\t\t\t\tif i==j: continue\n\t\t\t\tif not np.isnan(distanceMatrix[j,i]): continue\n\t\t\t\tx2 = jDict['x']\n\t\t\t\ty2 = jDict['y']\n\t\t\t\tz2 = jDict['z']\n\t\t\t\tdist = self.euclideanDistance(x1,y1,z1,x2,y2,z2)\n\t\t\t\tif dist<thresholdDist:\n\t\t\t\t\tdistanceMatrix[i,j] = dist\n\t\t\t\t\ttheDict = OrderedDict()\n\t\t\t\t\ttheDict['Idx'] = numRows\n\t\t\t\t\ttheDict['z'] = z1\n\t\t\t\t\ttheDict['node1'] = i\n\t\t\t\t\ttheDict['nEdges1'] = iDict['nEdges']\n\t\t\t\t\tif iDict['nEdges'] == 1:\n\t\t\t\t\t\ttheDict['edgeList'] = iDict['edgeList']\n\t\t\t\t\t\tedgeDict = self.getEdge(iDict['edgeList'][0])\n\t\t\t\t\t\ttheDict['preNode'] = edgeDict['preNode']\n\t\t\t\t\t\ttheDict['postNode'] = edgeDict['postNode']\n\t\t\t\t\t\ttheDict['Len 2D'] = edgeDict['Len 2D']\n\t\t\t\t\telse:\n\t\t\t\t\t\ttheDict['edgeList'] = ''\n\t\t\t\t\t\ttheDict['preNode'] = ''\n\t\t\t\t\t\ttheDict['postNode'] = ''\n\t\t\t\t\t\ttheDict['Len 2D'] = ''\n\t\t\t\t\ttheDict['node2'] = j\n\t\t\t\t\ttheDict['nEdges2'] = jDict['nEdges']\n\t\t\t\t\ttheDict['dist'] = round(dist,2)\n\t\t\t\t\t#print(' nodes close:', theDict)\n\t\t\t\t\ttheDictList.append(theDict)\n\t\t\t\t\tnumRows += 1\n\t\tself.editDictList = theDictList\n\t\tprint(' found:', numRows, timer.elapsed())\n\t\treturn theDictList", "def eucentricSearch(scope,params,instance):\n scope.eucentricHeight()", "def search_edges_to(concept='dog'):\n return search_edges(end='/c/' + settings.LANGUAGE + '/' + concept, minWeight=2)", "def ATLAS_model_search(s_met, s_grav, s_teff, s_vturb):\n if not os.path.exists(rootdir + '/atlas_models'):\n os.mkdir(rootdir + '/atlas_models')\n os.mkdir(rootdir + '/atlas_models/raw_models')\n\n model_path = rootdir + '/atlas_models/'\n\n # This is the list of all the available metallicities in Kurucz's website:\n possible_mets = np.array([-0.1, -0.2, -0.3, -0.5, -1.0, -1.5, -2.0, -2.5,\n -3.0, -3.5, -4.0, -4.5, -5.0, 0.0, 0.1, 0.2, 0.3, 0.5, 1.0])\n # And this is the list of all possible vturbs:\n possible_vturb = np.array([0.0, 2.0, 4.0, 8.0])\n\n # Check if turbulent velocity is given. If not, set to 2 km/s:\n if s_vturb == -1:\n print('\\t > No known turbulent velocity. 
Setting it to 2 km/s.')\n s_vturb = 2.0\n if s_vturb not in possible_vturb:\n # Check closest vturb to input:\n vturb_diff = np.inf\n chosen_vturb = np.inf\n for vturb in possible_vturb:\n # Estimate distance between current and input vturb:\n c_vturb_diff = np.abs(vturb - s_vturb)\n if c_vturb_diff < vturb_diff:\n chosen_vturb = c_vturb_diff\n vturb_diff = copy(c_vturb_diff)\n print('\\t > For input vturb {} km/s, closest vturb is {} km/s.'.\n format(s_vturb, chosen_vturb))\n else:\n chosen_vturb = s_vturb\n\n if s_met not in possible_mets:\n # Now check closest metallicity model for input star:\n m_diff = np.inf\n chosen_met = np.inf\n for met in possible_mets:\n # Estimate distance between current and input metallicity:\n c_m_diff = np.abs(met-s_met)\n if(c_m_diff<m_diff):\n chosen_met = met\n m_diff = copy(c_m_diff)\n\n print('\\t > For input metallicity {}, closest metallicity is {}.'\n .format(s_met, chosen_met))\n else:\n chosen_met = s_met\n\n # Check if the intensity file for the calculated metallicity and\n # vturb is on the atlas_models folder:\n if chosen_met == 0.0:\n met_dir = 'p00'\n elif chosen_met < 0:\n met_string = str(np.abs(chosen_met)).split('.')\n met_dir = 'm'+met_string[0]+met_string[1]\n else:\n met_string = str(np.abs(chosen_met)).split('.')\n met_dir = 'p'+met_string[0]+met_string[1]\n\n print('\\t + Checking if ATLAS model file is on the system ...')\n # This will make the code below easier to follow:\n amodel = '{:s}k{:.0f}'.format(met_dir, chosen_vturb)\n afile = model_path + 'raw_models/i' + amodel\n\n if os.path.exists(afile + 'new.pck') or \\\n os.path.exists(afile + '.pck19') or \\\n os.path.exists(afile + '.pck'):\n print('\\t + Model file found.')\n else:\n # If not in the system, download it from Kurucz's website.\n # First, check all possible files to download:\n print('\\t + Model file not found.')\n response = urlopen('http://kurucz.harvard.edu/grids/grid' +\n met_dir + '/')\n html = str(response.read())\n ok = True\n filenames = []\n while(ok):\n idx = html.find('>i'+met_dir.lower())\n if(idx==-1):\n ok = False\n else:\n for i in range(30):\n if(html[idx+i]=='<'):\n filenames.append(html[idx+1:idx+i])\n html = html[idx+1:]\n\n hasnew = False\n gotit = False\n araw = model_path + \"raw_models/\"\n # Check that filenames have the desired vturb and prefer *new* models:\n for afname in filenames:\n if 'new' in afname and amodel in afname:\n hasnew = True\n gotit = True\n downloader('http://kurucz.harvard.edu/grids/grid'\n + met_dir + '/' + afname)\n if not os.path.exists(araw):\n os.mkdir(araw)\n os.rename(afname, araw + afname)\n\n if not hasnew:\n for afname in filenames:\n if '.pck19' in afname and amodel in afname:\n gotit = True\n downloader('http://kurucz.harvard.edu/grids/grid'\n + met_dir + '/' + afname)\n if not os.path.exists(araw):\n os.mkdir(araw)\n os.rename(afname, araw + afname)\n if not gotit:\n for afname in filenames:\n if amodel+'.pck' in afname:\n gotit = True\n downloader('http://kurucz.harvard.edu/grids/grid'\n + met_dir + '/' + afname)\n if not os.path.exists(araw):\n os.mkdir(araw)\n os.rename(afname, araw + afname)\n if not gotit:\n print('\\t > No model with closest metallicity of {} and closest '\n 'vturb of {} km/s found.\\n\\t Please, modify the input '\n 'values of the target and select other stellar parameters '\n 'for it.'.format(chosen_met, chosen_vturb))\n sys.exit()\n\n # Check if the models in machine readable form have been generated.\n # If not, generate them:\n if not os.path.exists(model_path + amodel):\n # Now 
read the files and generate machine-readable files:\n possible_paths = [afile+'new.pck', afile+'.pck19', afile+'.pck']\n\n for i in range(len(possible_paths)):\n possible_path = possible_paths[i]\n if os.path.exists(possible_path):\n lines = getFileLines(possible_path)\n # Create folder for current metallicity and turbulent\n # velocity if not created already:\n if not os.path.exists(model_path + amodel):\n os.mkdir(model_path + amodel)\n # Save files in the folder:\n while True:\n TEFF,GRAVITY,LH = getATLASStellarParams(lines)\n if not os.path.exists(model_path + amodel+'/'+TEFF):\n os.mkdir(model_path + amodel+'/'+TEFF)\n idx,mus = getIntensitySteps(lines)\n save_mr_file = True\n if os.path.exists(model_path + amodel+'/'+TEFF+\n '/grav_'+GRAVITY+'_lh_'+LH+'.dat'):\n save_mr_file = False\n if save_mr_file:\n f = open(model_path + amodel+'/'+TEFF+\n '/grav_'+GRAVITY+'_lh_'+LH+'.dat','w')\n f.write('#TEFF:' + TEFF +\n ' METALLICITY:' + met_dir +\n ' GRAVITY:' + GRAVITY +\n ' VTURB:' + str(int(chosen_vturb)) +\n ' L/H: ' + LH + '\\n')\n f.write('#wav (nm) \\t cos(theta):' + mus)\n for i in range(idx, len(lines)):\n line = lines[i]\n idx = line.find('EFF')\n idx2 = line.find('\\x0c')\n if(idx2!=-1 or line==''):\n hhhh=1\n elif(idx!=-1):\n lines = lines[i:]\n break\n else:\n wav_p_intensities = line.split(' ')\n s = FixSpaces(wav_p_intensities)\n if save_mr_file:\n f.write(s+'\\n')\n if save_mr_file:\n f.close()\n if(i==len(lines)-1):\n break\n\n # Now, assuming models are written in machine readable form, we can work:\n chosen_met_folder = model_path + amodel\n\n # Now check closest Teff for input star:\n t_diff = np.inf\n chosen_teff = np.inf\n chosen_teff_folder = ''\n tefffolders = glob.glob(chosen_met_folder+'/*')\n for tefffolder in tefffolders:\n fname = tefffolder.split('/')[-1]\n teff = np.double(fname)\n c_t_diff = abs(teff-s_teff)\n if(c_t_diff<t_diff):\n chosen_teff = teff\n chosen_teff_folder = tefffolder\n t_diff = c_t_diff\n\n print('\\t + For input effective temperature {:.1f} K, closest value '\n 'is {:.0f} K.'.format(s_teff, chosen_teff))\n # Now check closest gravity and turbulent velocity:\n grav_diff = np.inf\n chosen_grav = 0.0\n chosen_fname = ''\n all_files = glob.glob(chosen_teff_folder+'/*')\n\n for filename in all_files:\n grav = np.double((filename.split('grav')[1]).split('_')[1])\n c_g_diff = abs(grav-s_grav)\n if c_g_diff < grav_diff:\n chosen_grav = grav\n grav_diff = c_g_diff\n chosen_filename = filename\n\n # Summary:\n model_root_len = len(model_path)\n print('\\t + For input metallicity {}, effective temperature {} K, and\\n'\n '\\t log-gravity {}, and turbulent velocity {} km/s, closest\\n'\n '\\t combination is metallicity: {}, effective temperature: {} K,\\n'\n '\\t log-gravity {} and turbulent velocity of {} km/s.\\n\\n'\n '\\t + Chosen model file to be used:\\n\\t\\t{:s}.\\n'.\n format(s_met, s_teff, s_grav, s_vturb, chosen_met, chosen_teff,\n chosen_grav, chosen_vturb, chosen_filename[model_root_len:]))\n\n return chosen_filename, chosen_teff, chosen_grav, chosen_met, chosen_vturb", "def search_edges_from(concept='dog'):\n return search_edges(start='/c/' + settings.LANGUAGE + '/' + concept, minWeight=2)", "def test_magnitude_table_search_15():\n mag_params_15 = magnitude_table_search(9.91, age=5, band=\"K\", model=\"2015\")\n assert mag_params_15[\"M/Ms\"] == 0.09\n assert mag_params_15[\"Teff\"] == 2644\n assert mag_params_15[\"R/Rs\"] == 0.113\n assert mag_params_15[\"Mk\"] == 9.91", "def bowtie(search_list, metadata_list):\n search = 
pd.read_csv(search_list, index_col=None)\n metadata = pd.read_csv(metadata_list, sep='\\t', index_col=None)\n\n enc_metadata = metadata.set_index(['Run_ID'], drop=False)\n\n gsm_metadata = metadata.set_index(['GSM_ID'], drop=False)\n gsm_metadata = gsm_metadata.drop_duplicates(subset=['Run_ID'])\n\n search = search.set_index(['Data_ID'], drop=False)\n\n results = []\n\n samples = set()\n\n nodes = [1, 2, 3, 4, 5, 6]\n\n missed =set()\n count = 0\n\n for i in range(search.shape[0]):\n sample_id = search.ix[i, 'Data_ID']\n input_id = search.ix[i, 'Input']\n\n # if pd.isnull(input_id):\n # continue\n\n if sample_id.startswith('ENC') and not pd.isnull(input_id):\n input_id = input_id[7:-1]\n\n node_index = i % 6\n\n species = search.ix[sample_id, 'Organism']\n\n if sample_id.startswith('ENC'):\n try:\n if enc_metadata.ix[sample_id, 'Run type'] == 'single-ended':\n if sample_id not in samples:\n # ENC_single(sample_id, nodes[node_index], species)\n samples.add(sample_id)\n else:\n continue\n elif enc_metadata.ix[sample_id, 'Run type'] == 'paired-ended':\n pair_id = enc_metadata.ix[sample_id, 'Paired with']\n if sample_id not in samples and pair_id not in samples:\n samples.add(sample_id)\n samples.add(pair_id)\n # ENC_pair(sample_id, pair_id, nodes[node_index], species)\n elif sample_id in samples:\n samples.add(pair_id)\n continue\n elif pair_id in samples:\n samples.add(sample_id)\n continue\n except:\n print sample_id, 'sample'\n missed.add(sample_id)\n if sample_id.startswith(\"GSM\"):\n if sample_id in samples:\n continue\n samples.add(sample_id)\n\n try:\n SRR_ids = gsm_metadata.ix[sample_id, 'Run_ID']\n if isinstance(SRR_ids, str):\n if gsm_metadata.ix[sample_id, 'Run type'] == 'SINGLE':\n # SRR_single(SRR_id, node_id=nodes[node_index], species=species)\n pass\n elif gsm_metadata.ix[sample_id, 'Run type'] == 'PAIRED':\n # SRR_pair(SRR_id, nodes[node_index], species)\n pass\n else:\n for srr_id in SRR_ids.tolist():\n if gsm_metadata[gsm_metadata.Run_ID == srr_id].ix[0, 'Run type'] == 'SINGLE':\n # SRR_single(SRR_id, node_id=nodes[node_index], species=species)\n pass\n elif gsm_metadata[gsm_metadata.Run_ID == srr_id].ix[0, 'Run type'] == 'PAIRED':\n # SRR_pair(SRR_id, nodes[node_index], species)\n pass\n except:\n print sample_id, 'sample'\n missed.add(sample_id)\n pass\n\n if pd.isnull(input_id):\n # results.append((sample_id, ''))\n pass\n else:\n input_id = input_id.replace('files','')\n input_id = input_id.replace('/','')\n input_ids = [x.strip() for x in input_id.split(',')]\n if len(input_ids) > 1:\n print sample_id\n if sample_id.startswith('ENC'):\n count +=1\n if sample_id.startswith('ENC'):\n results.append((sample_id, ';'.join(input_ids)))\n if os.path.isfile(input_id):\n continue\n else:\n for input_id in input_ids:\n samples.add(input_id)\n if input_id.startswith('ENC'):\n try:\n if enc_metadata.ix[input_id, 'Run type'] == 'single-ended':\n # ENC_single(input_id, nodes[node_index], species)\n pass\n elif enc_metadata.ix[input_id, 'Run type'] == 'paired-ended':\n pair_id = enc_metadata.ix[input_id, 'Paired with']\n samples.add(pair_id)\n # ENC_pair(input_id, pair_id, nodes[node_index], species)\n pass\n except:\n print input_id, 'input'\n missed.add(input_id)\n if input_id.startswith(\"GSM\"):\n try:\n SRR_ids = gsm_metadata.ix[input_id, 'Run_ID']\n if isinstance(SRR_ids, str):\n if gsm_metadata.ix[input_id, 'Run type'] == 'SINGLE':\n # SRR_single(SRR_id, node_id=nodes[node_index], species=species)\n pass\n elif gsm_metadata.ix[input_id, 'Run type'] == 'PAIRED':\n # 
SRR_pair(SRR_id, nodes[node_index], species)\n pass\n else:\n for srr_id in SRR_ids.tolist():\n if gsm_metadata[gsm_metadata.Run_ID == srr_id].ix[0, 'Run type'] == 'SINGLE':\n # SRR_single(SRR_id, node_id=nodes[node_index], species=species)\n pass\n elif gsm_metadata[gsm_metadata.Run_ID == srr_id].ix[0, 'Run type'] == 'PAIRED':\n # SRR_pair(SRR_id, nodes[node_index], species)\n pass\n except:\n print input_id, 'input'\n missed.add(input_id)\n pass\n\n\n df = pd.DataFrame(results)\n df.to_csv('sample_input_pair.csv', index=None, header=False)\n\n f = open('missed.csv', 'w')\n for id in set(missed):\n f.write(id+'\\n')\n f.close()\n print count\n return results", "def search_plate(self):\n display(self.plate)", "def test_mass_table_search_15():\n comp_params_15 = mass_table_search(0.09, age=5, model=\"2015\")\n assert comp_params_15[\"M/Ms\"] == 0.09\n assert comp_params_15[\"Teff\"] == 2644\n assert comp_params_15[\"R/Rs\"] == 0.113\n assert comp_params_15[\"Mk\"] == 9.91", "def db_searcher(self) ->pd.DataFrame:\r\n\r\n # OpenMS Peptide Query\r\n protein_ids = []\r\n peptide_ids = []\r\n\r\n #SimpleSearchEngineAlgorithm compares mzML file against fasta file, and it gives an output that contains the number of peptides and the proteins in the database and how many spectra were matched to peptides and proteins\r\n if self.mzml_file and self.fasta_file:\r\n SimpleSearchEngineAlgorithm().search(self.mzml_file, self.fasta_file, protein_ids, peptide_ids)\r\n logger.info(\"MS file and fasta file were accepted as input files\")\r\n else:\r\n logger.error(\"Please enter appropriate input files \")\r\n\r\n\r\n # Results Preprocessing\r\n mz_lst_1 = []\r\n mz_lst_2 = []\r\n\r\n MZ = int()\r\n RT = int()\r\n meta_val = int()\r\n score_type = int()\r\n hit_rank = int()\r\n hit_charge = int()\r\n hit_seq = str()\r\n hit_monoisotopic = int()\r\n ppm_error = int()\r\n hit_score = int()\r\n\r\n if peptide_ids != []:\r\n logger.info(\"Peptides have successfully been identified by SimpleSearchEngineAlgorithm\")\r\n #Exploring the individual hits, and gathering the peptide information\r\n for peptides in peptide_ids:\r\n MZ = round(peptides.getMZ(), 2)\r\n RT = round(peptides.getRT(), 2)\r\n meta_val = peptides.getMetaValue(\"scan_index\")\r\n score_type = peptides.getScoreType()\r\n mz_lst_1.append([MZ, RT, meta_val, score_type])\r\n\r\n for hit in peptides.getHits():\r\n hit_rank = round(hit.getRank(), 2)\r\n hit_charge = round(hit.getCharge(), 2)\r\n hit_seq = hit.getSequence()\r\n hit_monoisotopic = round(\r\n hit.getSequence().getMonoWeight(Residue.ResidueType.Full, hit.getCharge()) / hit.getCharge(), 2)\r\n ppm_error = round(abs(hit_monoisotopic - peptides.getMZ()) / hit_monoisotopic * 10 ** 6, 2)\r\n hit_score = round(hit.getScore(), 2)\r\n\r\n mz_lst_2.append([hit_rank, hit_charge, str(hit_seq), hit_monoisotopic, ppm_error, hit_score])\r\n\r\n final_lst = list()\r\n for i in range(len(mz_lst_2)):\r\n final_lst.append(mz_lst_1[i] + mz_lst_2[i])\r\n\r\n #Generating a dataframe that contains the peptide information\r\n if final_lst != []:\r\n self.peptide_info_df = pd.DataFrame(final_lst)\r\n self.peptide_info_df.columns = [\"Peptide ID m/z\", \"Peptide ID RT\", \"Peptide scan index\", \"Peptide ID score type\",\r\n \"Peptide hit rank\", \"Peptide hit charge\", \"Peptide hit sequence\", \"Peptide hit monoisotopic m/z\",\r\n \"Peptide ppm error\", \"Peptide hit score\"]\r\n logging.info(\"A dataframe containing the properties of peptides was generated\")\r\n\r\n else:\r\n logging.error(\"A dataframe 
containing the properties of peptides was not generated. Please check the inputs\")\r\n\r\n # For depicting entire columns in the dataframe, pd.option_context was used\r\n with pd.option_context('display.max_rows', None, 'display.max_columns',\r\n None): # more options can be specified also\r\n print(self.peptide_info_df)\r\n return self.peptide_info_df\r\n\r\n\r\n else:\r\n print(\"No Peptide Data found! Please try a different reference Fasta file or MS spectrum file!\")\r\n logger.warning(\"Peptides have not been identified by SimpleSearchEngineAlgorithm. Please try a different reference fasta file or MS file\")", "def searchlight(coords,K,mask,loo_idx,subjs,song_idx,song_bounds):\n\n stride = 5\n radius = 5\n min_vox = 10\n nPerm = 1000\n SL_allvox = []\n SL_results = []\n datadir = '/tigress/jamalw/MES/prototype/link/scripts/data/searchlight_input/'\n for x in range(0,np.max(coords, axis=0)[0]+stride,stride):\n for y in range(0,np.max(coords, axis=0)[1]+stride,stride):\n for z in range(0,np.max(coords, axis=0)[2]+stride,stride):\n if not os.path.isfile(datadir + subjs[0] + '/' + str(x) + '_' + str(y) + '_' + str(z) + '.npy'):\n continue\n D = distance.cdist(coords,np.array([x,y,z]).reshape((1,3)))[:,0]\n SL_vox = D <= radius\n data = []\n for i in range(len(subjs)):\n subj_data = np.load(datadir + subjs[i] + '/' + str(x) + '_' + str(y) + '_' + str(z) + '.npy')\n data.append(np.nan_to_num(stats.zscore(subj_data[:,:,0],axis=1,ddof=1)))\n for i in range(len(subjs)):\n subj_data = np.load(datadir + subjs[i] + '/' + str(x) + '_' + str(y) + '_' + str(z) + '.npy')\n data.append(np.nan_to_num(stats.zscore(subj_data[:,:,1],axis=1,ddof=1))) \n print(\"Running Searchlight\")\n SL_within_across = HMM(data,K,loo_idx,song_idx,song_bounds)\n SL_results.append(SL_within_across)\n SL_allvox.append(np.array(np.nonzero(SL_vox)[0])) \n voxmean = np.zeros((coords.shape[0], nPerm+1))\n vox_SLcount = np.zeros(coords.shape[0])\n for sl in range(len(SL_results)):\n voxmean[SL_allvox[sl],:] += SL_results[sl]\n vox_SLcount[SL_allvox[sl]] += 1\n voxmean = voxmean / vox_SLcount[:,np.newaxis]\n vox_z = np.zeros((coords.shape[0], nPerm+1))\n for p in range(nPerm+1):\n vox_z[:,p] = (voxmean[:,p] - np.mean(voxmean[:,1:],axis=1))/np.std(voxmean[:,1:],axis=1) \n return vox_z,voxmean", "def search_sample(path):\r\n db = get_database()\r\n attributes = getstrings(path)\r\n minhashes,sketches = minhash(attributes)\r\n neighbors = []\r\n\r\n for sketch in sketches:\r\n sketch = str(sketch)\r\n\r\n if not sketch in db:\r\n continue\r\n\r\n for neighbor_path in db[sketch]:\r\n neighbor_minhashes = db[neighbor_path]['minhashes']\r\n similarity = (neighbor_minhashes == minhashes).sum() / float(NUM_MINHASHES)\r\n neighbors.append((neighbor_path,similarity))\r\n\r\n neighbors = list(set(neighbors))\r\n neighbors.sort(key=lambda entry:entry[1],reverse=True)\r\n print \"\"\r\n print \"Sample name\".ljust(64),\"Shared code estimate\"\r\n for neighbor, similarity in neighbors:\r\n short_neighbor = neighbor.split(\"/\")[-1]\r\n comments = db[neighbor]['comments']\r\n print str(\"[*] \"+short_neighbor).ljust(64),similarity\r\n for comment in comments:\r\n print \"\\t[comment]\",comment", "def results():\n length = 0\n hasNext = False\n nextPageNumber = None\n fuzzy_terms = []\n r = []\n\n theWhooshSearch = WhooshSearch()\n theWhooshSearch.index()\n\n if request.method == 'POST':\n data = request.form\n else:\n data = request.args\n\n searchType = data.get('searchType')\n keywordQuery = data.get('keywordQuery')\n fuzzySearch = 
data.get('fuzzySearch')\n page = int(data.get('pageNumber'))\n\n if keywordQuery:\n keywordQuery = removeStop(keywordQuery)\n\n if searchType == 'advanced':\n actor = data.get('actor')\n production_company = data.get('production')\n director = data.get('director')\n genre = data.get('genre')\n runTime = data.get('runtime')\n if fuzzySearch == 'True' or fuzzySearch == 'true':\n whooshFuzzy = data.get('whoosh')\n if whooshFuzzy == 'True' or whooshFuzzy == 'true':\n # Whoosh Advanced Fuzzy Search\n r, length = theWhooshSearch.advancedSearch(\n keywordQuery, actor, production_company, director, genre, runTime, whooshFuzzy, page)\n else:\n # BK Tree Advanced Search\n keywordQuery = keywordQuery.split()\n for word in keywordQuery:\n fuzzy_terms += fuzzy_tree.autocorrect(word, 1)\n for term in fuzzy_terms:\n tempResult, tempLength = theWhooshSearch.advancedSearch(\n term[0], actor, production_company, director, genre, runTime, False, pageNumber=-1)\n r += tempResult\n length += tempLength\n r = r[page * 10 - 10:page * 10]\n else:\n # Regular Advanced Search\n r, length = theWhooshSearch.advancedSearch(\n keywordQuery, actor, production_company, director, genre, runTime, False, page)\n else:\n if fuzzySearch == 'True' or fuzzySearch == 'true':\n whooshFuzzy = data.get('whoosh')\n if whooshFuzzy == 'True' or whooshFuzzy == 'true':\n r, length = theWhooshSearch.basicSearch(\n keywordQuery, whooshFuzzy, page)\n else:\n keywordQuery = keywordQuery.split()\n for word in keywordQuery:\n fuzzy_terms += fuzzy_tree.autocorrect(word, 1)\n for term in fuzzy_terms:\n tempResult, tempLength = theWhooshSearch.basicSearch(\n term[0], False, pageNumber=-1)\n r += tempResult\n length += tempLength\n r = r[page * 10 - 10:page * 10]\n else:\n r, length = theWhooshSearch.basicSearch(\n keywordQuery, False, page)\n\n # Check if there are new pages\n if nextPage(length, page):\n nextPageNumber = page + 1\n previous = page - 1\n returnResults = {'nextPage': nextPageNumber,\n 'prevPage': previous, 'results': r}\n return jsonify(returnResults)", "def search(self, action: 'SoSearchAction') -> \"void\":\n return _coin.SoVRMLGeometry_search(self, action)", "def ClaimClassifierHyperParameterSearch():\n\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates count error statistics for buckets of user-observed counts (post-filtering). Outputs the preservation_by_length and preservation_by_count tsv and svg files.
def generateStatsAndGraphics(output_dir, max_syn_count, count_length_preservation, prefix):
    sorted_counts = sorted(count_length_preservation, key=lambda x: x[0], reverse=False)
    buckets = []
    next_bucket = 10
    while next_bucket < max_syn_count:
        buckets.append(next_bucket)
        next_bucket *= 2
    buckets.append(next_bucket)
    bucket_to_preservations = defaultdict(list)
    bucket_to_counts = defaultdict(list)
    bucket_to_lengths = defaultdict(list)
    length_to_preservations = defaultdict(list)
    length_to_counts = defaultdict(list)
    for (count, length, preservation) in sorted_counts:
        bucket = buckets[0]
        for bi in range(len(buckets)-1, -1, -1):
            if count > buckets[bi]:
                bucket = buckets[bi+1]
                break
        bucket_to_preservations[bucket].append(preservation)
        bucket_to_lengths[bucket].append(length)
        bucket_to_counts[bucket].append(count)
        length_to_counts[length].append(count)
        length_to_preservations[length].append(preservation)
    bucket_to_mean_count = {bucket: sum(counts)/len(counts) if len(counts) > 0 else 0 for bucket, counts in bucket_to_counts.items()}
    bucket_to_mean_preservation = {bucket: sum(preservations)/len(preservations) if len(preservations) > 0 else 0 for bucket, preservations in bucket_to_preservations.items()}
    bucket_to_mean_length = {bucket: sum(lengths)/len(lengths) if len(lengths) > 0 else 0 for bucket, lengths in bucket_to_lengths.items()}
    counts_tsv = path.join(output_dir, f'{prefix}_preservation_by_count.tsv')
    counts_svg = path.join(output_dir, f'{prefix}_preservation_by_count.svg')
    with open(counts_tsv, 'w') as f:
        f.write('\t'.join(['syn_count_bucket', 'mean_combo_count', 'mean_combo_length', 'count_preservation'])+'\n')
        for bucket in reversed(buckets):
            f.write('\t'.join([str(bucket), str(bucket_to_mean_count.get(bucket, 0)), str(bucket_to_mean_length.get(bucket, 0)), str(bucket_to_mean_preservation.get(bucket, 0))])+'\n')
    util.plotStats(
        x_axis='syn_count_bucket',
        x_axis_title='Count of Filtered Synthetic Records',
        y_bar='mean_combo_length',
        y_bar_title='Mean Length of Combinations',
        y_line='count_preservation',
        y_line_title='Count Preservation (Synthetic/Sensitive)',
        color='lightgreen',
        darker_color='green',
        stats_tsv=counts_tsv,
        stats_svg=counts_svg,
        delimiter='\t',
        style='whitegrid',
        palette='magma')
    length_to_mean_preservation = {length: sum(preservations)/len(preservations) if len(preservations) > 0 else 0 for length, preservations in length_to_preservations.items()}
    length_to_mean_count = {length: sum(counts)/len(counts) if len(counts) > 0 else 0 for length, counts in length_to_counts.items()}
    lengths_tsv = path.join(output_dir, f'{prefix}_preservation_by_length.tsv')
    lengths_svg = path.join(output_dir, f'{prefix}_preservation_by_length.svg')
    with open(lengths_tsv, 'w') as f:
        f.write('\t'.join(['syn_combo_length', 'mean_combo_count', 'count_preservation'])+'\n')
        for length in sorted(length_to_preservations.keys()):
            f.write('\t'.join([str(length), str(length_to_mean_count.get(length, 0)), str(length_to_mean_preservation.get(length, 0))])+'\n')
    util.plotStats(
        x_axis='syn_combo_length',
        x_axis_title='Length of Combination',
        y_bar='mean_combo_count',
        y_bar_title='Mean Synthetic Count',
        y_line='count_preservation',
        y_line_title='Count Preservation (Synthetic/Sensitive)',
        color='cornflowerblue',
        darker_color='mediumblue',
        stats_tsv=lengths_tsv,
        stats_svg=lengths_svg,
        delimiter='\t',
        style='whitegrid',
        palette='magma')
[ "def summary(path_to_GARUDATA, cycle):\n count = 0\n\n vis_directory, file_path_list = visibility_update(path_to_GARUDATA, cycle)\n\n for file_path in file_path_list:\n current_visibility = vis_directory[file_path]\n with open(file_path) as file:\n lines = file.readlines()\n visibility_count = 0\n dict = {}\n for line in lines:\n if 'processing took' in line:\n if 'day' in line:\n line = line.split(\" \")\n minutes = (int(line[3]) * 24 * 60)\n temp_time = line[5].split(\":\")\n minutes += int(temp_time[0]) * 60 + int(temp_time[1])\n seconds = temp_time[2]\n else:\n line = line.split(\" \")\n temp_time = line[3].split(\":\")\n minutes = int(temp_time[0]) * 60 + int(temp_time[1])\n seconds = temp_time[2]\n if 'image' in line:\n line = line.split(\" \")\n index = line.index(\"image:\") - 1\n keyname = line[index]\n visibility = int(line[line.index(\"visibilities,\") - 1])\n visibility = current_visibility[visibility_count]\n visibility_count += 1\n flux = float(line[line.index(\"Jy\") - 1])\n clean_components = int(line[line.index(\"CLEAN\") - 1])\n rms = float(line[line.index(\"mJy/beam\") - 1])\n dict[keyname] = {\"visibilities\" : visibility, \"flux\" : flux, \"clean_components\" : clean_components, \"rms\" : rms}\n\n if lines:\n filename = file_path.split(\"/\")\n dirlist = filename\n filename = filename[-1]\n filename = filename.split(\".\")\n filename = filename[0]\n filename = filename.split(\"_\")\n source = filename[1]\n\n matching = [s for s in filename if \"GMRT\" in s]\n try:\n freq = matching[0][4:]\n except:\n continue\n\n file_last_val = filename[-2]+\"_\"+filename[-1]\n\n obsno = dirlist[-4]\n\n cyclenumlist = [s for s in dirlist if \"CYCLE\" in s]\n\n cycleno = cyclenumlist[0] #dirlist[-5]\n date = dirlist[-3].split(\"_\")[1]\n proposal_id = dirlist[-3].split(\"_\")[0]\n\n document = {}\n document['source'] = source\n document['frequency'] = int(freq)\n document['obs_no'] = obsno\n document['proposal_id'] = proposal_id\n document['date'] = date\n document['summary'] = dict\n document['time'] = {'minutes': minutes, 'seconds':seconds}\n document['file_last_val'] = file_last_val\n\n #Insert into database\n last_entry = dict.get(\"SP2B\")\n\n if last_entry is not None:\n collection = mydb[cycleno]\n collection.insert_one(document)\n count += 1\n\n return count", "def writeStatsMissed(outfile,\n output,\n genomes,\n options):\n counts = [0] * (len(genomes) + 1)\n\n for entry, missed_genomes in output.items():\n counts[len(missed_genomes)] += 1\n\n outfile.write(\"ngenomes\\tcounts\\tpercent\\n\")\n total = sum(counts)\n for x in range(1, len(genomes)):\n outfile.write(\"%i\\t%i\\t%s\\n\" % (\n x, counts[x], options.format_percent % (100.0 * float(counts[x]) / total)))", "def generate_stats(self):\n\n if self.test_set == 'test':\n print 'Error: test set has no labels'\n return\n\n lbbs_not_in_pbbs_df = pd.DataFrame(columns=['pid','z','y','x','d'])\n if self.classifier_pred_path is None:\n pbbs_df = pd.DataFrame(columns=['pid','prob','z','y','x','d','nod'])\n else:\n pbbs_df = pd.DataFrame(columns=['pid','prob','z','y','x','d','nod','c_prob'])\n\n n_annot = 0\n for name in self.filenames:\n #print name\n pbb = np.load(os.path.join(self.bbox_dir, name+'_pbb.npy'))\n # add nod\n pbb = np.concatenate([pbb, np.zeros((pbb.shape[0],1))], axis=1)\n\n # Include classifier scores\n # Use nan for patients that got pbbs but not classifier predictions\n # eg blacklist\n if self.classifier_pred_path is not None:\n pred_fname = os.path.join(self.classifier_pred_path, name+'_pred.npy')\n if 
os.path.exists(pred_fname):\n cl_scores = np.load(pred_fname)\n else:\n cl_scores = np.empty((pbb.shape[0],1))\n cl_scores[:] = np.nan\n\n pbb = np.concatenate([pbb,cl_scores], axis=1)\n pbb = pbb[pbb[:,0].argsort()][::-1]\n lbb = np.load(os.path.join(self.bbox_dir, name+'_lbb.npy'))\n n_annot += len(lbb)\n lab_hits = np.zeros(len(lbb))\n\n # determine ground truth label of pbb\n # exclude relevant pbbs that are redundant for purposes of FROC\n\n #print 'pbb len', len(pbb)\n it = range(len(pbb)) if self.topk is None else range(min(len(pbb),self.topk))\n for i in it:\n\n if self.pbb_cutoff is not None and pbb[i,0] < self.pbb_cutoff:\n break\n\n lbb_match = False\n redundant_hit = False\n for j in range(len(lbb)):\n if is_hit(pbb[i][1:4], lbb[j][:3], lbb[j][3]):\n if lab_hits[j] > 0:\n redundant_hit = True\n #print 'redundant tp!'\n #print name, 'pbb', pbb[i], 'lbb', lbb[j]\n #tp.append(pbb[i])\n lab_hits[j] += 1\n lbb_match = True\n break\n if lbb_match:\n pbb[i,5] = 1\n else:\n pbb[i,5] = 0\n\n if not redundant_hit:\n pbbs_df.loc[len(pbbs_df)] = [name] + list(pbb[i])\n missed = pd.DataFrame(columns=list('zyxd'), data = lbb[lab_hits == 0].reshape(-1,len(list('zyxd'))))\n missed['pid'] = name\n missed = missed[['pid','z','y','x','d']]\n lbbs_not_in_pbbs_df = pd.concat([lbbs_not_in_pbbs_df,missed], ignore_index=True)\n\n\n # convert scores to probabilities\n pbbs_probs = s_to_p(np.array(pbbs_df['prob']))\n pbbs_df['prob'] = pbbs_probs\n\n if self.classifier_pred_path is not None:\n pbbs_cprobs = s_to_p(np.array(pbbs_df['c_prob']))\n pbbs_df['c_prob'] = pbbs_cprobs\n\n # ensemble\n pbbs_df['ensemble'] = (pbbs_df['prob'] + pbbs_df['c_prob'])/2.0\n\n\n\n self.n_annot = n_annot\n self.pbbs = pbbs_df\n self.rel = pbbs_df[pbbs_df['nod']==1]\n self.irr = pbbs_df[pbbs_df['nod']==0]\n self.lbbs_not_in_pbbs = lbbs_not_in_pbbs_df\n print 'loaded {} pbbs'.format(len(pbbs_df))\n if self.test_set == 'train' or self.test_set == 'val':\n print 'saved pbbs missed {} out of {} annotations ({:.2%})'.format(len(lbbs_not_in_pbbs_df),\n n_annot,\n 1.0 * len(lbbs_not_in_pbbs_df)/n_annot)", "def mageckcount_main(args):\n # check arguments\n genedict=mageckcount_checkargs(args)\n # save sgRNA ID and gene name\n sgdict={} #\n for (k,v) in genedict.items():\n sgdict[v[0]]=(k,v[1]) # {seq:(sgid,gene)\n if hasattr(args,'count_table') and args.count_table != None:\n # treat it as a count table\n (allmappeddict,datastat,mapptab)=mageckcount_processcounttable(args,genedict,sgdict)\n # note that the key of allmappeddict is sgRNA ID\n # if library file is provided, we need to change sgdict to make it consistent with other situations (like fastq file)\n sgdict={k:(k,v) for (k,v) in mapptab.items()}\n else:\n # check the listed files: fastq/sam/bam files provided\n (allmappeddict,datastat)=mageckcount_processfastq(args,genedict,sgdict)\n # note that the key of allmappeddict is sgRNA sequence\n \n # normalize read counts\n if hasattr(args,\"norm_method\"):\n normmethod=args.norm_method\n else:\n normmethod=\"median\"\n if hasattr(args,\"control_sgrna\"):\n ctrlsg=args.control_sgrna\n else:\n ctrlsg=None\n medalldict=normalizeCounts(allmappeddict,sgdict=sgdict,method=normmethod,controlsgfile=ctrlsg)\n ofilel=open(args.output_prefix+'.count_normalized.txt','w')\n mageckcount_printdict(medalldict,args,ofilel,None,sgdict,datastat,sep='\\t')\n ofilel.close()\n # perform additional QCs\n if args.day0_label!= None:\n mageckcount_getQC(args,datastat,sgdict)\n # print statistics\n mageckcount_printstat(args,datastat)\n return 0", "def 
generategraphicsandstats():\n positions = np.array([1,10,100,1000])\n numtrials = 10000\n meanloop = np.zeros(4)\n stdloop = np.zeros(4)\n results = open('results.txt','w')\n for numpos in range(4):\n value = investment.calculatesimulation(positions[numpos], numtrials)\n cumu_ret = value[0]\n daily_ret = value[1]\n plt.show()\n plt.hist(daily_ret,100, range=[-1, 1])\n plt.xlim(-1,1)\n meanloop[numpos] = np.mean(np.array(cumu_ret))\n stdloop[numpos] = np.std(np.array(cumu_ret))\n valueprint = '%04d' % positions[numpos]\n plt.savefig('histogram_'+valueprint+'_pos.pdf')\n plt.close()\n results.write(\"Mean for position %i = %f \\n\" %(positions[numpos],np.mean(np.array(daily_ret))))\n results.write(\"Std for position %i = %f \\n\\n\" %(positions[numpos],np.std(np.array(daily_ret))))\n statstotal=[meanloop,stdloop]\n return statstotal", "def analyze_possible_autocatalytic_cycles(generation_num, mod_exports_folder_path, query_results_folder):\n \n \n print(\"Generating some plots on cycle size distribution / stats by generation...\")\n # 1.\n query_data = pd.read_json(f\"output/\" + query_results_folder + f\"/{generation_num}/autocat_query_results.json\")\n if query_data.empty:\n print(\"No cycles found.\")\n return\n # print(query_data.describe())\n # print(query_data.head())\n \n # cycle distribution (y axis is frequency, x axis is ring size)\n fig, ax = plt.subplots()\n # print(query_data.head())\n # query_data['countMolsInRing'] = query_data['countMolsInRing'].astype(int)\n query_data['countMolsInRing'].value_counts().plot(ax = ax,\n kind='bar',\n title = \"Ring Size Frequency Distribution\")\n ax.set_xlabel(\"Ring Size (# of Molecules)\")\n ax.set_ylabel(\"Count of Cycles\")\n plt.savefig(f\"output/\" + query_results_folder + f\"/{generation_num}/ring_size_distribution.png\")\n # plt.show()\n \n \n # 2.\n # Total mass of cycle per generation. 
Not really needed.\n \n \n # 3.\n # count of cycles by feeder generation\n fig, ax = plt.subplots()\n gen_formed_arr = []\n feederMolData = list(query_data['feederMol'])\n for feederMol in feederMolData:\n gen_formed_arr.append(feederMol['generation_formed'])\n # get unique list of feeder generations and sum by generation\n gen_formed_arr = np.array(gen_formed_arr)\n feeder_gen_counts = np.unique(gen_formed_arr, return_counts=True)\n feeder_gen_counts = np.transpose(feeder_gen_counts)\n cycles_by_gen_df = pd.DataFrame(feeder_gen_counts, columns=['feeder_gen',\n 'cycle_count'])\n cycles_by_gen_df.plot(ax=ax,\n x = \"feeder_gen\",\n y = \"cycle_count\",\n kind = \"bar\",\n legend = False,\n title = \"Count of Cycles by Feeder Generation\")\n ax.set_xlabel(\"Cycle Generation (Generation Formed of Feeder Molecule)\")\n ax.set_ylabel(\"Count of Cycles\")\n plt.savefig(f\"output/\" + query_results_folder + f\"/{generation_num}/count_cycles_by_feeder_generation.png\")\n \n # close all plots so they don't accumulate memory\n print(\"\\tAutocatalysis pattern matching done.\")\n plt.close('all')", "def sumStats(generation, individuals, errorTrackers, sizeTrackers):\n print(f\"GENERATION {generation}\")\n errors = [individual.getTotalError() for individual in individuals]\n depths = [individual.getProgramSize() for individual in individuals]\n numNodes = [Individual.getNumNodes() for Individual in individuals]\n errorTrackers.append(round(min(errors), 2)); sizeTrackers.append(getAverage(numNodes))\n print(f\"ERROR - Average: {getAverage(errors)}, Min: {round(min(errors), 2)}, Max: {round(max(errors), 2)}, Std: {round(pstdev(errors), 2)}\")\n print(f\"DEPTH - Average: {getAverage(depths)}, Min: {round(min(depths), 2)}, Max: {round(max(depths), 2)}, Std: {round(pstdev(depths), 2)}\")\n print(f\"NODES - Average: {getAverage(numNodes)}, Min: {round(min(numNodes), 2)}, Max: {round(max(numNodes), 2)}, Std: {round(pstdev(numNodes), 2)}\")\n print()", "def plot_all(self):\n files = [f for f in listdir(self.count_path) if isfile(join(self.count_path, f))]\n try:\n mkdir(self.plots_path)\n except:\n print('plots directory already exists')\n for file_name in files:\n with open(join(self.count_path, file_name), 'rb') as f:\n counts = pickle.load(f)\n file_name = file_name[:-4]\n try:\n mkdir(join(self.plots_path, file_name))\n except:\n print('plots ' + file_name + ' directory already exists')\n counts['w_b/w'] = np.nan_to_num((self.get_marginal_counts(counts['f(w_b)']) / self.get_marginal_counts(counts['f(w)'])))\n counts['b_b/b'] = np.nan_to_num((self.get_marginal_counts(counts['f(b_b)']) / self.get_marginal_counts(counts['f(b)'])))\n self.plot_nodes_over_time(counts, file_name)\n self.plot_edges_over_time(counts, file_name)\n self.plot_f_w_over_time(counts, file_name)\n self.plot_bichromatic_fraction_diff_over_time(counts, file_name)\n self.plot_f_b_over_time(counts, file_name)\n self.plot_f_w_f_b_ratios_over_time(counts, file_name)\n self.plot_f_w_f_b_separately_over_time(counts, file_name)\n self.plot_marginal_w_b_over_time(counts, file_name)\n self.plot_marginal_bichromatic_fraction_diff_over_time(counts, file_name)", "def generate_report_file(self):\n # Organize raw data into lines for HTML processing\n # All of self.users sorted\n sorteduserlist = sorted(self.users.itervalues(),\n key=lambda user: user.get_wasted_hours_percent(),\n reverse=True)\n\n # Generate report lines, with some cutoffs\n all_report_lines_gen = (\n (user.user,\n self.vo,\n NiceNum.niceNum(user.failure['CoreHours'], 1),\n 
NiceNum.niceNum(user.get_wasted_hours_percent(), 0.1),\n NiceNum.niceNum(user.total_CoreHours, 1),\n NiceNum.niceNum(user.failure['Njobs'], 1),\n NiceNum.niceNum(user.get_job_failure_percent(), 0.1),\n NiceNum.niceNum(user.total_Njobs, 1))\n for user in sorteduserlist\n # Cutoffs: Core hours and Wasted Hours Percent\n if user.total_CoreHours >= self.hours_cutoff\n and user.get_wasted_hours_percent() / 100. >= self.perc_cutoff\n )\n\n # Enforce cutoff for number of entries to include (self.numrank)\n top_lines_gen = ((count,) + line\n for count, line in enumerate(all_report_lines_gen, start=1)\n if count <= self.numrank\n )\n\n # Generate HTML for report\n\n # Column info in (column name, column alignment) form\n columns_setup = [('Rank', 'right'),\n ('User', 'left'),\n ('VO', 'left'),\n ('Hours Wasted', 'right'),\n ('% Hours Wasted of Total', 'right'),\n ('Total Used Wall Hours', 'right'),\n ('Total Jobs Failed', 'right'),\n ('% Jobs Failed', 'right'),\n ('Total Jobs Run', 'right')]\n table = ''\n\n # Generate table lines\n def tdalign(info, align):\n \"\"\"HTML generator to wrap a table cell with alignment\"\"\"\n return '<td align=\"{0}\">{1}</td>'.format(align, info)\n\n lineal = [elt[1] for elt in columns_setup]\n for line in top_lines_gen:\n if self.verbose:\n print line\n linemap = zip(line, lineal)\n table += '\\n<tr>' + ''.join((tdalign(info, al) for info, al in linemap)) + '</tr>'\n\n if len(table) == 0:\n self.logger.info('The report is empty. Will not send anything.')\n sys.exit(0)\n\n # Generate header HTML\n headernames = (elt[0] for elt in columns_setup)\n header = ''.join(('<th>{0}</th>'.format(elt) for elt in headernames))\n\n # Put it all into the template\n htmldict = dict(title=self.title, table=table, header=header)\n\n with open(self.template, 'r') as f:\n self.text = f.read()\n\n self.text = self.text.format(**htmldict)\n\n return", "def write_output_stat(): \n print('\\n') \n print('Comparing input & output statistics')\n cwd = os.getcwd()\n json_dir = cwd + '/json_files' # Folder to store the json files\n\n try:\n with open(json_dir + '/nodeDict.txt') as json_file:\n nodeDict = json.load(json_file)\n\n with open(json_dir + '/elmtDict.txt') as json_file:\n elmtDict = json.load(json_file)\n\n with open(json_dir + '/elmtSetDict.txt') as json_file:\n elmtSetDict = json.load(json_file)\n\n with open(json_dir + '/particle_data.txt') as json_file: \n particle_data = json.load(json_file)\n \n with open(json_dir + '/RVE_data.txt') as json_file: \n RVE_data = json.load(json_file)\n\n with open(json_dir + '/simulation_data.txt') as json_file: \n simulation_data = json.load(json_file) \n \n except FileNotFoundError:\n print('Json file not found, make sure \"particleStatGenerator(), packingRoutine(), voxelizationRoutine()\" function is executed first!')\n raise FileNotFoundError\n \n # Extract from dictionaries\n par_eqDia = particle_data['Equivalent_diameter']\n par_majDia = particle_data['Major_diameter']\n par_minDia = particle_data['Minor_diameter1']\n voxel_size = RVE_data['Voxel_resolution']\n RVEsize = RVE_data['RVE_size']\n \n if simulation_data['Periodicity'] == 'True':\n periodic_status = True\n elif simulation_data['Periodicity'] == 'False':\n periodic_status = False\n \n # Check if Equiaxed or elongated particles\n if np.array_equal(par_majDia, par_minDia): # Equiaxed grains (spherical particles) \n \n # Find each grain's equivalent diameter\n grain_eqDia = [] \n for k, v in elmtSetDict.items():\n num_voxels = len(v)\n grain_vol = num_voxels * (voxel_size)**3\n 
grain_dia = 2 * (grain_vol * (3/(4*np.pi)))**(1/3)\n grain_eqDia.append(grain_dia)\n \n print('Writing particle & grain equivalent diameter files', end=\"\")\n \n # write out the particle and grain equivalent diameters to files\n np.savetxt('particle_equiDiameters.txt', par_eqDia)\n np.savetxt('grain_equiDiameters.txt', grain_eqDia) \n \n else: # Elongated grains (ellipsoidal particles)\n\n grain_eqDia, grain_majDia, grain_minDia = [], [], [] \n # Find all the nodal coordinates belonging to the grain\n grain_node = {} \n for k, v in elmtSetDict.items(): \n num_voxels = len(v)\n grain_vol = num_voxels * (voxel_size)**3\n grain_dia = 2 * (grain_vol * (3/(4*np.pi)))**(1/3)\n grain_eqDia.append(grain_dia) \n \n # All nodes belonging to grain \n nodeset = set()\n for el in v:\n nodes = elmtDict[str(el)]\n for n in nodes:\n if n not in nodeset:\n nodeset.add(n)\n \n # Get the coordinates as an array \n points = [nodeDict[str(n)] for n in nodeset]\n points = np.asarray(points) \n grain_node[k] = points\n \n if periodic_status: \n # If periodic, find the grains whose perodic halves have to be shifted\n shiftRight, shiftTop, shiftBack = [], [], [] \n for key, value in grain_node.items(): \n \n # Find all nodes on left, Right, Top, Bottom, Front & Back faces\n nodeLS, nodeRS = set(), set()\n nodeTS, nodeBS = set(), set()\n nodeFS, nodeBaS = set(), set() \n for enum, coord in enumerate(value): \n \n if abs(0.0000 - coord[0]) <= 0.00000001: # nodes on Left face\n nodeLS.add(enum)\n elif abs(RVEsize - coord[0]) <= 0.00000001: # nodes on Right face\n nodeRS.add(enum)\n \n if abs(0.0000 - coord[1]) <= 0.00000001: # nodes on Bottom face\n nodeBS.add(enum)\n elif abs(RVEsize - coord[1]) <= 0.00000001: # nodes on Top face\n nodeTS.add(enum)\n\n if abs(0.0000 - coord[2]) <= 0.00000001: # nodes on Front face\n nodeFS.add(enum)\n elif abs(RVEsize - coord[2]) <= 0.00000001: # nodes on Back face\n nodeBaS.add(enum) \n \n if len(nodeLS) != 0 and len(nodeRS) != 0: # grain is periodic, has faces on both Left & Right sides\n shiftRight.append(key) # left set has to be moved to right side \n if len(nodeBS) != 0 and len(nodeTS) != 0: # grain is periodic, has faces on both Top & Bottom sides\n shiftTop.append(key) # bottom set has to be moved to Top side \n if len(nodeFS) != 0 and len(nodeBaS) != 0: # grain is periodic, has faces on both Front & Back sides\n shiftBack.append(key) # front set has to be moved to Back side \n \n # For each grain that has to be shifted, pad along x, y, z respectively\n for grain in shiftRight:\n pts = grain_node[grain] \n # Pad the nodes on the left side by RVE x-dimension\n for enum, val in enumerate(pts[:, 0]):\n if val>=0.0 and val<=RVEsize/2.:\n pts[enum, 0] += RVEsize\n\n for grain in shiftBack:\n pts = grain_node[grain] \n # Pad the nodes on the front side by RVE z-dimension\n for enum, val in enumerate(pts[:, 2]):\n if val>=0.0 and val<=RVEsize/2.:\n pts[enum, 2] += RVEsize\n\n for grain in shiftTop:\n pts = grain_node[grain] \n # Pad the nodes on the bottom side by RVE y-dimension\n for enum, val in enumerate(pts[:, 1]):\n if val>=0.0 and val<=RVEsize/2.:\n pts[enum, 1] += RVEsize \n \n # For periodic & Non-periodic: create the convex hull and find the major & minor diameters\n for grain, points in grain_node.items(): \n \n hull = ConvexHull(points) \n hull_pts = points[hull.vertices]\n \n # Find the approximate center of the grain using extreme surface points\n xmin, xmax = np.amin(points[:, 0]), np.amax(points[:, 0])\n ymin, ymax = np.amin(points[:, 1]), np.amax(points[:, 1])\n 
zmin, zmax = np.amin(points[:, 2]), np.amax(points[:, 2]) \n center = np.array([xmin + (xmax-xmin)/2.0, ymin + (ymax-ymin)/2.0, zmin + (zmax-zmin)/2.0])\n \n # Find the euclidean distance to all surface points from the center\n dists = [euclidean(center, pt) for pt in hull_pts]\n a2 = 2.0*np.amax(dists)\n b2 = 2.0*np.amin(dists)\n\n grain_majDia.append(a2) # update the major diameter list\n grain_minDia.append(b2) # update the minor diameter list\n \n print('Writing particle & grain equivalent, major & minor diameter files', end=\"\")\n \n # write out the particle and grain equivalent diameters to files\n np.savetxt('particle_equiDiameters.txt', par_eqDia)\n np.savetxt('grain_equiDiameters.txt', grain_eqDia)\n \n # write out the particle and grain equivalent diameters to files\n np.savetxt('particle_majorDiameters.txt', par_majDia)\n np.savetxt('grain_majorDiameters.txt', grain_majDia)\n \n # write out the particle and grain equivalent diameters to files\n np.savetxt('particle_minorDiameters.txt', par_minDia)\n np.savetxt('grain_minorDiameters.txt', grain_minDia) \n \n print('---->DONE!') \n return", "def write_statistics(output_filename, test_name, d):\n log_and_print(\"Writing statistics tex file: %s\" % output_filename)\n with open(output_filename, \"w\") as out_f:\n out_f.write(r\"\\documentclass{article}\" + \"\\n\")\n out_f.write(r\"\\usepackage[margin=3cm]{geometry}\" + \"\\n\")\n out_f.write(r\"\\usepackage{pgfplots}\" + \"\\n\")\n out_f.write(r\"\\begin{document}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\centerline{{\\bf\\Large %s: Statistics}}\" % test_name + \"\\n\")\n out_f.write(\"\\n\")\n\n out_f.write(r\"\\section{Student score distribution}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{1cm}\" + \"\\n\")\n tex_write_basic_stats(out_f, d)\n out_f.write(r\"\\vspace{8mm}\" + \"\\n\")\n tex_write_pdf(out_f, d)\n out_f.write(r\"\\vspace{8mm}\" + \"\\n\")\n tex_write_cdf(out_f, d)\n\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(r\"\\section{Question summary data}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The plot below shows the \\emph{difficulty} and \\emph{discrimination}\" + \"\\n\")\n out_f.write(r\"for each question. Ideally the discrimination should be high, and\" + \"\\n\")\n out_f.write(r\"there should be a mixture of easy and hard questions.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\begin{center}\" + \"\\n\")\n out_f.write(r\"\\begin{tabular}{lll}\" + \"\\n\")\n out_f.write(r\"quantity & symbol & description \\\\\" + \"\\n\")\n out_f.write(r\"\\hline\" + \"\\n\")\n out_f.write(r\"difficulty & $D_{\\rm Q}(Q)$ & fraction of students who get question $Q$ incorrect \\\\\" + \"\\n\")\n out_f.write(r\"discrimination & $r^{\\rm P}_{\\rm Q}(Q)$ & correlation of scores between question $Q$ and the total exam\" + \"\\n\")\n out_f.write(r\"\\end{tabular}\" + \"\\n\")\n out_f.write(r\"\\end{center}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_summary(out_f, d)\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{1em}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The following plot shows the relative points for the question\" + \"\\n\")\n out_f.write(r\"variants. 
Variants with $R_{\\rm QV}(Q,V)$ above 100\\% are easier than\" + \"\\n\")\n out_f.write(r\"average (more points awarded), while values below 100\\% indicate\" + \"\\n\")\n out_f.write(r\"a harder-than-average variant.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{2em}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_variant_summary(out_f, d)\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The scatter-plot below contains the same information as the first plot\" + \"\\n\")\n out_f.write(r\"in this section, but plots the \\emph{discrimination} against the\" + \"\\n\")\n out_f.write(r\"\\emph{difficulty} for each question. Questions should ideally be high\" + \"\\n\")\n out_f.write(r\"on this plot (discriminating well), and there should be a mixture of\" + \"\\n\")\n out_f.write(r\"left-to-right (difficulty) values.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{2em}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_summary_scatter(out_f, d)\n\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(r\"\\section{Question detailed data}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_answers(out_f, d)\n\n out_f.write(r\"\\end{document}\" + \"\\n\")\n log(\"Successfully completed writing statistics tex file\")", "def buildStats(count, otype, name, dataset, filename, updated_on=None):\n y,t = getCurrentTime()\n size = file_size(filename)\n filename = filename.split('/')[-1]\n \n return(str(y), str(t), dataset, filename, size, count, otype, name, updated_on)", "def generate_report( saved_model, out_dir):\n data_path = '/usr/share/data/processed/'\n\n test_x = pd.read_csv(data_path + 'test_x.csv', index_col=0)\n test_y = pd.read_csv(data_path + 'test_y.csv', index_col=0,\\\n header= None).values.squeeze()\n train_y = pd.read_csv(data_path + 'train_y.csv', index_col=0, \\\n header= None).values.squeeze()\n train_mean = train_y.mean()\n\n xgb_model = xgb.Booster()\n xgb_model.load_model(Path(saved_model) / 'xgb_model.model')\n preds = xgb_model.predict(xgb.DMatrix(test_x))\n rmse = np.sqrt( np.mean( np.square(preds - test_y)))\n \n # dataframe of target values, predictions, and errors\n test_pred = pd.DataFrame({'y' : test_y, 'pred' : preds})\n test_pred['err_abs'] = abs(test_pred.y - test_pred.pred)\n test_pred['err'] = test_pred.y - test_pred.pred\n\n Path(out_dir).mkdir(parents=True, exist_ok=True)\n\n # indexes of sorted target point values to compare predictions\n idx= np.argsort( test_y)\n \n plot_feature_importance(xgb_model, out_dir)\n plot_regression(test_y, preds, idx, out_dir)\n plot_distribution(test_y, preds, out_dir) \n plot_error_cumulative(test_y, preds, idx, rmse, out_dir)\n compare_model_baseline(test_y, preds, idx, train_mean, out_dir)\n plot_error_per_point(test_pred, out_dir)\n plot_preds_per_point(test_pred, out_dir)\n spreadsheet_worst_preds(test_pred, test_x, out_dir)\n\n flag= Path(out_dir) / '.SUCCESS'\n flag.touch()", "def ext_count(self, filepath):\n \n self.filepath = filepath \n\n #can probably turn into dictionary...\n dat_names = ['vp3', 'cpt', 'ern', 'ant', 'aod', 'ans', 'stp', 'gng']\n cnt_names = ['eeo', 'eec', 'vp3', 'cpt', 'ern', 'ant', 'aod', 'ans', 'stp', 'gng']\n ps_names = ['vp3', 'cpt', 'ern', 'ant', 'aod', 'anr', 'stp', 'gng']\n avg_dict = {'vp3': '3',\n 'gng': '2',\n 'ern': '4',\n 'stp': '2',\n 'ant': '4',\n 'cpt': '6',\n 'aod': '2',\n 'anr': '2'}\n avg_od = collections.OrderedDict(sorted(avg_dict.items()))\n\n 
#count of each type of file extension \n avg_lst = []\n cnt_lst = []\n cnt_rr = []\n dat_lst = []\n ps_lst = []\n for root, dirs, files in os.walk(filepath):\n for name in files:\n if name.endswith(\"avg\"):\n avg_lst.append(name.split(\"_\")[0])\n if name.endswith(\"_32.cnt\"):\n cnt_lst.append(name.split(\"_\")[0])\n if name.endswith(\"dat\"):\n dat_lst.append(name.split(\"_\")[0])\n if name.endswith(\"ps\"):\n ps_lst.append(name.split(\"_\")[0])\n if name.endswith(\"_rr.cnt\"):\n cnt_rr.append(name.split(\"_\")[0])\n \n \n print(\"File extension Count:\", '\\n',\n len(avg_lst), '.avg files', '\\n',\n len(cnt_lst), '.cnt files', '\\n',\n len(dat_lst), '.dat files', '\\n',\n len(ps_lst), '.ps files', '\\n',\n len(cnt_rr), 're-runs', '(',str(cnt_rr).strip(\"[]\"),')')\n \n print ('Missing dat files =', ','.join(set(dat_names).difference(dat_lst)))\n print ('Missing cnt files =', ','.join(set(cnt_names).difference(cnt_lst)))\n print ('Missing ps files =', ','.join(set(ps_names).difference(ps_lst)))\n\n #count frequency of each task with .avg extension \n fq= defaultdict(int)\n for w in avg_lst:\n fq[w] += 1\n\n fq_od = collections.OrderedDict(sorted(fq.items()))\n\n \n for key in avg_od:\n if key not in fq_od:\n print('Missing avg files = ', key)\n #print(key,avg_dict[key])\n return True", "def test_counts(self):\n from mirtop.libs import logger\n from mirtop.gff.convert import convert_gff_counts\n import argparse\n\n logger.initialize_logger(\"test counts\", True, True)\n logger = logger.getLogger(__name__)\n\n args = argparse.Namespace()\n args.hairpin = \"data/examples/annotate/hairpin.fa\"\n args.sps = \"hsa\"\n args.gtf = \"data/examples/annotate/hsa.gff3\"\n args.gff = 'data/examples/synthetic/let7a-5p.gff'\n args.out = 'data/examples/synthetic'\n args.add_extra = True\n convert_gff_counts(args)\n os.remove(os.path.join(args.out, \"let7a-5p.tsv\"))\n\n return True", "def test_error_count_decorator(tmpdir, supply_generalSchema_data):\n\n # create single csv of test data\n df = supply_generalSchema_data[0]\n df_loc = str(tmpdir.join('test_csv0'))\n df.to_csv(df_loc, index=False)\n\n # create AggCSV object\n a = AggCSV('general', df_loc)\n a.validate()\n assert a.rows_w_errors == {3,4}\n assert a.distinct_cols_w_errors == {'Redirect Link', 'Cost Per Ad Click'}", "def per_run_statistics(run_stat):\n for end in ENDS:\n print(end)\n no_valid = 0\n for i, run in enumerate(run_stat[end]):\n f_stat = run.filtered_freq_stat(FREQ_THRESHOLD)\n # f_stat = run.freq_stat()\n if(f_stat):\n fs_msg = \", \".join(f\"{k}%: {v}\" for k, v in f_stat)\n print(f\"Run {i}: {fs_msg}\")\n else:\n no_valid += 1\n print(f\"Runs with no valid consensus: {no_valid}/{len(run_stat[end])}\")\n print(\"\")", "def report_uuas_and_tikz(args, prediction_batches, dataset, split_name):\n uspan_total = 0\n uspan_correct = 0\n adjacent_correct = 0\n rand_correct = 0\n total_sents = 0\n\n per_relation_stats = defaultdict(lambda: [0, 0])\n per_relation_stats_adjacent = defaultdict(lambda: [0, 0])\n\n uuas_per_sen = []\n for prediction_batch, (data_batch, label_batch, length_batch, observation_batch) in tqdm(\n zip(prediction_batches, dataset), desc=\"computing uuas\"\n ):\n for prediction, label, length, (observation, _) in zip(\n prediction_batch, label_batch, length_batch, observation_batch\n ):\n words = observation.sentence\n poses = observation.xpos_sentence\n length = int(length)\n assert length == len(observation.sentence)\n prediction = prediction[:length, :length]\n label = label[:length, :length].cpu()\n\n 
temp_gold_edges = list(\n zip([int(x) - 1 for x in observation.index], [int(x) - 1 for x in observation.head_indices])\n )\n edge_to_relation = dict(zip([tuple(sorted(e)) for e in temp_gold_edges], observation.governance_relations))\n gold_edges = prims_matrix_to_edges(label, words, poses)\n pred_edges = prims_matrix_to_edges(prediction, words, poses)\n rand_edges = prims_matrix_to_edges(np.random.rand(*prediction.shape), words, poses)\n\n non_punct = (np.array(observation.upos_sentence) != \"PUNCT\").nonzero()[0]\n adjacent_edges = [(non_punct[i], non_punct[i + 1]) for i in range(len(non_punct) - 1)]\n\n pred_edges = set([tuple(sorted(e)) for e in pred_edges])\n gold_edges = set([tuple(sorted(e)) for e in gold_edges])\n adjacent_edges = set([tuple(sorted(e)) for e in adjacent_edges])\n\n if args.print_tikz and total_sents < 100 :\n print_tikz(args.output_dir, pred_edges, gold_edges, edge_to_relation, words, split_name)\n\n num_correct = 0\n for edge in gold_edges:\n per_relation_stats[edge_to_relation[edge]][0] += 1\n if edge in pred_edges:\n num_correct += 1\n per_relation_stats[edge_to_relation[edge]][1] += 1\n\n num_correct_adjacent = 0\n for edge in gold_edges:\n per_relation_stats_adjacent[edge_to_relation[edge]][0] += 1\n if edge in adjacent_edges:\n num_correct_adjacent += 1\n per_relation_stats_adjacent[edge_to_relation[edge]][1] += 1\n\n num_correct_rand = len(set([tuple(sorted(e)) for e in rand_edges]).intersection(gold_edges))\n\n # computed error matrix after filtering adjacency edges\n pred_in_adjacent = pred_edges.intersection(adjacent_edges)\n pred_out_adjacent = pred_edges - adjacent_edges\n gold_in_adjacent = gold_edges.intersection(adjacent_edges)\n gold_out_adjacent = gold_edges - adjacent_edges\n\n uspan_correct += num_correct\n adjacent_correct += num_correct_adjacent\n rand_correct += num_correct_rand\n uspan_total += len(gold_edges)\n total_sents += 1\n uuas_per_sen.append(uspan_correct / uspan_total)\n\n uuas = uspan_correct / float(uspan_total)\n uuas_adjacent = adjacent_correct / float(uspan_total)\n uuas_rand = rand_correct / float(uspan_total)\n\n return (\n uuas,\n uuas_per_sen,\n uuas_adjacent,\n uuas_rand,\n per_relation_stats,\n per_relation_stats_adjacent,\n )", "def calculate_error_metrics(model_name):\n sim_data_fldr = \"simulation_data\"\n reward_data_fldr = \"reward_data\"\n\n ee_pos_path = os.path.join(sim_data_fldr, \"ee_pos_\" + model_name + \".csv\")\n ee_goal_pos_path = os.path.join(sim_data_fldr, \"ee_goal_pos_\" + model_name + \".csv\")\n\n ee_z_force_path = os.path.join(sim_data_fldr, \"ee_z_contact_force_\" + model_name + \".csv\")\n ee_mean_z_force_path = os.path.join(sim_data_fldr, \"ee_z_running_mean_contact_force_\" + model_name + \".csv\")\n ee_goal_z_force_path = os.path.join(sim_data_fldr, \"ee_z_goal_contact_force_\" + model_name + \".csv\")\n\n ee_z_derivative_force_path = os.path.join(sim_data_fldr, \"ee_z_derivative_contact_force_\" + model_name + \".csv\")\n ee_goal_derivative_z_force_path = os.path.join(sim_data_fldr, \"ee_z_goal_derivative_contact_force_\" + model_name + \".csv\")\n\n ee_vel_path = os.path.join(sim_data_fldr, \"ee_vel_\" + model_name + \".csv\")\n ee_mean_vel_path = os.path.join(sim_data_fldr, \"ee_running_mean_vel_\" + model_name + \".csv\")\n ee_goal_vel_path = os.path.join(sim_data_fldr, \"ee_goal_vel_\" + model_name + \".csv\")\n\n ee_diff_quat_path = os.path.join(sim_data_fldr, \"ee_diff_quat_\" + model_name + \".csv\")\n\n pos_reward_path = os.path.join(reward_data_fldr, \"pos_\" + model_name + \".csv\")\n 
ori_reward_path = os.path.join(reward_data_fldr, \"ori_\" + model_name + \".csv\")\n force_reward_path = os.path.join(reward_data_fldr, \"force_\" + model_name + \".csv\")\n der_reward_path = os.path.join(reward_data_fldr, \"derivative_force_\" + model_name + \".csv\")\n vel_reward_path = os.path.join(reward_data_fldr, \"vel_\" + model_name + \".csv\")\n\n mse_ee_pos(ee_pos_path, ee_goal_pos_path, model_name)\n mse_ee_force(ee_z_force_path, ee_mean_z_force_path, ee_goal_z_force_path, model_name)\n mse_ee_der_force(ee_z_derivative_force_path, ee_goal_derivative_z_force_path, model_name)\n mse_ee_velocity(ee_vel_path, ee_mean_vel_path, ee_goal_vel_path, model_name)\n mean_ee_quat_diff(ee_diff_quat_path, model_name)\n mean_rewards(\n pos_reward_path,\n ori_reward_path,\n force_reward_path,\n der_reward_path,\n vel_reward_path,\n model_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assure the column classes' constructors work
def test_all_columns_classes_initialize(self): t = Text() b = Bool() i = Integer() f = Float() i_d = Id() self.assertIsInstance(t, Text) self.assertIsInstance(b, Bool) self.assertIsInstance(i, Integer) self.assertIsInstance(f, Float) self.assertIsInstance(i_d, Id)
[ "def test_column_attributes_handled_correctly(self):\n\n class TestModel(Model):\n\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\n text = columns.Text()\n\n #check class attibutes\n self.assertHasAttr(TestModel, '_columns')\n self.assertHasAttr(TestModel, 'id')\n self.assertHasAttr(TestModel, 'text')\n\n #check instance attributes\n inst = TestModel()\n self.assertHasAttr(inst, 'id')\n self.assertHasAttr(inst, 'text')\n self.assertIsNone(inst.id)\n self.assertIsNone(inst.text)", "def __init__(self, columns, func):\n # Ensure that columns is a list.\n self.columns = as_list(columns)\n # Ensure that the function is a value function.\n if not isinstance(func, ValueFunction):\n # Instantiate the function if a class object is given\n if isinstance(func, type):\n func = func()\n func = CallableWrapper(func=func)\n self.func = func", "def __init__(self, data, columns:list):\n self.data = data.loc[:, columns]\n self.columns = columns", "def __init__(self, columns: Columns, names: Optional[Names] = None):\n self.columns = as_list(columns)\n # Set the list of names for the result columns if given. Raise an error\n # if the list of column names is given but does not match the length of\n # the list of selected columns.\n if names is not None:\n self.names = names if isinstance(names, list) else [names]\n if len(self.columns) != len(self.names):\n raise ValueError('incompatible lists for columns and names')\n else:\n self.names = None", "def __init__(self, columns: Columns, func: UpdateFunction):\n # Ensure that columns is a list\n self.columns = columns\n self.func = get_update_function(func=func, columns=self.columns)", "def buildColumn(self, b):\n b.error('Classes inheriting from *Column* should implement **self.buildColumn(b)**.')", "def getColumnsClass(view, column):", "def test_gen_columns(self):\n fn = self.filenames[0]\n\n # Get the first two columns. Both columns are of type int.\n data_gen = du.gen_columns(fn, self.header_lines[fn], (int, int))\n for i, (c0, c1) in enumerate(data_gen):\n self.assertEqual(c0, self.columns[fn][0][i])\n self.assertEqual(c1, self.columns[fn][1][i])\n\n # Read columns 1 and 3 and convert them to type\n # int and float, respectively. Skip the two header rows.\n data_gen = du.gen_columns(fn, self.header_lines[fn], (int, float), (1,3))\n for i, (c1, c3) in enumerate(data_gen):\n self.assertEqual(c1, self.columns[fn][1][i])\n self.assertEqual(c3, self.columns[fn][3][i])\n\n # Read only the second column. 
Note the use of tuples!\n data_gen = du.gen_columns(fn, self.header_lines[fn], (int,), (1,))\n for i, (c1,) in enumerate(data_gen):\n self.assertEqual(c1, self.columns[fn][1][i])\n\n # Finding a float when int is expected should raise\n # ValueError.\n fn = self.filenames[2]\n data_gen = du.gen_columns(fn, self.header_lines[fn], (int,int), (0,1))\n self.assertRaises(ValueError, lambda y: [x for x in y], data_gen)\n\n # A missing column should raise IndexError.\n fn = self.filenames[1]\n data_gen = du.gen_columns(fn, self.header_lines[fn], (int,float), (1,3))\n self.assertRaises(IndexError, lambda y: [x for x in y], data_gen)", "def __init__(self, column_name, function_name=None):\n self.column_name = column_name\n if function_name:\n self.function_name = function_name\n elif not self.function_name:\n raise ValueError('No function_name.')", "def initialize_dataframe(self):\n # TODO: check if the set of columns in dataframe after initialiation is exactly\n # the set of base features.\n raise NotImplementedError", "def test_construct_column(type_, expected_type):\n artifacts = types.ColumnArtifacts(type_)\n\n returned_column = column.construct_column(artifacts=artifacts)\n\n assert isinstance(returned_column, sqlalchemy.Column)\n assert isinstance(returned_column.type, expected_type)\n assert len(returned_column.foreign_keys) == 0\n assert returned_column.nullable is True", "def test_extract_column_1(self, is_instance_mock):\n with self.assertRaises(TypeError):\n is_instance_mock.return_value = False\n querying.extract_column(self.column)\n\n is_instance_mock.assert_any_call(self.column, Column)\n is_instance_mock.assert_any_call(self.column, functions.count)\n is_instance_mock.assert_any_call(self.column, UnaryExpression)\n is_instance_mock.assert_any_call(self.column, BinaryExpression)", "def test_table_instantiation(self):\n self.table = MyTable(self.request, TEST_DATA)\n # Properties defined on the table\n self.assertEqual(TEST_DATA, self.table.data)\n self.assertEqual(\"my_table\", self.table.name)\n # Verify calculated options that weren't specified explicitly\n self.assertTrue(self.table._meta.actions_column)\n self.assertTrue(self.table._meta.multi_select)\n # Test for verbose_name\n self.assertEqual(\"My Table\", str(self.table))\n # Column ordering and exclusion.\n # This should include auto-columns for multi_select and actions,\n # but should not contain the excluded column.\n # Additionally, auto-generated columns should use the custom\n # column class specified on the table.\n self.assertQuerysetEqual(self.table.get_columns(),\n ['<MyColumn: multi_select>',\n '<Column: id>',\n '<Column: name>',\n '<Column: value>',\n '<Column: optional>',\n '<Column: status>',\n '<MyColumn: actions>'])\n # Actions (these also test ordering)\n self.assertQuerysetEqual(list(self.table.base_actions.values()),\n ['<MyBatchAction: batch>',\n '<MyBatchActionWithHelpText: batch_help>',\n '<MyAction: delete>',\n '<MyFilterAction: filter>',\n '<MyLinkAction: login>',\n '<MyToggleAction: toggle>'])\n self.assertQuerysetEqual(self.table.get_table_actions(),\n ['<MyFilterAction: filter>',\n '<MyAction: delete>',\n '<MyBatchAction: batch>',\n '<MyBatchActionWithHelpText: batch_help>'])\n self.assertQuerysetEqual(self.table.get_row_actions(TEST_DATA[0]),\n ['<MyAction: delete>',\n '<MyLinkAction: login>',\n '<MyBatchAction: batch>',\n '<MyToggleAction: toggle>',\n '<MyBatchActionWithHelpText: batch_help>'])\n # Auto-generated columns\n multi_select = self.table.columns['multi_select']\n 
self.assertEqual(\"multi_select\", multi_select.auto)\n self.assertEqual(\"multi_select_column\",\n multi_select.get_final_attrs().get('class', \"\"))\n actions = self.table.columns['actions']\n self.assertEqual(\"actions\", actions.auto)\n self.assertEqual(\"actions_column\",\n actions.get_final_attrs().get('class', \"\"))\n # In-line edit action on column.\n name_column = self.table.columns['name']\n self.assertEqual(forms.CharField, name_column.form_field.__class__)\n self.assertEqual({'class': 'test'}, name_column.form_field_attributes)", "def test_unsupported_columns(self):\n self.dlg.set_focus()\n table = self.dlg.Table\n self.assertRaises(NotImplementedError, table.column_count)\n self.assertRaises(NotImplementedError, table.get_column, 0)", "def create_columns(self):\n for value in self.raw_data[0]:\n self.columns.append(Column(header=value))\n self.headers.append(value)\n self.valid_rows.pop(0)\n\n for row in self.valid_rows:\n for index, value in enumerate(row):\n self.columns[index].values.append(value)", "def __init__(\n self, database, cls_order=None, dtype='d', debias=True,\n **other_kws\n ):\n\n if isinstance(database, dict):\n self._init_from_dict(database, dtype, debias)\n else:\n self._init_from_array(database, cls_order, dtype, debias)\n self.n_classes = len(self.class_to_columns)\n self._normalize_frame()", "def initialize_traits(self, row):\n raise NotImplementedError()", "def _setup_arguments_on_columns(self):\n for prop in self.props:\n prop.active_history = self.active_history\n if self.deferred:\n prop.deferred = self.deferred\n prop.strategy_class = strategies.DeferredColumnLoader\n prop.group = self.group", "def __init__(self, magic_file=None, columns=None, dtype=None,\n groups=None, dmodel=None, df=None):\n if isinstance(df, pd.DataFrame):\n self.df = df\n if dtype:\n self.dtype = dtype\n else:\n print '-W- Please provide data type...'\n # make sure all required arguments are present\n if not magic_file and not dtype and not isinstance(df, pd.DataFrame):\n print \"-W- To make a MagicDataFrame, you must provide either a filename or a datatype\"\n self.df = None\n return\n # fetch data model if not provided\n if isinstance(dmodel, type(None)):\n self.data_model = data_model.DataModel()\n else:\n self.data_model = dmodel\n\n if isinstance(df, pd.DataFrame):\n pass\n # if no file is provided, make an empty dataframe of the appropriate type\n elif not magic_file:\n self.dtype = dtype\n if not isinstance(columns, type(None)):\n self.df = DataFrame(columns=columns)\n else:\n self.df = DataFrame()\n self.df.index.name = dtype[:-1] if dtype.endswith(\"s\") else dtype\n # if there is a file provided, read in the data and ascertain dtype\n else:\n ## old way of reading in data using pmag.magic_read\n #data, dtype, keys = pmag.magic_read(magic_file, return_keys=True)\n ## create dataframe, maintaining column order:\n #self.df = DataFrame(data, columns=keys)\n #if dtype == 'bad_file':\n # print \"-W- Bad file {}\".format(magic_file)\n # self.dtype = 'empty'\n # return\n\n ## new way of reading in data using pd.read_table\n with open(magic_file) as f:\n try:\n delim, dtype = f.readline().split('\\t')[:2]\n except ValueError:\n print \"-W- Empty file {}\".format(magic_file)\n self.df = DataFrame()\n return\n self.df = pd.read_table(magic_file, skiprows=[0])\n self.dtype = dtype.strip()\n if self.dtype == 'measurements':\n ###self.df['measurement_name'] = self.df['experiment_name'] + self.df['measurement_number']\n self.df['measurement'] = self.df['experiment'] + 
self.df['number'].astype(str)\n name = 'measurement'\n elif self.dtype.endswith('s'):\n #dtype = dtype[:-1]\n name = '{}'.format(self.dtype[:-1])\n elif self.dtype == 'contribution':\n name = 'doi'\n # **** this is broken at the moment, fix it!\n return\n else:\n name = self.dtype\n # fix these:\n if self.dtype == 'images':\n self.df = pd.DataFrame()\n return\n if self.dtype == 'criteria':\n #self.df = pd.DataFrame()\n self.df.index = self.df['table_column']\n return\n if len(self.df) and self.dtype != 'ages':\n self.df.index = self.df[name].astype(str)\n elif self.dtype == 'ages':\n self.df.index = self.df.index.astype(str)\n #del self.df[name]\n #self.dtype = dtype\n # replace '' with None, so you can use isnull(), notnull(), etc.\n # can always switch back with DataFrame.fillna('')\n self.df = self.df.where(self.df.notnull(), None)\n\n # drop any completely blank columns\n # this is not necessarily a good idea....\n #self.df.dropna(axis=1, how='all', inplace=True)\n #\n # add df columns that were passed in but weren't in the file\n if columns:\n for col in columns:\n if col not in self.df.columns:\n self.df[col] = None\n\n # add col_names by group\n if groups and not columns:\n columns = []\n for group_name in groups:\n columns.extend(list(self.data_model.get_group_headers(self.dtype, group_name)))\n for col in columns:\n if col not in self.df.columns:\n self.df[col] = None\n self.df = self.df[columns]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialization from the 9 components of the orientation matrix.
def __init__(self, matrix): g = np.array(matrix, dtype=np.float64).reshape((3, 3)) self._matrix = g self.euler = Orientation.OrientationMatrix2Euler(g) self.rod = Orientation.OrientationMatrix2Rodrigues(g) self.quat = Orientation.OrientationMatrix2Quaternion(g, P=1)
[ "def __initialize_from_mat3(self, mat):\n for i in range(0,8):\n self.data[i] = mat[i]", "def initialize_quaternions(self):\n # initialize as s = (0,0,0,0) and r = (0,0,0,1)\n N, M = self.M.num_reactants, self.M.num_products\n # only use three elements and enforce constraints in the fourth\n q = np.zeros((N-1, 2, 3))\n #q[:,1,3] = 1\n return q", "def __init__(self,activeRotors=[0],initialKeys=[0]):\n self.setRotorsAndKeys(activeRotors,initialKeys)", "def getRotationMatrix( self):", "def _orientation_vectors(self):\n\n agent_orientations = np.empty((self.num_agents,2),dtype=np.float)\n\n for a_idx, a in enumerate(self._ctrl.agents):\n theta = a.th*2*math.pi/self.num_head_turns\n agent_orientations[a_idx] = [-1*math.sin(theta),math.cos(theta)]\n\n return agent_orientations", "def __init__(self):\n log.debug(\"Initialized an empty SensorTransformations class.\")\n\n self.d415_rgb = np.array([])\n self.d415_depth = np.array([])\n self.d435_rgb = np.array([])\n self.d435_depth = np.array([])\n self.ps_rgb = np.array([])\n self.ps_depth = np.array([])\n self.cham_rgb = np.array([])", "def build_matrix(self):\n\n m = Matrix44.identity()\n if isinstance(self.scale, list) or isinstance(self.scale, tuple):\n m.m11 = self.scale[0]\n m.m22 = self.scale[1]\n m.m33 = self.scale[2]\n else:\n m *= self.scale\n m.m44 = 1\n m = Matrix44.from_x_rotation(math.radians(self.pitch)) * m\n m = Matrix44.from_y_rotation(math.radians(self.yaw)) * m\n m = Matrix44.from_z_rotation(math.radians(self.roll)) * m\n m = Matrix44.from_translation(Vector3(self.position)) * m\n self.m = numpy.array(m).astype(\"f4\")", "def __init__(self, initializer_array = np.eye((4))):\n if hasattr(initializer_array, 'TM'):\n #Returns a copy of the tm object\n self.TM = initializer_array.TM.copy()\n self.TAA = initializer_array.TAA.copy()\n return\n init_arr_len = len(initializer_array)\n if isinstance(initializer_array, list):\n #Generates tm from list\n if init_arr_len == 3:\n self.TAA = np.array([0, 0, 0,\n initializer_array[0],\n initializer_array[1],\n initializer_array[2]])\n self.TAAtoTM()\n return\n elif init_arr_len == 6:\n self.TAA = np.array([initializer_array[0],\n initializer_array[1],\n initializer_array[2],\n initializer_array[3],\n initializer_array[4],\n initializer_array[5]])\n self.TAAtoTM()\n return\n else:\n self.TAA = np.array([initializer_array[0],\n initializer_array[1],\n initializer_array[2],\n 0,\n 0,\n 0])\n self.setQuat(initializer_array[3:7])\n return\n else:\n if init_arr_len == 6:\n #Generates tm from numpy array\n self.TAA = initializer_array.reshape((6, 1)).copy()\n self.TAAtoTM()\n return\n elif init_arr_len == 7:\n self.TAA = initializer_array.reshape((6, 1)).copy()\n self.setQuat(initializer_array[3:])\n return\n elif (len(initializer_array) == 1):\n if isinstance(initializer_array, np.ndarray):\n if isinstance(initializer_array[0], tm):\n self.TM = initializer_array[0].TM.copy()\n self.TMtoTAA()\n return\n else:\n self.transformSqueezedCopy(initializer_array)\n self.TMtoTAA()\n return", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n 
R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta", "def setUpOrientation(node, parmname, defaultup):\n pass", "def test_init(self):\n #should only care about number of elements, not shape\n p = PairMatrix([1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8], RnaPairs)\n assert p.Alphabet is RnaPairs\n self.assertEqual(len(p._data), 4)\n self.assertEqual(len(p._data.flat), 16)\n self.assertEqual(p._data[0], array([1,2,3,4]))\n self.assertEqual(p._data[1], array([5,6,7,8]))", "def __initialize_from_row_major_ret_mat(self, \n m00, m01, m02,\n m10, m11, m12,\n m20, m21, m22):\n self.data = (\n m00, m01, m02,\n m10, m11, m12,\n m20, m21, m22)", "def __init__(self, id, middle=5):\n self.id = id\n self.angle = 0\n self.row = 0\n self.col = middle - 1\n self.matrix = Tetrimino.matrixmap[self.id]", "def _initialize(self) -> None:\n # use random indices for blocks\n indices = self.random_state.choice(\n self._dimension, self.block_dimension * self.num_blocks, replace=False # type: ignore\n ).tolist()\n indices.sort() # keep the indices sorted sorted so that blocks do not overlap\n # Caution this is also important for split, so that splitted arrays end un in the same block\n for transform_inds in tools.grouper(indices, n=self.block_dimension):\n self._transforms.append(\n utils.Transform(\n transform_inds,\n translation_factor=self.translation_factor,\n rotation=self.rotation,\n random_state=self.random_state,\n expo=self.expo,\n )\n )", "def __init__(self):\n self.sensor_list = []\n self.location = (0, 0, 0)\n self.rotation = 0", "def __init__(self):\n num_cols = np.prod(self.dir_shape)\n num_rows = np.prod(self.adj_shape)\n super().__init__(np.float32, [num_rows, num_cols])\n self.is_dir_operator = True", "def __init__(self, vertices=0, arestas=0, direcionado=False):\n\n super().__init__(vertices, arestas, direcionado)\n\n self.matriz = []\n \n # Inicializa uma matriz num_vertices X num_vertices \n # com os valores zerados\n for linha in range(self.num_vertices):\n aux_linha = []\n for coluna in range(self.num_vertices):\n aux_linha.append(0) \n self.matriz.append(aux_linha)", "def initialiser(self):\n\n # Vider le dictionnaire (pratique si on veut recommencer le jeu).\n self.uplateau.clear()\n # Parcourir le dictionnaire et mettre des objets de la classe Plateau.\n for i in range(0, 3):\n for j in range(0, 3):\n self.uplateau[i,j] = Plateau((i,j))", "def __init__(self, imatrix):\n self.imatrix = imatrix\n p, n = imatrix.shape\n num = imatrix.max()\n theta0 = np.random.rand(num)\n self._pid = PIdentity(p, n)\n TemplateStrategy.__init__(self, p+n, n, theta0)\n # should call set_params" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the orientation matrix in the form of a 3x3 numpy array.
def orientation_matrix(self): return self._matrix
[ "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def get_rotationMatrix(self):\n rot_mat = quat2mat(self.quat)\n try:\n [U, s, V] = np.linalg.svd(rot_mat)\n return np.dot(U, V)\n except:\n return np.eye(3)", "def rotation_matrix(self) -> Tensor:\n return self.extrinsics[..., :3, :3]", "def orientation_matrix(euler_angle):\n\n # Convert from degrees to radians\n phi1 = np.deg2rad(euler_angle[0])\n Phi = np.deg2rad(euler_angle[1])\n phi2 = np.deg2rad(euler_angle[2])\n\n # Assemble orientation matrix\n M = np.zeros([3, 3])\n M[0,0] = cos(phi1)*cos(phi2) - sin(phi1)*sin(phi2)*cos(Phi)\n M[0,1] = sin(phi1)*cos(phi2) + cos(phi1)*sin(phi2)*cos(Phi)\n M[0,2] = sin(phi2)*sin(Phi)\n M[1,0] = -cos(phi1)*sin(phi2) - sin(phi1)*cos(phi2)*cos(Phi)\n M[1,1] = -sin(phi1)*sin(phi2) + cos(phi1)*cos(phi2)*cos(Phi)\n M[1,2] = cos(phi2)*sin(Phi)\n M[2,0] = sin(phi1)*sin(Phi)\n M[2,1] = -cos(phi1)*sin(Phi)\n M[2,2] = cos(Phi)\n return M", "def getRotationMatrix( self):", "def get_orientation(self):\n pose = self.get_pose()\n orientation = np.array(pose.r)\n return orientation", "def rotationMatrix(self):\n\n # R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n # self.exteriorOrientationParameters[5])\n\n return self.__rotationMatrix", "def getRotationMatrix(self, quaternion):\n quaternion = npRotation.from_quat(quaternion)\n return quaternion.as_matrix()", "def rotation_matrix(angles, out): \n if len(angles) != 3:\n raise ValueError(\"Invalid input data shape\")\n _rotation_matrix(angles[0],angles[1],angles[2], out)", "def v_transpose(self):\n return Matrix(self.row_n, self.col_n, [num for row in self.data for num in row[::-1]])", "def quaternion_to_rotation_matrix_rows(w, x, y, z):\n x2 = x*x\n y2 = y*2\n z2 = z*2\n row0 = (1 - 2*y2 - 2*z2,\n 2*x*y - 2*w*z,\n 2*x*z + 2*w*y)\n row1 = (2*x*y + 2*w*z,\n 1 - 2*x2 - 2*z2,\n 2*y*z - 2*w*x)\n row2 = (2*x*z - 2*w*y,\n 2*y*z + 2*w*x,\n 1 - 2*x2 - 2*y2)\n return row0, row1, row2", "def _quat_to_rotation_matrix(q):\n\n q = q.flatten()\n if q.shape[0] != 4:\n raise ValueError('Quaternion must be of length 4')\n\n w = q[0]\n x = q[1]\n y = q[2]\n z = q[3]\n\n n = ag_np.sum(q ** 2)\n if n == 0.0:\n raise ZeroDivisionError(\n \"Input to `_quat_to_rotation_matrix({0})` has zero norm\".format(q))\n elif abs(n - 1.0) < np.finfo(np.float).eps:\n # Input q is basically normalized\n return ag_np.array([\n [1 - 2 * (y ** 2 + z ** 2), 2 * (x * y - z * w), 2 * (x * z + y * w)],\n [2 * (x * y + z * w), 1 - 2 * (x ** 2 + z ** 2), 2 * (y * z - x * w)],\n [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x ** 2 + y ** 2)]])\n else:\n # Input q is not normalized\n return ag_np.array([\n [1 - 2 * (y ** 2 + z ** 2) / n, 2 * (x * y - z * w) / n, 2 * (x * z + y * w) / n],\n [2 * (x * y + z * w) / n, 1 - 2 * (x ** 2 + z ** 2) / n, 2 * (y * z - x * w) / n],\n [2 * (x * z - y * w) / n, 2 * (y * z + x * w) / n, 1 - 2 * (x ** 2 + y ** 2) / n]])", "def RotationMatrix_Image1(self):\r\n return Compute3DRotationMatrix(self.__relativeOrientationImage1[3], self.__relativeOrientationImage1[4],\r\n self.__relativeOrientationImage1[5])", "def rotation_matrix(delta):\n return np.array([[np.cos(delta), -np.sin(delta)],[np.sin(delta), np.cos(delta)]])", "def get_rotation_matrix(theta):\n return np.array([[np.cos(theta), -1 * np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])", "def np_transpose(matrix):\r\n return (matrix.T)", "def get_transposed(self):\n\t\t# Create a matrix with columns and row sizes 
swapped\n\t\tmatrix = Matrix(0, self.__row_size)\n\n\t\t# Begin looping through and making rows\n\t\tfor i in range(0, self.__column_size):\n\t\t\tmatrix.insert_row([v[i] for v in self.__rows])\n\n\t\treturn matrix", "def side_transpose(self):\n return Matrix(self.col_n, self.row_n, [num for col in self.columns[::-1] for num in col[::-1]])", "def rotation_matrix_z(angle, out = None):\n c,s = np.cos(angle), np.sin(angle)\n if out is None:\n out = np.zeros(shape = c.shape + (3,3), dtype = FDTYPE)\n out[...,0,0] = c\n out[...,0,1] = -s\n out[...,1,0] = s\n out[...,1,1] = c\n out[...,2,2] = 1.\n return out", "def quaternionToRotationMatrix(q):\n xx = q[0] * q[0]\n xy = q[0] * q[1]\n xz = q[0] * q[2]\n xw = q[0] * q[3]\n yy = q[1] * q[1]\n yz = q[1] * q[2]\n yw = q[1] * q[3]\n zz = q[2] * q[2]\n zw = q[2] * q[3]\n mat = np.zeros((4,4))\n mat[0, 0] = 1 - 2 * ( yy + zz )\n mat[1, 0] = 2 * ( xy - zw )\n mat[2, 0] = 2 * ( xz + yw )\n mat[0, 1] = 2 * ( xy + zw )\n mat[1, 1] = 1 - 2 * ( xx + zz )\n mat[2, 1] = 2 * ( yz - xw )\n mat[0, 2] = 2 * ( xz - yw )\n mat[1, 2] = 2 * ( yz + xw )\n mat[2, 2] = 1 - 2 * ( xx + yy )\n mat[3, 3] = 1\n return mat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the particular crystal orientation called Cube, which corresponds to Euler angles (0, 0, 0).
def cube(): return Orientation.from_euler((0., 0., 0.))
[ "def write_cube(data):\n\ttext = ''\n\tcenter = np.array(data['center'])\n\theight = data['height']\n\te1 = np.array([1,0,0]); e2 = np.array([0,1,0]); e3 = np.array([0,0,1])\n\tdata = {'corner': center-height/2*(e1+e2), 'v1': height*e1, 'v2': height*e2, 'v3': height*e3}\n\ttext = write_prism(data)\n\treturn text", "def makecube():\n\n vtype = [('a_position', np.float32, 3),\n ('a_texcoord', np.float32, 2)]\n itype = np.uint32\n\n # Vertices positions\n p = np.array([[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]])\n\n # Texture coords\n t = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])\n\n faces_p = [0, 1, 2, 3, 0, 3, 4, 5, 0, 5, 6,\n 1, 1, 6, 7, 2, 7, 4, 3, 2, 4, 7, 6, 5]\n faces_t = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]\n\n vertices = np.zeros(24, vtype)\n vertices['a_position'] = p[faces_p]\n vertices['a_texcoord'] = t[faces_t]\n\n indices = np.resize(\n np.array([0, 1, 2, 0, 2, 3], dtype=itype), 6 * (2 * 3))\n indices += np.repeat(4 * np.arange(6), 6)\n\n return vertices, indices", "def cube():\n vtype = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, 4)]\n # Vertices positions\n v = [[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]]\n # Face Normals\n n = [[0, 0, 1], [1, 0, 0], [0, 1, 0],\n [-1, 0, 1], [0, -1, 0], [0, 0, -1]]\n # Vertice colors\n colors = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],\n [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]\n\n V = np.array([(v[0], n[0], colors[0]), (v[1], n[0], colors[1]),\n (v[2], n[0], colors[2]), (v[3], n[0], colors[3]),\n (v[0], n[1], colors[0]), (v[3], n[1], colors[3]),\n (v[4], n[1], colors[4]), (v[5], n[1], colors[5]),\n (v[0], n[2], colors[0]), (v[5], n[2], colors[5]),\n (v[6], n[2], colors[6]), (v[1], n[2], colors[1]),\n (v[1], n[3], colors[1]), (v[6], n[3], colors[6]),\n (v[7], n[3], colors[7]), (v[2], n[3], colors[2]),\n (v[7], n[4], colors[7]), (v[4], n[4], colors[4]),\n (v[3], n[4], colors[3]), (v[2], n[4], colors[2]),\n (v[4], n[5], colors[4]), (v[7], n[5], colors[7]),\n (v[6], n[5], colors[6]), (v[5], n[5], colors[5])],\n dtype=vtype)\n I1 = np.resize(np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32), 6 * (2 * 3))\n I1 += np.repeat(4 * np.arange(2 * 3, dtype=np.uint32), 6)\n\n I2 = np.resize(\n np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32), 6 * (2 * 4))\n I2 += np.repeat(4 * np.arange(6, dtype=np.uint32), 8)\n\n return V, I1, I2", "def print_cube(self):\n for f in range(3): #right-next-faces\n for r in range(3): # rows\n print(self._row_str(2*f,r) + ' ' + self._row_str(2*f+1,r))\n print('')", "def polyCube(texture=int, depth=\"string\", axis=\"string\", width=\"string\", createUVs=int, height=\"string\", constructionHistory=bool, subdivisionsY=int, subdivisionsX=int, subdivisionsZ=int, name=\"string\"):\n pass", "def icosahedron(self, upward=False):\n phi = (1 + 5**0.5) / 2\n radius = (phi**2 + 1)**0.5\n vertices = [0, 1, phi, 0, -1, phi, 0, 1, -phi, 0, -1, -phi, phi, 0, 1,\n phi, 0, -1, -phi, 0, 1, -phi, 0, -1, 1, phi, 0, -1, phi, 0,\n 1, -phi, 0, -1, -phi, 0]\n vertices = np.reshape(vertices, (-1, 3)) / radius\n faces = [0, 1, 6, 0, 6, 9, 0, 9, 8, 0, 8, 4, 0, 4, 1, 1, 6, 11, 11, 6, 7,\n 6, 7, 9, 7, 9, 2, 9, 2, 8, 2, 8, 5, 8, 5, 4, 5, 4, 10, 4, 10,\n 1, 10, 1, 11, 3, 10, 11, 3, 11, 7, 3, 7, 2, 3, 2, 5, 3, 5, 10]\n faces = np.reshape(faces, (-1,3))\n if upward:\n vertices = self._upward(vertices, faces)\n return vertices, 
faces", "def drawCube( self ):\n glBegin(GL_TRIANGLES);\n #Bagian atas\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, 0);\n glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, 0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, 0);\n #Samping kanan (besar)\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, -1.0, 0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, -2.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, -1.0, 0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, -1.0, 0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, -2.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, -2.0);\n #Samping kanan (kecil)\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -2.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, -1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -2.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(2.0, -1.0, -2.0);\n #Samping kiri (Besar)\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.0, 0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -2.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, 1.0, 0);\n #Samping kiri (Kecil)\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, 1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, 1.0, -2.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(2.0, 1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, -2.0);\n \n glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, -1.0, 0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(-3.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, -1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, 1.0, 0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, 1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(2.0, -1.0, -1.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(2.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(2.0, 1.0, -2.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(2.0, -1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(2.0, 1.0, -2.0);\n\n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, -1.0, -2.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, -1.0, -2.0);\n 
glTexCoord2f(0.0, 0.0); glVertex3f(-3.0, 1.0, -2.0);\n \n glTexCoord2f(1.0, 1.0); glVertex3f(2.0, -1.0, -2.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-3.0, 1.0, -2.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(2.0, 1.0, -2.0);\n glEnd()", "def euler2_qua(self,euler_angle):\n qua_angle=numpy.zeros([4,1],dtype=float) \n qua_angle[0]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n qua_angle[1]=(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n -(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[2]=(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[3]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2)) \\\n -(math.sin(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n return qua_angle", "def draw_cube(p):\n p.set('linecolor', 'g')\n p.vector(0, 1)\n p.vector(1, 0)\n p.vector(0, -1)\n p.vector(-1, 0)\n p.draw()\n p.set('linecolor', 'b')", "def from_euler(cls, euler):\n # Bunge convention\n euler = np.array(euler)\n n = euler.shape[:-1]\n alpha, beta, gamma = euler[..., 0], euler[..., 1], euler[..., 2]\n alpha -= np.pi / 2\n gamma -= 3 * np.pi / 2\n zero = np.zeros(n)\n qalpha = Quaternion(\n np.stack((np.cos(alpha / 2), zero, zero, np.sin(alpha / 2)),\n axis=-1))\n qbeta = Quaternion(\n np.stack((np.cos(beta / 2), zero, np.sin(beta / 2), zero), axis=-1))\n qgamma = Quaternion(\n np.stack((np.cos(gamma / 2), zero, zero, np.sin(gamma / 2)),\n axis=-1))\n data = qalpha * qbeta * qgamma\n rot = cls(data.data)\n rot.improper = zero\n return rot", "def make_cube_1(texture, texture_index): \n glBindTexture(GL_TEXTURE_2D,texture[texture_index])\t \n # Front Face (Each texture's corner is matched a quad's corner.) 
\n glBegin(GL_QUADS)\t \n\tglTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)\t# Bottom Left Of The Texture and Quad \n\tglTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0)\t# Bottom Right Of The Texture and Quad \n\tglTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0)\t# Top Right Of The Texture and Quad \n\tglTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0)\t# Top Left Of The Texture and Quad\t \n\tglEnd();", "def printCube(self):\r\n print('This is the cube\\n------------------------------')\r\n for row in self.cube:\r\n for j in row:\r\n print(f'{j}', end='')\r\n print()\r\n print('\\nThis is the transposed cube\\n------------------------------')\r\n for row in self.transposed_cube:\r\n for j in row:\r\n print(f'{j}', end='')\r\n print()", "def cube(l):\n A=6 * (l**2)\n return A", "def nurbsCube(degree=int, axis=\"string\", pivot=\"string\", width=\"string\", lengthRatio=float, object=bool, nodeState=int, patchesV=int, name=\"string\", heightRatio=float, patchesU=int, caching=bool, constructionHistory=bool, polygon=int):\n pass", "def generate_cube(size):\n sides = [((0, 0, 0), (size, 0, 0), (size, 0, size), (0, 0, size)), # front\n ((size, 0, 0), (size, size, 0), (size, size, size), (size, 0, size)), # right side\n ((0, 0, 0), (0, 0, size), (0, size, size), (0, size, 0)), # left side\n ((0, 0, 0), (0, size, 0), (size, size, 0), (size, 0, 0)), # bottom\n ((0, 0, size), (size, 0, size), (size, size, size), (0, size, size)), # top\n ((0, size, 0), (0, size, size), (size, size, size), (size, size, 0)) # back\n ]\n for side in sides:\n for triangle in generate_quad(*side):\n yield triangle", "def create_cube(self,cube_id):\n username = \"Paula\"\n self.execute_command(\"insert into cube values ({},'{}');\".format(cube_id,username))", "def rca_format(cube):\r\n return cube.swapaxes(0, 1).swapaxes(1, 2)", "def build_cube(self):\n cube = Cube(self.context)\n cube.setup_shader(self.prog)\n cube.create_geometry()\n # cube.apply_model()\n self.cube = cube", "def Cube(self, n):\n if n == 0:\n return CubicalComplex([Cube([[0]])])\n else:\n return CubicalComplex([Cube([[0,1]]*n)])", "def _cube_to_hex(c):\n return vector.Vector(c.x, c.z)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the particular crystal orientation called Brass, which corresponds to the Euler angles (35.264, 45, 0).
def brass(): return Orientation.from_euler((35.264, 45., 0.))
[ "def solid_angle(angle):\n return (pi/4)*angle**2", "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def get_zenith_angle(self):\n theta = self.get_sph()[2]\n return np.pi/2. - theta", "def plot_sb_rotation():\n\n # Load CB offsets in HA, Dec\n cb_offsets = np.loadtxt('square_39p1.cb_offsets', usecols=[1, 2], delimiter=',')\n ncb = len(cb_offsets)\n cb_pos = np.zeros((ncb, 2))\n for cb, (dra, ddec) in enumerate(cb_offsets):\n cb_pos[cb] = np.array([dra, ddec])\n cb_pos *= u.deg\n\n # CB width\n freq = 1370 * u.MHz\n cb_radius = CB_HPBW * REF_FREQ / freq / 2\n\n # LST to convert HA <-> RA\n lst = 180 * u.deg\n\n # Which SBs to plot in which cBs\n # SB for given CB\n beams = {0: [0, 35, 70],\n # 33: [0, 35, 70],\n # 39: [0, 35, 70],\n 4: [63],\n 5: [12],\n 11: [34]}\n\n # SB separation = TAB separation at highest freq\n lambd = 299792458 * u.meter / u.second / (1500. * u.MHz)\n Bmax = 1296 * u.m\n sb_separation = ((.8 * lambd / Bmax).to(1) * u.radian).to(u.arcmin)\n\n # plot\n fig, axes = plt.subplots(figsize=(16, 9), ncols=2)\n # RA Dec axis\n ax = axes[0]\n # Alt Az axis\n ax2 = axes[1]\n font = {'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'alpha': .5,\n 'size': 10}\n # make room for sliders\n plt.subplots_adjust(bottom=.25)\n ax.set_aspect('equal')\n ax2.set_aspect('equal')\n\n # slider settings\n ha_init = 0\n ha_min = -90\n ha_max = 90\n ha_step = 1\n\n dec_init = 0\n dec_min = -35\n dec_max = 90\n dec_step = 1\n\n fc = 'lightgoldenrodyellow'\n ax_ha = plt.axes([.1, .15, .8, .03], facecolor=fc)\n ax_dec = plt.axes([.1, .1, .8, .03], facecolor=fc)\n slider_ha = Slider(ax_ha, 'HA (deg)', ha_min, ha_max, valinit=ha_init, valstep=ha_step)\n slider_dec = Slider(ax_dec, 'Dec (deg)', dec_min, dec_max, valinit=dec_init, valstep=dec_step)\n\n # plot in function\n def do_plot(ha0=0 * u.deg, dec0=0 * u.deg, parang=0 * u.deg):\n # store coordinates of CB00\n ra0 = lst - ha0\n alt0, az0 = tools.hadec_to_altaz(ha0, dec0)\n # plot CBs\n for cb, (dra, ddec) in enumerate(cb_pos):\n # RADec\n # pointing of this CB\n ra, dec = tools.offset_to_coord(ra0, dec0, dra, ddec)\n\n _ra = ra.to(u.deg).value\n _dec = dec.to(u.deg).value\n\n # plot\n patch = SphericalCircle((ra, dec), cb_radius,\n ec='k', fc='none', ls='-', alpha=.5)\n ax.add_patch(patch)\n ax.text(_ra, _dec, f'CB{cb:02d}', va='center', ha='center',\n fontdict=font, clip_on=True)\n\n # AltAz\n ha = lst - ra\n alt, az = tools.hadec_to_altaz(ha, dec)\n dalt = alt - alt0\n daz = az - az0\n\n _alt = alt.to(u.deg).value\n _az = az.to(u.deg).value\n\n # plot\n patch = SphericalCircle((az, alt), cb_radius,\n ec='k', fc='none', ls='-', alpha=.5)\n ax2.add_patch(patch)\n ax2.text(_az, _alt, f'CB{cb:02d}', va='center', ha='center',\n fontdict=font, clip_on=True)\n\n # plot SBs\n for cb in beams.keys():\n cb_dra, cb_ddec = cb_offsets[cb] * 60 # to arcmin\n for sb in beams[cb]:\n # SB increases towards higher RA\n sb_offset = (sb - 35) * sb_separation\n\n # draw line from (x, -y) to (x, y)\n # but the apply rotation by parallactic angle\n # in altaz:\n # x = +/-sb_offset, depending on azimuth:\n # higher SB = higher RA = East = either lower or higher Az\n # assume we are pointing above NCP if North\n if az0 > 270 * u.deg or az0 < 90 * u.deg:\n sgn = 1\n else:\n sgn = -1\n\n # alt az of this cb\n cb_shift_az = 0\n cb_shift_alt = 0\n\n # y = +/- length of line a sb_offset from center of CB\n dy = np.sqrt(cb_radius ** 2 - sb_offset ** 2)\n # alt start and end point\n alts = alt0 + dy + cb_shift_alt\n alte = alt0 - dy + 
cb_shift_alt\n # az start and end point\n azs = az0 + sgn * sb_offset / np.cos(alts) + cb_shift_az\n aze = az0 + sgn * sb_offset / np.cos(alte) + cb_shift_az\n\n # convert to HA, Dec\n has, decs = tools.altaz_to_hadec(alts, azs)\n hae, dece = tools.altaz_to_hadec(alte, aze)\n # convert HA to RA\n ras = lst - has\n rae = lst - hae\n\n # plot in RADec\n x = [ras.to(u.deg).value, rae.to(u.deg).value]\n y = [decs.to(u.deg).value, dece.to(u.deg).value]\n\n ax.plot(x, y, c='b')\n # add text above lines\n ax.text(np.mean(x), np.mean(y), f\"SB{sb:02d}\", va='center', ha='center')\n\n # plot in AltAz\n x = [azs.to(u.deg).value, aze.to(u.deg).value]\n y = [alts.to(u.deg).value, alte.to(u.deg).value]\n\n ax2.plot(x, y, c='b')\n # add text above lines\n ax2.text(np.mean(x), np.mean(y), f\"SB{sb:02d}\", va='center', ha='center')\n\n # polar\n # theta_start = np.arctan2(dy, sb_offset)\n # theta_end = np.arctan2(-dy, sb_offset)\n\n # apply parallactic angle rotation\n # works positive in HA space, but negative in RA space\n # theta_start -= parang.to(u.radian).value\n # theta_end -= parang.to(u.radian).value\n\n # start and end in cartesian coordinates\n # shift to correct CB and position\n # xstart = cb_radius * np.cos(theta_start) + cb_dra\n # ystart = cb_radius * np.sin(theta_start) + cb_ddec\n # xend = cb_radius * np.cos(theta_end) + cb_dra\n # yend = cb_radius * np.sin(theta_end) + cb_ddec\n\n # plot RA Dec\n # ax.plot((xstart, xend), (ystart, yend), c='b')\n # add text above lines\n # ax.text(np.mean([xstart, xend]), np.mean([ystart, yend]), \"SB{:02d}\".format(sb), va='center', ha='center')\n\n # continue\n # plot Alt Az\n # ystart, xstart = tools.hadec_to_altaz(ha0-xstart*u.arcmin, dec0+ystart*u.arcmin)\n # yend, xend = tools.hadec_to_altaz(ha0-xend*u.arcmin, dec0+yend*u.arcmin)\n # subtract center and remove cosine correction\n # #xstart = (xstart - az0).to(u.arcmin).value\n # #xend = (xend - az0).to(u.arcmin).value\n # #ystart = (ystart - alt0).to(u.arcmin).value\n # #yend = (yend - alt0).to(u.arcmin).value\n # xstart = xstart.to(u.arcmin).value\n # xend = xend.to(u.arcmin).value\n # ystart = ystart.to(u.arcmin).value\n # yend = yend.to(u.arcmin).value\n\n # set limits\n x = ra0.to(u.deg).value\n y = dec0.to(u.deg).value\n ax.set_xlim(x - 130 / 60., x + 130 / 60.)\n ax.set_ylim(y - 100 / 60., y + 100 / 60.)\n ax.set_xlabel('RA (deg)')\n ax.set_ylabel('Dec (deg)')\n ax.set_title('RA - Dec')\n\n x = az0.to(u.deg).value\n y = alt0.to(u.deg).value\n ax2.set_xlim(x - 130 / 60., x + 130 / 60.)\n ax2.set_ylim(y - 100 / 60., y + 100 / 60.)\n ax2.set_xlabel('Az (deg)')\n ax2.set_ylabel('Alt (deg)')\n ax2.set_title('Alt - Az')\n\n # plot once\n do_plot()\n\n # define update function\n def update(val):\n ha = slider_ha.val * u.deg\n dec = slider_dec.val * u.deg\n parang = tools.hadec_to_par(ha, dec)\n ax.cla()\n ax2.cla()\n do_plot(ha, dec, parang)\n\n # attach sliders to update function\n slider_ha.on_changed(update)\n slider_dec.on_changed(update)\n\n fig.suptitle('Apertif beam pattern for LST = {} hr'.format(lst.to(u.deg).value / 15.))\n plt.show()", "def test_angle_zone(self):\n # euler angles in degrees\n phi1 = 89.4\n phi = 92.0\n phi2 = 86.8\n orientation = Orientation.from_euler([phi1, phi, phi2])\n gt = orientation.orientation_matrix().transpose()\n # zone axis\n uvw = HklDirection(1, 0, 5, self.ni)\n ZA = gt.dot(uvw.direction())\n if ZA[0] < 0:\n ZA *= -1 # make sur the ZA vector is going forward\n psi0 = np.arccos(np.dot(ZA, np.array([1., 0., 0.])))\n self.assertAlmostEqual(psi0 * 180 / np.pi, 
9.2922, 3)", "def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle", "def blade_degrees(self) -> tf.Tensor:\n return self._blade_degrees", "def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)", "def elementary_angle(self) -> Angle:\n a, b = self.angle.numerator, self.angle.denominator\n if a % 2 == 0:\n p = a // 2\n q = b - p\n else:\n p = a\n q = 2 * b - a\n\n if (p == 1) or (q == 1):\n p, q = 2 * p, 2 * q\n\n return Angle(2, p + q)", "def __crystGen(self,cell):\n\n a,b,c,alpha,beta,gamma = cell\n\n alpha = alpha * np.pi/180\n beta = beta * np.pi/180\n gamma = gamma * np.pi/180\n\n cosAlpha = np.cos(alpha)\n sinAlpha = np.sin(alpha)\n cosBeta = np.cos(beta)\n sinBeta = np.sin(beta)\n cosGamma = np.cos(gamma)\n sinGamma = np.sin(gamma)\n\n vol=a*b*c*np.sqrt(1.-cosAlpha**2-cosBeta**2-cosGamma**2+2.*cosAlpha*cosBeta*cosGamma)\n\n ar=b*c*sinAlpha/vol\n br=a*c*sinBeta/vol\n cr=a*b*sinGamma/vol\n\n cosalfar=(cosBeta*cosGamma-cosAlpha)/(sinBeta*sinGamma)\n cosbetar=(cosAlpha*cosGamma-cosBeta)/(sinAlpha*sinGamma)\n cosgamar=(cosAlpha*cosBeta-cosGamma)/(sinAlpha*sinBeta)\n\n alfar=np.arccos(cosalfar)\n betar=np.arccos(cosbetar)\n gamar=np.arccos(cosgamar)\n\n am = np.matrix([[ar, br*np.cos(gamar), cr*np.cos(betar)],\n [ 0.0, br*np.sin(gamar), -cr*np.sin(betar)*cosAlpha],\n [ 0.0, 0.0, 1.0/c]])\n\n #print am\n\n return am", "def sector(ix,iy,iz):\n\n if eecrystalphi(ix,iy,iz) ==999 : return 999\n \n deg = ( eecrystalphi(ix,iy,iz)+ pi ) * 180/pi\n return int(deg/5)", "def _compute_pitch_angle(self):\n return np.arctan(self.pitch / (2.0 * np.pi * self.radii))", "def getQuad(ang):\n if ang < math.radians(180):\n if ang < math.radians(90): return 1\n else: return 2\n elif ang < math.radians(270): return 3\n else: return 4", "def __add_orientation_marks(self):\n center = self.size // 2\n ring_radius = 5 if self.compact else 7\n # add orientation marks\n # left-top\n self.matrix[center - ring_radius][center - ring_radius] = '#'\n self.matrix[center - ring_radius + 1][center - ring_radius] = '#'\n self.matrix[center - ring_radius][center - ring_radius + 1] = '#'\n # right-top\n self.matrix[center - ring_radius + 0][center + ring_radius + 0] = '#'\n self.matrix[center - ring_radius + 1][center + ring_radius + 0] = '#'\n # right-down\n self.matrix[center + ring_radius - 1][center + ring_radius + 0] = '#'", "def roangles3D(dens, Bx, By, Bz):\n # Calculates the relative orientation angles between the density structures and the magnetic field.\n # INPUTS\n # dens - regular cube with the values of density \n # Bx -\n # By -\n # Bz -\n #\n # OUTPUTS\n #\n #\n \n grad=np.gradient(dens, edge_order=2)\n \n # JCIM - are you sure this is the order of the output? 
gx = [1], gy = [0] and gz = [2]?\n #gx=grad[1]; gy=grad[0]; gz=grad[2];\n gx=grad[0]; gy=grad[1]; gz=grad[2];\n \n normgrad=np.sqrt(gx*gx+gy*gy+gz*gz)\n normb =np.sqrt(Bx*Bx+By*By+Bz*Bz)\n \n zerograd=(normgrad==0.).nonzero()\t\n zerob =(normb ==0.).nonzero()\n \n normcross=np.sqrt((gy*Bz-gz*By)**2+(gx*Bz-gz*Bx)**2+(gx*By-gy*Bx)**2)\n normdot =gx*Bx+gy*By+gz*Bz\t\n \n # Here I calculate the angle using atan2 to avoid the numerical problems of acos or asin\n phigrad=np.arctan2(normcross,normdot) \n \n # The cosine of the angle between the iso-density and B is the sine of the angle between\n # the density gradient and B.\t\n cosphi=np.sin(phigrad)\n \n # JCIM: what is this value 32768?\n cosphi[(normgrad == 0.).nonzero()]=-32768\n cosphi[(normb == 0.).nonzero()]=-32768\n \n return cosphi", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def calcBA(atmcoordi, atmcoordj, atmcoordk):\n vecji = atmcoordi - atmcoordj\n vecjk = atmcoordk - atmcoordj\n normji = np.sqrt( np.dot(vecji, vecji) )\n normjk = np.sqrt( np.dot(vecjk, vecjk) )\n dotijk = np.dot(vecji, vecjk)\n angleijk = 180 - np.arccos( dotijk / (normji * normjk) ) * rad2grad \n return angleijk", "def angleBetween(vector1=\"string\", euler=bool, constructionHistory=bool, vector2=\"string\"):\n pass", "def findAngleB():\n B=math.asin(b/c)\n return B", "def azimuth_angle(self):\n\t\tdiv = math.cos(math.radians(self.declination_angle())) * (math.sin(math.radians(self.hour_angle())) / math.cos(math.radians(self.altitude_angle())))\n\t\treturn math.degrees(math.asin(div))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the particular crystal orientation called Copper, which corresponds to the Euler angles (90, 35.264, 45).
def copper(): return Orientation.from_euler((90., 35.264, 45.))
[ "def sector(ix,iy,iz):\n\n if eecrystalphi(ix,iy,iz) ==999 : return 999\n \n deg = ( eecrystalphi(ix,iy,iz)+ pi ) * 180/pi\n return int(deg/5)", "def orientation(p, q, r):\n # use the slope to get orientation\n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])\n\n if val == 0: # colinear\n return 0\n\n return 1 if val > 0 else 2 # clock or counterclokwise", "def calcOrient(self, za, ref, ang, detector, mode, setting, acur = 1e-9):\r\n #first check that za (real space) and ref (recyprocal space) are indeed perpendicular. This follows the normal h*u + k*v + l*w = 0 relationship valid for any crystal system.\r\n if abs(np.dot(za, ref))<acur:\r\n #turn angle from degrees to radians\r\n ang = ang/360*2*np.pi\r\n \r\n #calculate the cartesian equivalents of the vectors\r\n zaC = self.millerToCartesian(za)\r\n refC = self.millerToCartesian(ref, typ = \"recyp\")\r\n #normalize the vectors\r\n zaC = zaC/np.linalg.norm(zaC)\r\n refC = refC/np.linalg.norm(refC)\r\n depC = np.cross(zaC, refC)\r\n #the vectors of the crystal to be transformed\r\n mat1 = np.array([zaC, refC, depC]).T\r\n \r\n #the matrix of corresponding detector vectors\r\n c1 = np.array([0,0,1])\r\n c2 = np.array([np.cos(ang), np.sin(ang), 0])\r\n c3 = np.array([np.cos(ang+np.pi/2), np.sin(ang+np.pi/2), 0])\r\n mat2 = np.array([c1, c2, c3]).T\r\n \r\n #these must be converted to stage coordinates.\r\n dec = self.stage.getTEM().getDetector(detector) #find the detector\r\n realcords = dec.detectorToAbs(mat2, mode, setting) #change to absolute coordinates\r\n stagecoords = self.stage.absToStage(realcords)\r\n \r\n \r\n #the rotation matrix needs to turn mat 1 (cartesian vectors stuck to crystal) into stagecoords (stage vectors). Therefore\r\n ormat = np.dot(stagecoords, np.linalg.inv(mat1))\r\n self.setOrient(ormat)\r\n #multiplying by ormat goes from crystal cartesian vector to stage coordinates, ormat.T (inverse) goes from stage to cartesian.\r\n return ormat\r\n else:\r\n print(\"ZA vector and reflection vector are not perpendicular\")\r\n return np.identity(3)", "def phase_angle(self, scanpoint=0):\n disc_openings = (26.0, 42.0, 43.5, 126.0)\n O_C1d, O_C2d, O_C2Bd, O_C3d = disc_openings\n\n cat = self.cat\n master = cat.master\n slave = cat.slave\n disc_phase = cat.phase[scanpoint]\n phase_angle = 0\n\n if master == 1:\n phase_angle += 0.5 * O_C1d\n master_opening = O_C1d\n elif master == 2:\n phase_angle += 0.5 * O_C2d\n master_opening = O_C2d\n\n # the phase_offset is defined as the angle you have to add to the\n # calibrated blind opening to get to the nominal optically blind\n # chopper opening.\n # e.g. 
Nominal opening for optically blind may be at 34 degrees\n # but the calibrated optically blind position is 34.22 degrees\n # the chopper_phase_offset would be -0.22 degrees.\n if slave == 2:\n phase_angle += 0.5 * O_C2d\n phase_angle += -disc_phase - cat.poff_c2_slave_1_master[0]\n elif slave == 3:\n # chopper 2B\n phase_angle += 0.5 * O_C2Bd\n if master == 1:\n phase_angle += -disc_phase - cat.poff_c2b_slave_1_master[0]\n elif master == 2:\n phase_angle += -disc_phase - cat.poff_c2b_slave_2_master[0]\n\n return phase_angle, master_opening", "def solid_angle(angle):\n return (pi/4)*angle**2", "def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)", "def euler2_qua(self,euler_angle):\n qua_angle=numpy.zeros([4,1],dtype=float) \n qua_angle[0]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n qua_angle[1]=(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n -(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[2]=(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[3]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2)) \\\n -(math.sin(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n return qua_angle", "def compass(angles, radii, arrowprops=dict(color='darkorange', linewidth=2)):\n\n #angles, radii = cart2pol(u, v)\n\n fig, ax = plt.subplots(subplot_kw=dict(polar=True))\n\n #kw = \n #if arrowprops:\n # kw.update(arrowprops)\n [ax.annotate(\"\", xy=(-angle, radius), xytext=(0, 0),\n arrowprops=dict(arrowstyle=\"->\", linewidth=2, color=rad2rgb(angle))) for\n angle, radius in zip(angles, radii)]\n\n ax.set_ylim(0, np.max(radii))\n\n return fig, ax", "def circular_sector(r,c):\n A = pi * (r**2) * (c/360)\n return A", "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def orient_6circle(self, phi=0, chi=0, eta=0, mu=0):\n rotation = fc.diffractometer_rotation(phi, chi, eta, mu)\n self.orient_set_r(rotation)", "def compl180(self):\n\n rot_ang = - (180.0 - self.rotAngle) % 360.0\n return RotationAxis.fromDirect(self.dr, rot_ang)", "def toDegree(self):\n self.x = self.x * 180 / pi\n self.y = self.y * 180 / pi\n self.z = self.z * 180 / pi\n return self", "def _compute_pitch_angle(self):\n return np.arctan(self.pitch / (2.0 * np.pi * self.radii))", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))", "def correct_angle(hdr, angle=0):\n # Get relevant setup info from header\n date_obs = datetime.strptime(hdr['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f')\n instrument = hdr['ESO OCS DET1 IMGNAME'].split('_')[2]\n stabilization = hdr['ESO INS4 COMB ROT']\n\n # Define angles\n tn = -1.75\n parang = hdr['ESO TEL PARANG START']\n posang = hdr['ESO INS4 DROT2 POSANG']\n tel_alt = hdr['ESO TEL ALT']\n ins4_drot2_begin = hdr['ESO INS4 DROT2 BEGIN']\n\n if stabilization == 'FIELD':\n 
pupil_offset = 0\n else:\n pupil_offset = -135.99\n\n if instrument == 'IFS':\n ifs_offset = -100.48\n else:\n ifs_offset = 0\n\n # Calculate correction factor for old observations\n if stabilization == 'FIELD' and date_obs < datetime(2016, 7, 13):\n tn += np.rad2deg(np.arctan(np.tan(np.deg2rad(tel_alt - parang - 2*ins4_drot2_begin))))\n elif stabilization == 'PUPIL' and date_obs < datetime(2016, 7, 13):\n parang += np.rad2deg(np.arctan(np.tan(np.deg2rad(tel_alt - 2*ins4_drot2_begin))))\n if stabilization == 'FIELD':\n parang_posang = posang\n else:\n parang_posang = parang\n angle += parang_posang + tn + pupil_offset + ifs_offset\n angle %= 360 # Put value in nice range\n return angle", "def get_orientation_map(crystal_map):\n eulers = crystal_map.isig[1:4]\n eulers.map(_euler2axangle_signal, inplace=True, show_progressbar=False)\n orientation_map = eulers.as_signal2D((0,1))\n #Set calibration to same as signal\n x = orientation_map.axes_manager.signal_axes[0]\n y = orientation_map.axes_manager.signal_axes[1]\n x.name = 'x'\n x.scale = crystal_map.axes_manager.navigation_axes[0].scale\n x.units = 'nm'\n y.name = 'y'\n y.scale = crystal_map.axes_manager.navigation_axes[0].scale\n y.units = 'nm'\n return orientation_map", "def _get_rr_cc(self):\n theta = self.phi\n center = self.center[::-1] #Necessary \n\n if theta % 360.0 == 0.0:\n return self.unrotated_rr_cc\n\n # Rotate transposed rr_cc\n transposed = np.array(self.unrotated_rr_cc).T\n return rotate(transposed, center=center, theta=self.phi, rint='up').T", "def test_rotation_angle_pupil_coordinate_convention(self):\n\n ra = 30.0\n dec = 0.0\n delta = 0.001\n\n pointing = CelestialCoord(ra*galsim.degrees, dec*galsim.degrees)\n north = CelestialCoord(ra*galsim.degrees, (dec+delta)*galsim.degrees)\n east = CelestialCoord((ra+delta)*galsim.degrees, dec*galsim.degrees)\n\n camera = LsstCamera(pointing, 0.0*galsim.degrees)\n x_0, y_0 = camera.pupilCoordsFromPoint(pointing)\n x_n, y_n = camera.pupilCoordsFromPoint(north)\n x_e, y_e = camera.pupilCoordsFromPoint(east)\n self.assertAlmostEqual(0.0, np.degrees(x_0), 7)\n self.assertAlmostEqual(0.0, np.degrees(y_0), 7)\n self.assertAlmostEqual(0.0, np.degrees(x_n), 7)\n self.assertGreater(np.degrees(y_n), 1.0e-4)\n self.assertLess(np.degrees(x_e), -1.0e-4)\n self.assertAlmostEqual(np.degrees(y_e), 0.0, 7)\n\n camera = LsstCamera(pointing, 90.0*galsim.degrees)\n x_n, y_n = camera.pupilCoordsFromPoint(north)\n x_e, y_e = camera.pupilCoordsFromPoint(east)\n self.assertLess(np.degrees(x_n), -1.0e-4)\n self.assertAlmostEqual(np.degrees(y_n), 0.0, 7)\n self.assertAlmostEqual(np.degrees(x_e), 0.0, 7)\n self.assertLess(np.degrees(y_e), -1.0e-4)\n\n camera = LsstCamera(pointing, -90.0*galsim.degrees)\n x_n, y_n = camera.pupilCoordsFromPoint(north)\n x_e, y_e = camera.pupilCoordsFromPoint(east)\n self.assertGreater(np.degrees(x_n), 1.0e-4)\n self.assertAlmostEqual(np.degrees(y_n), 0.0, 7)\n self.assertAlmostEqual(np.degrees(x_e), 0.0, 7)\n self.assertGreater(np.degrees(y_e), 1.0e-4)\n\n camera = LsstCamera(pointing, 180.0*galsim.degrees)\n x_n, y_n = camera.pupilCoordsFromPoint(north)\n x_e, y_e = camera.pupilCoordsFromPoint(east)\n self.assertAlmostEqual(np.degrees(x_n), 0, 7)\n self.assertLess(np.degrees(y_n), -1.0e-4)\n self.assertGreater(np.degrees(x_e), 1.0e-4)\n self.assertAlmostEqual(np.degrees(y_e), 0.0, 7)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the particular crystal orientation called Goss, which corresponds to the Euler angles (0, 45, 0).
def goss(): return Orientation.from_euler((0., 45., 0.))
[ "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def get_zenith_angle(self):\n theta = self.get_sph()[2]\n return np.pi/2. - theta", "def __str__(self):\n return \"{0:.4f}\".format(self.GetAngle('GON'))", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def solid_angle(angle):\n return (pi/4)*angle**2", "def angs(self):\n if self.units == \"Bohr\":\n self.geom /= 1.889725989\n self.units = \"Angstrom\"\n return self.geom", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez)\n\n #these approximations are for compton scattering\n if (abs(beta) < 0.027):\n alpha = 0.0\n else:\n arg = ey/math.sin(beta)\n aarg = abs(arg)\n if (aarg < 1.0):\n alpha = math.asin(arg)\n else:\n arg = arg/(1.0001*aarg)\n sco1 = math.cos(alpha)*math.sin(beta) + ex\n sco1 = abs(sco1)\n sco2 = abs(ex)\n if (sco1 < sco2):\n beta = -beta\n alpha = -alpha\n gamma = 0.0\n # alpha, beta, gamma are the euler angles of rotation from the z-axis\n # to the direction of the initial particle.\n theta = angl\n rn1 = np.random.rand()\n phi = 2*math.pi*rn1\n\n # now calculate the roation matrix to rotate the scattered direction\n # back to the original axes.\n r11 = math.cos(alpha)*math.cos(beta)*math.cos(gamma) - math.sin(alpha)*math.sin(gamma)\n r12 = math.cos(beta)*math.sin(alpha)*math.cos(gamma) + math.cos(alpha)*math.sin(gamma)\n r13 = -math.sin(beta)*math.cos(gamma)\n r21 = -math.sin(gamma)*math.cos(beta)*math.cos(alpha) - math.sin(alpha)*math.cos(gamma)\n r22 = -math.sin(gamma)*math.cos(beta)*math.sin(alpha) + math.cos(alpha)*math.cos(gamma)\n r23 = math.sin(beta)*math.sin(gamma)\n r31 = math.sin(beta)*math.cos(alpha)\n r32 = math.sin(alpha)*math.sin(beta)\n r33 = math.cos(beta)\n sox = math.sin(theta)*math.cos(phi)\n soy = math.sin(theta)*math.sin(phi)\n soz = math.cos(theta)\n sx = r11*sox + r21*soy + r31*soz\n sy = r12*sox + r22*soy + r32*soz\n sz = r13*sox + r23*soy + r33*soz\n # sx, sy, sz is the unit propagation vector of the scattered particle\n # in the original fram.\n return sx, sy, sz", "def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle", "def make_gear(pressure_angle, teeth, pitch):\n\n l = []\n\n x = []\n y = []\n\n tx, ty = make_tooth(pressure_angle, teeth, pitch)\n\n for i in range(teeth):\n\n m = float(i) * 2.0 * math.pi / float(teeth)\n rx, ry = gears_rotate(m, tx, ty)\n x.extend(rx)\n y.extend(ry)\n\n x.append(x[0])\n y.append(y[0])\n\n return x, y", "def euler2_qua(self,euler_angle):\n qua_angle=numpy.zeros([4,1],dtype=float) \n qua_angle[0]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n qua_angle[1]=(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n -(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[2]=(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n 
qua_angle[3]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2)) \\\n -(math.sin(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n return qua_angle", "def sector(ix,iy,iz):\n\n if eecrystalphi(ix,iy,iz) ==999 : return 999\n \n deg = ( eecrystalphi(ix,iy,iz)+ pi ) * 180/pi\n return int(deg/5)", "def __crystGen(self,cell):\n\n a,b,c,alpha,beta,gamma = cell\n\n alpha = alpha * np.pi/180\n beta = beta * np.pi/180\n gamma = gamma * np.pi/180\n\n cosAlpha = np.cos(alpha)\n sinAlpha = np.sin(alpha)\n cosBeta = np.cos(beta)\n sinBeta = np.sin(beta)\n cosGamma = np.cos(gamma)\n sinGamma = np.sin(gamma)\n\n vol=a*b*c*np.sqrt(1.-cosAlpha**2-cosBeta**2-cosGamma**2+2.*cosAlpha*cosBeta*cosGamma)\n\n ar=b*c*sinAlpha/vol\n br=a*c*sinBeta/vol\n cr=a*b*sinGamma/vol\n\n cosalfar=(cosBeta*cosGamma-cosAlpha)/(sinBeta*sinGamma)\n cosbetar=(cosAlpha*cosGamma-cosBeta)/(sinAlpha*sinGamma)\n cosgamar=(cosAlpha*cosBeta-cosGamma)/(sinAlpha*sinBeta)\n\n alfar=np.arccos(cosalfar)\n betar=np.arccos(cosbetar)\n gamar=np.arccos(cosgamar)\n\n am = np.matrix([[ar, br*np.cos(gamar), cr*np.cos(betar)],\n [ 0.0, br*np.sin(gamar), -cr*np.sin(betar)*cosAlpha],\n [ 0.0, 0.0, 1.0/c]])\n\n #print am\n\n return am", "def __add_orientation_marks(self):\n center = self.size // 2\n ring_radius = 5 if self.compact else 7\n # add orientation marks\n # left-top\n self.matrix[center - ring_radius][center - ring_radius] = '#'\n self.matrix[center - ring_radius + 1][center - ring_radius] = '#'\n self.matrix[center - ring_radius][center - ring_radius + 1] = '#'\n # right-top\n self.matrix[center - ring_radius + 0][center + ring_radius + 0] = '#'\n self.matrix[center - ring_radius + 1][center + ring_radius + 0] = '#'\n # right-down\n self.matrix[center + ring_radius - 1][center + ring_radius + 0] = '#'", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def rotate2strings(rotate):\n _ret_val = None\n\n if 0.5 == rotate:\n _ret_val = 'R90'\n elif 1.0 == rotate:\n _ret_val = 'R180'\n elif 1.5 == rotate:\n _ret_val = 'R270'\n return _ret_val", "def angle(self):\n cos_the = branch_angles(\n self.direction, np.array([[0, 1]]), np.ones(1))[0]\n return 180 / np.pi * np.arccos(cos_the)", "def PlotRotation(self) -> _n_0_t_5:", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))", "def ND_angle(self):\n ND_angle = np.degrees(np.arctan(np.average(self.ND_params[0,:])))\n return ND_angle", "def getQuad(ang):\n if ang < math.radians(180):\n if ang < math.radians(90): return 1\n else: return 2\n elif ang < math.radians(270): return 3\n else: return 4" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the particular crystal orientation called shear, which corresponds to the Euler angles (45, 0, 0).
def shear(): return Orientation.from_euler((45., 0., 0.))
[ "def shear(self, shear, angle=0): \n if not isinstance(shear, (int,float)):\n raise TypeError('shear factor must be numeric')\n if not isinstance(angle, (int,float)):\n raise TypeError('angle must be numeric')\n \n angle = _math.pi*angle/180.\n p = self._localToGlobal(self._reference)\n trans = _Transformation((1.,0.,0.,1.)+p.get())\n rot = _Transformation((_math.cos(angle),_math.sin(angle),\n -_math.sin(angle),_math.cos(angle),0.,0.))\n rotinv = rot.inv()\n sh = _Transformation((1.,-shear,0.,1.,0.,0.))\n \n self._transform = trans*(rotinv*(sh*(rot*(trans.inv()*self._transform))))\n self._objectChanged(True,False,False)", "def vertical_shear(self):\n\n if \"shear\" not in self.ds:\n var = xroms.vertical_shear(\n self.dudz, self.dvdz, self.grid, hboundary=\"extend\"\n )\n self.ds[\"shear\"] = var\n return self.ds[\"shear\"]", "def shear(self):\r\n return (self.shear_Voigt + self.shear_Reuss) / 2", "def shear(hx, hy):\n m = identity(3)\n\n m[0, 1] = tan(hx)\n m[1, 0] = tan(hy)\n \n return m", "def solid_angle(angle):\n return (pi/4)*angle**2", "def calculate_shear(self,B31c = 0):\n logger.debug('Calculating magnetic shear...')\n \n # Shorthand introduced: we also have to ransform to 1/B**2 expansion parameters, taking into account the \n # difference in the definition of the radial coordinate. In the work of Rodriguez et al.,\n # Phys. Plasmas, (2021), epsilon=sqrt(psi) while in the work of Landreman et al.,\n # J. Plasma Physics (2019) it is defined r=\\sqrt(2*psi/B0). Need to transform between the\n # two.\n\n eps_scale = np.sqrt(2/self.B0) \n\n # sign_psi = self.spsi\n # sign_G = self.sG # Sign is taken to be positive for simplicity. To include this, need to track expressions\n d_d_varphi = self.d_d_varphi\n G2 = self.G2*eps_scale**2\n G0 = self.G0\n I2 = self.I2*eps_scale**2\n X1c = self.X1c*eps_scale\n Y1c = self.Y1c*eps_scale\n Y1s = self.Y1s*eps_scale\n X20 = self.X20*eps_scale**2\n X2s = self.X2s*eps_scale**2\n X2c = self.X2c*eps_scale**2\n Y20 = self.Y20*eps_scale**2\n Y2s = self.Y2s*eps_scale**2\n Y2c = self.Y2c*eps_scale**2\n Z20 = self.Z20*eps_scale**2\n Z2s = self.Z2s*eps_scale**2\n Z2c = self.Z2c*eps_scale**2\n torsion = -self.torsion # I use opposite sign for the torsion\n curvature = self.curvature\n iota = self.iotaN\n dldp = self.abs_G0_over_B0\n dXc1v = self.d_X1c_d_varphi*eps_scale\n dY1cdp = self.d_Y1c_d_varphi*eps_scale\n dY1sdp = self.d_Y1s_d_varphi*eps_scale\n dZ20dp = self.d_Z20_d_varphi*eps_scale**2\n dZ2cdp = self.d_Z2c_d_varphi*eps_scale**2\n dZ2sdp = self.d_Z2s_d_varphi*eps_scale**2\n dX20dp = self.d_X20_d_varphi*eps_scale**2\n dX2cdp = self.d_X2c_d_varphi*eps_scale**2\n dX2sdp = self.d_X2s_d_varphi*eps_scale**2\n dY20dp = self.d_Y20_d_varphi*eps_scale**2\n dY2cdp = self.d_Y2c_d_varphi*eps_scale**2\n dY2sdp = self.d_Y2s_d_varphi*eps_scale**2\n # Transformation to 1/B**2 parameters \n B0 = 1/self.B0**2\n Ba0 = G0\n Ba1 = G2 + self.iotaN*I2\n eta = self.etabar*np.sqrt(2)*B0**0.25\n B1c = -2*B0*eta\n B20 = (0.75*self.etabar**2/np.sqrt(B0) - self.B20)*4*B0**2\n B31s = 0 # To preserve stellarator symmetry\n I4 = 0 # Take current variations at this order to be 0\n \n # Compute Z31c and Z31s from Cp2: we assume standard equilibria, meaning that we may\n # pick Bpsi0=0 and Bpsi1=0\n Z31c = -1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) - 2*Ba0*X2s*Y1c*Z20 +\n 2*Ba0* X2c*Y1s*Z20 + 2*Ba0*X1c*Y2s*Z20 - 4*Ba0*X2s*Y1c*Z2c - 2*Ba0* X20*Y1s*Z2c +\n 4*Ba0*X1c*Y2s*Z2c - dldp*(torsion*(2*X20*Y1c + X2c*Y1c - 2*X1c*Y20 - X1c*Y2c +\n X2s*Y1s) + I2*(2*X20*Y1c + X2c*Y1c - 
2*X1c*Y20 - X1c*Y2c + X2s*Y1s) - \n 2*curvature*X1c*Z20 - curvature*X1c*Z2c) + 2*Ba0*X20*Y1c*Z2s + 4*Ba0*X2c*Y1c*Z2s - \n 2*Ba0*X1c*Y20*Z2s - 4*Ba0*X1c*Y2c*Z2s + 2*X1c*dX20dp + X1c*dX2cdp+2*Y1c*dY20dp +\n Y1c*dY2cdp + Y1s*dY2sdp)\n \n dZ31cdp = np.matmul(d_d_varphi, Z31c)\n \n Z31s = 1/3/Ba0/X1c/Y1s*(2*iota*(X1c*X2c + Y1c*Y2c + Y1s*Y2s) - 2*Ba0*X2c*Y1c*Z20 + \n 2*Ba0*X1c*Y2c*Z20 - 2*Ba0*X2s*Y1s*Z20 + 2*Ba0*X20*Y1c*Z2c - 2*Ba0*X1c*Y20*Z2c +\n 4*Ba0*X2s*Y1s*Z2c + 2*Ba0*X20*Y1s*Z2s - 4*Ba0*X2c*Y1s*Z2s + dldp*(I2*X2s*Y1c + \n 2*I2*X20*Y1s - I2*X2c*Y1s - I2*X1c*Y2s + torsion*(X2s*Y1c + 2*X20*Y1s - X2c*Y1s -\n X1c*Y2s) - curvature*X1c*Z2s) - X1c*dX2sdp - 2*Y1s*dY20dp + Y1s*dY2cdp - Y1c*dY2sdp)\n \n dZ31sdp = np.matmul(d_d_varphi, Z31s)\n\n \n # Equation J3: expression for X31c/s\n X31c = 1/2/dldp**2/curvature*(-2*Ba0*Ba1*B1c - Ba0**2*B31c+2*dldp**2*torsion**2*X1c*X20 +\n 2*iota**2*X1c*X2c + dldp**2*torsion**2*X1c*X2c + dldp**2*curvature**2*X1c*(2*X20 + X2c) + \n 3*dldp*iota*torsion*X2s*Y1c + 2*dldp**2*torsion**2*Y1c*Y20 + 2*iota**2*Y1c*Y2c +\n dldp**2*torsion**2*Y1c*Y2c - 2*dldp*iota*torsion*X20*Y1s - 3*dldp*iota*torsion*X2c*Y1s -\n 3*dldp*iota*torsion*X1c*Y2s + 2*iota**2*Y1s*Y2s + dldp**2*torsion**2*Y1s*Y2s + \n 2*dldp*iota*Z31s + 2*iota*X2s*dXc1v + 2*dldp*torsion*Y20*dXc1v + dldp*torsion*Y2c*dXc1v + \n 2*dldp*torsion*Y1c*dX20dp + 2*dXc1v*dX20dp + dldp*torsion*Y1c*dX2cdp + dXc1v*dX2cdp - \n iota*X1c*dX2sdp + dldp*torsion*Y1s*dX2sdp - 2*dldp*torsion*X20*dY1cdp - dldp*torsion*X2c*dY1cdp +\n 2*iota*Y2s*dY1cdp - 2*dldp*torsion*X1c*dY20dp + 2*iota*Y1s*dY20dp + 2*dY1cdp*dY20dp - \n dldp*torsion*X1c*dY2cdp + iota*Y1s*dY2cdp + dY1cdp*dY2cdp - dldp*torsion*X2s*dY1sdp - \n 2*iota*Y2c*dY1sdp - iota*Y1c*dY2sdp + dY1sdp*dY2sdp + dldp*curvature*(-3*iota*X1c*Z2s + \n dldp*torsion*(Y1c*(2*Z20 + Z2c) + Y1s*Z2s) + 2*Z20*dXc1v + Z2c*dXc1v - 2*X1c*dZ20dp - \n X1c*dZ2cdp) + 2*dldp*dZ31cdp)\n \n X31s = 1/2/dldp**2/curvature*(-Ba0**2*B31s + dldp**2*curvature**2*X1c*X2s + dldp**2*torsion**2*X1c*X2s +\n 2*dldp**2*torsion**2*Y20*Y1s - dldp**2*torsion**2*Y2c*Y1s + dldp**2*torsion**2*Y1c*Y2s +\n 2*iota**2*(X1c*X2s - Y2c*Y1s + Y1c*Y2s) + 2*dldp**2*curvature*torsion*Y1s*Z20 - \n dldp**2*curvature*torsion*Y1s*Z2c + dldp**2*curvature*torsion*Y1c*Z2s + dldp*torsion*Y2s*dXc1v +\n dldp*curvature*Z2s*dXc1v + 2*dldp*torsion*Y1s*dX20dp - dldp*torsion*Y1s*dX2cdp + \n dldp*torsion*Y1c*dX2sdp + dXc1v*dX2sdp - dldp*torsion*X2s*dY1cdp - 2*dldp*torsion*X20*dY1sdp + \n dldp*torsion*X2c*dY1sdp + 2*dY20dp*dY1sdp - dY2cdp*dY1sdp - dldp*torsion*X1c*dY2sdp + dY1cdp*dY2sdp +\n iota*(dldp*torsion*(2*X20*Y1c - 3*X2c*Y1c - 2*X1c*Y20 + 3*X1c*Y2c - 3*X2s*Y1s) + dldp*curvature*X1c*\n (-2*Z20 + 3*Z2c) - 2*dldp*Z31c - 2*X2c*dXc1v - 2*X1c*dX20dp + X1c*dX2cdp - 2*Y2c*dY1cdp -\n 2*Y1c*dY20dp + Y1c*dY2cdp - 2*Y2s*dY1sdp + Y1s*dY2sdp) - dldp*curvature*X1c*dZ2sdp +2*dldp*dZ31sdp)\n\n dX31sdp = np.matmul(d_d_varphi, X31s)\n \n # Equation Cb2\n Y31s = 1/4/Ba0/X1c*(-2*Ba1*X1c*Y1s + 2*iota*I2*X1c*Y1s - dldp*(4*curvature*X20 + torsion*I2*\n (X1c**2 + Y1c**2 + Y1s**2)) + 4*Ba0*(X31s*Y1c + 2*X2s*Y2c - X31c*Y1s - 2*X2c*Y2s) -\n I2*Y1c*dXc1v + I2*X1c*dY1cdp + 4*dZ20dp) \n\n dY31sdp = np.matmul(d_d_varphi, Y31s)\n\n \n # From the equation for Bt to order n=4, and looking at m=0\n LamTilde = 2/Y1s**2*(Ba0*B0*I4 + (Ba1*B0 + Ba0*B20)*I2) + 1/Y1s**2*(-2*iota*(2*X2c**2 + X1c*X31c + \n 2*X2s**2 + 2*Y2c**2 + 2*Y2s**2 + Y1s*Y31s + 2*Z2c**2 + 2*Z2s**2) + 2*dldp*(torsion*(-X31s*Y1c -\n 2*X2s*Y2c + X31c*Y1s + 2*X2c*Y2s + X1c*Y31s) + curvature*(-2*X2s*Z2c + 2*X2c*Z2s + X1c*Z31s)) 
-\n X31s*dXc1v - 2*X2s*dX2cdp + 2*X2c*dX2sdp + X1c*dX31sdp - Y31s*dY1cdp - 2*Y2s*dY2cdp +\n 2*Y2c*dY2sdp + Y1c*dY31sdp - 2*Z2s*dZ2cdp + 2*Z2c*dZ2sdp)\n\n # Need to compute the integration factor necessary for computing the shear\n DMred = d_d_varphi[1:,1:] # The differentiation matrix has a linearly dependent row, focus on submatrix\n\n # Distinguish between the stellarator symmetric case and the non-symmetric one at order r^1.\n # Distinction leads to the expSig function being periodic (stell. sym.) or not.\n if self.sigma0 == 0 and np.max(np.abs(self.rs)) == 0 and np.max(np.abs(self.zc)) == 0:\n # Case in which sigma is stellarator-symmetric:\n integSig = np.linalg.solve(DMred,self.sigma[1:]) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig = np.exp(2*iota*integSig)\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n self.iota2 = self.B0/2*sum(expSig*LamTilde*self.d_varphi_d_phi)/sum(expSig*(X1c**2 + Y1c**2 + Y1s**2)/Y1s**2*self.d_varphi_d_phi) \n else:\n # Case in which sigma is not stellarator-symmetric:\n # d_phi_d_varphi = 1 + np.matmul(d_d_varphi,self.phi-self.varphi)\n avSig = sum(self.sigma*self.d_varphi_d_phi)/len(self.sigma) # Separate the piece that gives secular part, so all things periodic\n integSigPer = np.linalg.solve(DMred,self.sigma[1:]-avSig) # Invert differentiation matrix: as if first entry a zero, need to add it later\n integSig = integSigPer + avSig*self.varphi[1:] # Include the secular piece\n integSig = np.insert(integSig,0,0) # Add the first entry 0\n expSig_ext = np.append(np.exp(2*iota*integSig),np.exp(2*iota*(avSig*2*np.pi/self.nfp))) # Add endpoint at 2*pi for better integration\n LamTilde_ext = np.append(LamTilde,LamTilde[0])\n fac_denom = (X1c**2 + Y1c**2 + Y1s**2) / Y1s**2\n fac_denom_ext = np.append(fac_denom, fac_denom[0])\n varphi_ext = np.append(self.varphi, 2 * np.pi / self.nfp)\n self.iota2 = self.B0 / 2 \\\n * integ.trapz(expSig_ext * LamTilde_ext, varphi_ext) \\\n / integ.trapz(expSig_ext * fac_denom_ext, varphi_ext)\n \n # Using cumtrapz without exploiting periodicity\n # expSig = np.exp(2*iota*integ.cumtrapz(self.sigma,self.varphi,initial=0))", "def zenith_angle(self):\n\t\treturn 90 - self.altitude_angle()", "def hadec_to_rot(ha, dec):\n theta_rot = np.arctan2(np.sin(dec) * np.sin(ha), np.cos(ha))\n\n return theta_rot.to(u.deg)", "def sector(ix,iy,iz):\n\n if eecrystalphi(ix,iy,iz) ==999 : return 999\n \n deg = ( eecrystalphi(ix,iy,iz)+ pi ) * 180/pi\n return int(deg/5)", "def euler2_qua(self,euler_angle):\n qua_angle=numpy.zeros([4,1],dtype=float) \n qua_angle[0]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n qua_angle[1]=(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n -(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[2]=(math.cos(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2)) \\\n +(math.sin(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2))\n qua_angle[3]=(math.cos(euler_angle[0]/2)*math.cos(euler_angle[1]/2)*math.sin(euler_angle[2]/2)) \\\n -(math.sin(euler_angle[0]/2)*math.sin(euler_angle[1]/2)*math.cos(euler_angle[2]/2))\n return qua_angle", "def shear_Reuss(self):\r\n s = self.Sij\r\n return 15 / (4 * (s[0, 0] + s[1, 1] + s[2, 2]) - 4 * (s[0, 1] + s[1, 2] + s[0, 2]) 
+ 3 * (s[3, 3] + s[4, 4] + s[5, 5]))", "def get_zenith_angle(self):\n theta = self.get_sph()[2]\n return np.pi/2. - theta", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez)\n\n #these approximations are for compton scattering\n if (abs(beta) < 0.027):\n alpha = 0.0\n else:\n arg = ey/math.sin(beta)\n aarg = abs(arg)\n if (aarg < 1.0):\n alpha = math.asin(arg)\n else:\n arg = arg/(1.0001*aarg)\n sco1 = math.cos(alpha)*math.sin(beta) + ex\n sco1 = abs(sco1)\n sco2 = abs(ex)\n if (sco1 < sco2):\n beta = -beta\n alpha = -alpha\n gamma = 0.0\n # alpha, beta, gamma are the euler angles of rotation from the z-axis\n # to the direction of the initial particle.\n theta = angl\n rn1 = np.random.rand()\n phi = 2*math.pi*rn1\n\n # now calculate the roation matrix to rotate the scattered direction\n # back to the original axes.\n r11 = math.cos(alpha)*math.cos(beta)*math.cos(gamma) - math.sin(alpha)*math.sin(gamma)\n r12 = math.cos(beta)*math.sin(alpha)*math.cos(gamma) + math.cos(alpha)*math.sin(gamma)\n r13 = -math.sin(beta)*math.cos(gamma)\n r21 = -math.sin(gamma)*math.cos(beta)*math.cos(alpha) - math.sin(alpha)*math.cos(gamma)\n r22 = -math.sin(gamma)*math.cos(beta)*math.sin(alpha) + math.cos(alpha)*math.cos(gamma)\n r23 = math.sin(beta)*math.sin(gamma)\n r31 = math.sin(beta)*math.cos(alpha)\n r32 = math.sin(alpha)*math.sin(beta)\n r33 = math.cos(beta)\n sox = math.sin(theta)*math.cos(phi)\n soy = math.sin(theta)*math.sin(phi)\n soz = math.cos(theta)\n sx = r11*sox + r21*soy + r31*soz\n sy = r12*sox + r22*soy + r32*soz\n sz = r13*sox + r23*soy + r33*soz\n # sx, sy, sz is the unit propagation vector of the scattered particle\n # in the original fram.\n return sx, sy, sz", "def rotate_ryb(self, angle=180):\n\n h = self.h * 360\n angle = angle % 360\n\n # Approximation of Itten's RYB color wheel.\n # In HSB, colors hues range from 0-360.\n # However, on the artistic color wheel these are not evenly distributed. \n # The second tuple value contains the actual distribution.\n wheel = [\n ( 0, 0), ( 15, 8),\n ( 30, 17), ( 45, 26),\n ( 60, 34), ( 75, 41),\n ( 90, 48), (105, 54),\n (120, 60), (135, 81),\n (150, 103), (165, 123),\n (180, 138), (195, 155),\n (210, 171), (225, 187),\n (240, 204), (255, 219),\n (270, 234), (285, 251),\n (300, 267), (315, 282),\n (330, 298), (345, 329),\n (360, 0 )\n ]\n \n # Given a hue, find out under what angle it is\n # located on the artistic color wheel.\n for i in _range(len(wheel)-1):\n x0, y0 = wheel[i] \n x1, y1 = wheel[i+1]\n if y1 < y0:\n y1 += 360\n if y0 <= h <= y1:\n a = 1.0 * x0 + (x1-x0) * (h-y0) / (y1-y0)\n break\n \n # And the user-given angle (e.g. complement).\n a = (a+angle) % 360\n\n # For the given angle, find out what hue is\n # located there on the artistic color wheel.\n for i in range(len(wheel)-1):\n x0, y0 = wheel[i] \n x1, y1 = wheel[i+1]\n if y1 < y0:\n y1 += 360\n if x0 <= a <= x1:\n h = 1.0 * y0 + (y1-y0) * (a-x0) / (x1-x0)\n break\n \n h = h % 360\n return Color(h/360, self.s, self.brightness, self.a, mode=\"hsb\", name=\"\")", "def cylinder(r,h):\n A = 2 * pi * r * (r+h)\n return A", "def shear_Voigt(self):\r\n c = self.Cij\r\n return ((c[0, 0] + c[1, 1] + c[2, 2]) - (c[0, 1] + c[1, 2] + c[0, 2]) + 3 * (c[3, 3] + c[4, 4] + c[5, 5])) / 15", "def calcOrient(self, za, ref, ang, detector, mode, setting, acur = 1e-9):\r\n #first check that za (real space) and ref (recyprocal space) are indeed perpendicular. 
This follows the normal h*u + k*v + l*w = 0 relationship valid for any crystal system.\r\n if abs(np.dot(za, ref))<acur:\r\n #turn angle from degrees to radians\r\n ang = ang/360*2*np.pi\r\n \r\n #calculate the cartesian equivalents of the vectors\r\n zaC = self.millerToCartesian(za)\r\n refC = self.millerToCartesian(ref, typ = \"recyp\")\r\n #normalize the vectors\r\n zaC = zaC/np.linalg.norm(zaC)\r\n refC = refC/np.linalg.norm(refC)\r\n depC = np.cross(zaC, refC)\r\n #the vectors of the crystal to be transformed\r\n mat1 = np.array([zaC, refC, depC]).T\r\n \r\n #the matrix of corresponding detector vectors\r\n c1 = np.array([0,0,1])\r\n c2 = np.array([np.cos(ang), np.sin(ang), 0])\r\n c3 = np.array([np.cos(ang+np.pi/2), np.sin(ang+np.pi/2), 0])\r\n mat2 = np.array([c1, c2, c3]).T\r\n \r\n #these must be converted to stage coordinates.\r\n dec = self.stage.getTEM().getDetector(detector) #find the detector\r\n realcords = dec.detectorToAbs(mat2, mode, setting) #change to absolute coordinates\r\n stagecoords = self.stage.absToStage(realcords)\r\n \r\n \r\n #the rotation matrix needs to turn mat 1 (cartesian vectors stuck to crystal) into stagecoords (stage vectors). Therefore\r\n ormat = np.dot(stagecoords, np.linalg.inv(mat1))\r\n self.setOrient(ormat)\r\n #multiplying by ormat goes from crystal cartesian vector to stage coordinates, ormat.T (inverse) goes from stage to cartesian.\r\n return ormat\r\n else:\r\n print(\"ZA vector and reflection vector are not perpendicular\")\r\n return np.identity(3)", "def shearing2D(xshear, yshear):\n\n shearmat = np.array([[0, xshear, 0],\n [yshear, 0, 0],\n [0, 0, 0]])\n\n return np.eye(3) + shearmat", "def add_shear_z(fit,cov):\n J = np.array([[ 1, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0],\n [ 0, 0, 0, -1, -1, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 1, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 1]])\n\n return J.dot(fit), J.dot(cov.dot(J.T))", "def haversin(angle):\n return ((1.0 - math.cos(angle*math.pi/180.0))/2.0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a random crystal orientation.
def random():
    from random import random
    from math import acos
    phi1 = random() * 360.
    Phi = 180. * acos(2 * random() - 1) / np.pi
    phi2 = random() * 360.
    return Orientation.from_euler([phi1, Phi, phi2])
[ "def randomanglerotate(axis, xyz):\n angle = 2 * pi * rand()\n return rotate(axis, angle, xyz)", "def random_move(self):\n\t\toptions = [90, 180, 270]\n\t\tang = randint(0,2)\n\t\tn = randint(2, self.length - 1)\n\t\tself.rotate(n, radians(options[ang]))", "def pickDirection():\n turtle.right(random.randrange(-1*MAX_ANGLE(),MAX_ANGLE()))", "def randomize(self):\n if self.degrees is not None:\n rotate = np.random.uniform(*self.degrees, size=self.ndims)\n rotate = np.deg2rad(rotate)\n if self.ndims == 2:\n rotate_matrix = np.array(\n [\n [np.cos(rotate[0]), -np.sin(rotate[0]), 0],\n [np.sin(rotate[0]), np.cos(rotate[0]), 0],\n [0, 0, 1],\n ]\n )\n else:\n rx = np.array(\n [\n [1, 0, 0, 0],\n [0, np.cos(rotate[0]), -np.sin(rotate[0]), 0],\n [0, np.sin(rotate[0]), np.cos(rotate[0]), 0],\n [0, 0, 0, 1],\n ]\n )\n ry = np.array(\n [\n [np.cos(rotate[1]), 0, np.sin(rotate[1]), 0],\n [0, 1, 0, 0],\n [-np.sin(rotate[1]), 0, np.cos(rotate[1]), 0],\n [0, 0, 0, 1],\n ]\n )\n rz = np.array(\n [\n [np.cos(rotate[2]), -np.sin(rotate[2]), 0, 0],\n [np.sin(rotate[2]), np.cos(rotate[2]), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n )\n rotate_matrix = rx.dot(ry).dot(rz)\n else:\n rotate_matrix = np.eye(self.ndims + 1)\n\n if self.translate is not None:\n # draw random translate\n translate = np.random.uniform(*self.translate, size=self.ndims)\n # make affine transformation matrix\n translate_matrix = np.eye(self.ndims + 1)\n translate_matrix[:-1, -1] = translate\n else:\n translate_matrix = np.eye(self.ndims + 1)\n\n if self.scale is not None:\n # draw random scale\n scale = np.random.uniform(*self.scale, size=(self.ndims))\n # add homogenous coordinate\n scale = np.append(scale, [1])\n # make affine transformation matrix\n scale_matrix = np.diag(scale)\n else:\n scale_matrix = np.eye(self.ndims + 1)\n\n if self.shear is not None:\n # draw random shear\n shear = np.random.uniform(*self.shear, size=(self.ndims, self.ndims))\n shear_matrix = np.eye(self.ndims + 1)\n shear_matrix[:-1, :-1] = shear\n for i in range(self.ndims):\n shear_matrix[i, i] = 1\n else:\n shear_matrix = np.eye(self.ndims + 1)\n\n if self.flip:\n # draw random flip\n flip = np.sign(np.random.normal(size=self.ndims))\n # add homogenous coordinate\n flip = np.append(flip, [1])\n # make affine transformation matrix\n flip_matrix = np.diag(flip)\n else:\n flip_matrix = np.eye(self.ndims + 1)\n\n # combine all transformations\n self.affine = (\n rotate_matrix.dot(translate_matrix)\n .dot(scale_matrix)\n .dot(shear_matrix)\n .dot(flip_matrix)\n )\n return", "def generate_rotation(self):\r\n\r\n rng = self.cfg[\"rng\"]\r\n available_numbers = [i for i in range(self.num_abilities)]\r\n rotation = []\r\n\r\n while len(rotation) < self.rotation_length:\r\n idx = rng.randint(0, len(available_numbers))\r\n rotation.append(available_numbers.pop(idx))\r\n\r\n return rotation", "def random_shape(max_sides=7):\n return rotate(approx_ngon(rand.randrange(3, max_sides)),\n rand.uniform(0, 360),\n origin='centroid')", "def random_gen(self):\n\t\ttypes = [\"Normal\", \"Robot\", \"Ninja\", \"Fire\", \"Water\", \"Dinosaur\", \"Earth\", \"Sound\", \"Wind\", \"Darkness\", \"Light\", \"Plasma\", \"Solar\", \"Lunar\", \"Meme\", \"Magic\"]\n\t\tself._name_gen()\n\t\tself.speed = random.randint(1, 6) # All ranges here are balanced using eyeballs and hopes. 
And wishes.\n\t\tself.attk_pw = random.randint(0, 5)\n\t\tself.attk_type = random.choice(['physical', 'emotional'])\n\t\tself.moveType = random.choice(types)\n\t\tif self.attk_type == 'emotional':\n\t\t\tself.fp = random.randint(1, 5)", "def random_unit(cls):\n\n #\n # This method is adapted from \n # http://mathworld.wolfram.com/SpherePointPicking.html\n #\n theta = random() * pi * 2.0\n return vector2d(cos(theta), sin(theta))", "def _set_random_direction(self):\n direction = np.random.randn(self.polytope.dim)\n self.direction = direction / norm(direction)", "def random_rotation_matrix(randgen=None):\n # adapted from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c\n \n if randgen is None:\n randgen = np.random.RandomState()\n \n theta, phi, z = tuple(randgen.rand(3).tolist())\n \n theta = theta * 2.0*np.pi # Rotation about the pole (Z).\n phi = phi * 2.0*np.pi # For direction of pole deflection.\n z = z * 2.0 # For magnitude of pole deflection.\n \n # Compute a vector V used for distributing points over the sphere\n # via the reflection I - V Transpose(V). This formulation of V\n # will guarantee that if x[1] and x[2] are uniformly distributed,\n # the reflected points will be uniform on the sphere. Note that V\n # has length sqrt(2) to eliminate the 2 in the Householder matrix.\n \n r = np.sqrt(z)\n Vx, Vy, Vz = V = (\n np.sin(phi) * r,\n np.cos(phi) * r,\n np.sqrt(2.0 - z)\n )\n \n st = np.sin(theta)\n ct = np.cos(theta)\n \n R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n # Construct the rotation matrix ( V Transpose(V) - I ) R.\n\n M = (np.outer(V, V) - np.eye(3)).dot(R)\n return M", "def _starting_angle(angle=1):\n return np.random.rand() * 2 * angle - angle", "def randomrotate(xyz):\n # get random point on unit sphere\n axis = randn(3)\n axis = axis / norm(axis)\n angle = 2 * pi * rand()\n return rotate(axis, angle, xyz)", "def randomcreaturetype(self, level):\n roll = random.randint(1, 100)\n if roll <= 18:\n return 'PC'\n elif roll <= 33:\n return 'Trainable'\n elif roll <= 90:\n return 'Nontrainable'\n else:\n return 'Rare'", "def random_rotation_rotor(max_angle=np.pi):\n return generate_rotation_rotor(max_angle * np.random.rand(), random_unit_vector(), random_unit_vector())", "def sample_orbit_rotation(self):\n return np.random.random(size=self.number_of_stars) * 2 * np.pi", "def generate_random_asteroid():\n radius = random.randrange(1, 4)\n return Asteroid(Asteroid.calculate_circumference(radius),\n Asteroid.generate_random_position(),\n Asteroid.generate_random_velocity())", "def generate_random_rotation_matrix() -> np.ndarray:\n u = generate_random_unit_vector()\n v = generate_random_unit_vector()\n while np.abs(np.dot(u, v)) >= 0.99:\n v = generate_random_unit_vector()\n\n vp = v - (np.dot(u, v) * u)\n vp /= np.linalg.norm(vp)\n w = np.cross(u, vp)\n R = np.column_stack((u, vp, w))\n return R", "def get_random(min_side_length, max_side_length):\n # Generates a random rectangle with min and max side lengths specified\n # Make it randomly rotated but not translated from (0,0)\n l = np.random.randint(min_side_length, max_side_length)\n w = np.random.randint(min_side_length, max_side_length)\n # theta = np.random.uniform(0, 2*np.pi)\n # transform = np.array([[np.cos(theta), -np.sin(theta), 0],\n # [np.sin(theta), np.cos(theta), 0],\n # [0, 0, 1]])\n return Rectangle(l, w, np.eye(3))", "def random():\n return Note(random.randrange(12))", "def generateDirection(self):\n screenWidth, screenHeight = self.screen.get_size()\n randX = randint(0, 
screenWidth)\n randY = randint(0, screenHeight)\n randomPoint = Vec2d(randX, randY)\n\n # Get angle between asteroid's position and random point.\n angleToRandomPoint = self.pos.get_angle_between(randomPoint)\n\n self.direction = Vec2d((1, 0))\n self.direction.rotate(angleToRandomPoint)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the IPF (inverse pole figure) colour for this orientation. Given a particular axis expressed in the laboratory coordinate system, one can compute the so-called IPF colour based on that direction.
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
    axis /= np.linalg.norm(axis)
    # find the axis lying in the fundamental zone
    for sym in symmetry.symmetry_operators():
        Osym = np.dot(sym, self.orientation_matrix())
        Vc = np.dot(Osym, axis)
        if Vc[2] < 0:
            Vc *= -1.  # using the upward direction
        uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
        uvw /= np.linalg.norm(uvw)
        uvw /= max(uvw)
        if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (uvw[2] >= 0. and uvw[2] <= 1.0):
            # print('found sym for sst')
            break
    return uvw
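A minimal standalone sketch of the colour construction, using plain NumPy and a hypothetical identity orientation matrix. The symmetry loop of the real method is omitted here; its only role is to bring Vc into the standard stereographic triangle before the corner weights are taken.

import numpy as np

g = np.eye(3)                            # hypothetical orientation matrix (crystal aligned with the lab frame)
axis = np.array([0., 0., 1.])            # lab direction to colour, e.g. for an IPF-Z map
Vc = g.dot(axis / np.linalg.norm(axis))  # the axis expressed in the crystal frame
if Vc[2] < 0:
    Vc *= -1.                            # keep the upward direction
# weights of the [001], [011] and [111] corners of the standard triangle -> R, G, B channels
rgb = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
rgb /= np.linalg.norm(rgb)
rgb /= rgb.max()
print(rgb)  # [1. 0. 0.]: a <001>-type direction is coloured red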
[ "def inverse(im):\n return 255 - im", "def color_pais ( self , pais ) :\n\n return self . _paises [ pais ] [ 0 ]\n if 58 - 58: i11iIiiIii % I1Ii111\n if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i", "def colorizer(x, y):\n r = min(1, 1 - y/3)\n g = min(1, 1 + y/3)\n b = 1/4 + x/16\n return r, g, b", "def _inverted_color(self):\n inverted_image = ImageOps.invert(self.image)\n inverted_image.save('inverted_' + self.image_file)", "def getComplementary(colour):\n return 255 - colour[0], 255 - colour[1], 255 - colour[2]", "def __idiv__(self, *args):\n return _coin.SbColor4f___idiv__(self, *args)", "def inverted_image(self):\n\n inverted_image = np.amax(self.image) - self.image\n max_val = np.amax(inverted_image)\n inverted_image = inverted_image / max_val\n inverted_image *= 100\n return inverted_image", "def pf(self, pole=[[1,0,0],[1,1,0],[1,1,1]], mode='contour',\n ifig=1, dm=7.5, dn=7.5, ssym=None, levels=None,\n axes=None, cmode=None,rot=0., nths=None):\n\n ## PF ploting directly from experimental pole figure\n if self.epf!=None:\n # mode is fixed to be 'im'\n self.epfplot(\n ifig=ifig, mode=mode, cmode=cmode, levels=levels, rot=rot\n )\n return\n ## end of experimental polefigure plotting\n\n ## if it is not an experimental polefigure,\n ## binning of COD on the spherical surface is carried out.\n if self.csym=='hexag' or self.csym=='trigo':\n p__ = pole ## for later use on pole index indication\n pole_tmp=[]\n for ip in range(len(pole)):\n p_ = pole[ip]\n p = [0,0,0]\n if len(pole[ip])!=4:\n input('pole must be four digit for hexag')\n raise IOError\n p[2] = p_[3]\n p[0] = p_[0] - p_[2]\n p[1] = p_[1] - p_[2]\n p[2] = p_[3]\n #print 'pole as below'\n #print p[0:3]\n pole_tmp.append(p[0:3])\n pole = pole_tmp\n pi = math.pi\n ## pole figure\n temp = []\n if mode=='dot':\n for ip in range(len(pole)):\n color = ['r','b','g','k','gray']\n marker =['o','x','+','d','.']\n for i in range(len(self.gr)):\n if len(self.gr)<5:\n cl = color[i]\n mk = marker[i]\n else:\n cl='k'\n mk='.'\n tm = self.dotplot(proj='pf', agrain=self.gr[i],\n npole=len(pole), ipole=ip+1,\n pole=pole[ip], ifig=ifig,\n cdim=self.cdim, cang=self.cang,\n csym=self.csym, mode=None,\n color=cl\n )\n elif mode=='trace':\n for ip in range(len(pole)):\n for i in range(len(self.gr)):\n if i==0:\n color='red'\n alpha=1.0\n marker='o'\n elif i==-1:\n color='blue'\n alpha=1.0\n marker='o'\n tm = self.dotplot(proj='pf', agrain=self.gr[i],\n alpha=alpha,color=color,\n marker=marker,\n pole=pole[ip], ifig=ifig,\n cdim=self.cdim, cang=self.cang,\n csym=self.csym, mode=None)\n color='black'\n marker='.'\n alpha=0.5\n\n elif mode in ['contour', 'contourf']:\n cnts = [] # cnt container\n ## figure setting\n # figure size\n fact = 2. 
#size factor\n figsize = (len(pole)*2.*fact, 1.*2.*fact)\n\n ## If axis is passed to the module, plotting is performed on them.\n if axes!=None:\n if len(axes)!=len(pole): raise IOError\n fig = plt.figure(ifig)\n pass\n elif axes==None:\n fig = plt.figure(ifig, figsize=figsize)\n nrow = len(pole)\n pass\n levs = []\n\n\n #### on each of the pole ------------------------------ ####\n N = []\n self.pfnode = []\n start = time.time()\n for ip in range(len(pole)):\n # Polar cell and nodes generation.\n f, nodes = self.cells(\n pole=pole[ip],\n ifig=None,\n dm=dm, dn=dn,\n cdim=self.cdim,\n cang=self.cang,\n csym=self.csym,\n nths=nths)\n\n\n N.append(nodes)\n self.pfnode.append(nodes)\n print('node shape:', nodes.shape)\n\n print(\"%5.2f seconds elapsed during calling\"\\\n \" self.cells\\n\"%(time.time() - start))\n del nodes\n\n ## resolution and pole figure plotting preferences\n nm = (360.0 - 0.)/dm; nn = (180.-90.)/dn\n # theta and phi and stereographic projection of them.\n theta = np.linspace(pi, pi/2., nn+1) #tilting angle\n phi = np.linspace(0.,2.*pi, nm+1) #rotation angle\n r = np.sin(theta)/(1-np.cos(theta)) #tilting angle to radius\n R, PHI = np.meshgrid(r,phi) #meshing radius and rotation angle\n PHI = PHI + pi/2. # rotation the pole figure up.\n x = R*np.cos(PHI); y = R*np.sin(PHI) #convert the polar coord-> cartensian\n\n ## somehow, I did not recognize that there is an intrinsic\n ## polar projection option in subplot. Therefore, at the time\n ## this defunc was made, I did transform them into the\n ## cartensian coordinate system above. Now I am wishing to\n ## make use of it, over excursion of pixel view of the\n ## binning of pole figures. That is mode=='im'.\n ## I do not like this lousy name again.\n ##\n ## On the top of it, matplotlib contours do not have\n ## polar coordinate system. Thus, if one wants to have\n ## contours with lines, this manual transform is the only\n ## way. (2011-Sept)\n\n for ip in range(len(pole)):\n nodes = N[ip] ## intensity at the current pole (ip)\n if axes!=None: ax=axes[ip]\n elif axes==None:\n ax=fig.add_subplot(1,nrow,ip+1)\n pass\n\n ax.set_frame_on(False)\n\n # contour plotting\n if mode=='contour':\n if levels==None:\n if cmode!=None:\n cnt = ax.contour(\n x, y, nodes,\n cmap=plt.cm.cmap_d[cmode])\n pass\n else: cnt = ax.contour(\n x, y, nodes)\n pass\n #cmap=plt.cm.bone)\n elif levels!=None:\n if cmode!=None: cnt = ax.contour(\n x, y, nodes, cmap=plt.cm.cmap_d[cmode])\n else: cnt = ax.contour(x, y, nodes, levels)#, cmap=plt.cm.bone);pass\n pass\n pass\n elif mode=='contourf':\n if levels==None:\n if cmode!=None:\n cnt = ax.contourf(\n x, y, nodes,\n cmap=plt.cm.cmap_d[cmode]\n )\n pass\n else: cnt = ax.contourf(x, y, nodes);pass\n elif levels!=None:\n if cmode!=None:\n cnt = ax.contouf(\n x,y,nodes,\n cmap=plt.cm.cmap_d[cmode]\n )\n else: cnt = ax.contourf(\n x, y, nodes,\n levels)#, cmap=plt.cm.bone)\n pass\n pass\n\n cnts.append(cnt)\n clev = cnt._levels\n levs.append(clev)\n\n # Contour's details.\n ax.set_axis_off();\n ax.set_aspect('equal')\n rx, ry = circle()\n ax.plot(rx, ry, 'k')\n\n # misc (decoration of the axes, with information)\n tcolors = cnt.tcolors\n for i in range(len(tcolors)):\n cc = tcolors[i][0][0:3]\n #if levels==None:\n if levels==None or ip==len(pole)-1:\n ## level line\n ax.plot(\n [1.28, 1.35],\n [1. - i * 0.2, 1. - i * 0.2],\n color=cc)\n ## level text\n ax.text(x=1.47, y= 1. 
- i*0.2 - 0.05,\n s='%3.2f'%(clev[i]),\n fontsize=4.*fact)\n pass\n ## pole plane\n if self.csym=='hexag' or self.csym=='trigo':\n ax.text(x=0.4,\n y=-1.18, s='(%1i%1i%1i%1i)'%\n\n (p__[ip][0],p__[ip][1],\n p__[ip][2],p__[ip][3]),\n fontsize=6.*fact)\n else:\n ax.text(x=0.4, y=-1.18, s='(%1i%1i%1i)'%\n (pole[ip][0],\n pole[ip][1],\n pole[ip][2]),\n fontsize=6.*fact)\n ## RD and TD indication\n ax.text(x=-0.05, y = 1.05,\n s='RD', fontsize = 4.*fact)\n ax.text(x= 1.05, y = 0.,\n s='TD', fontsize = 4.*fact)\n # Fixes the frame\n ax.set_xlim(-1.2, 1.5); ax.set_ylim(-1.2, 1.5)\n #### on each of the pole\n ####---------------------------\n ####---------------------------\n #for ip in range(len(pole)):\n return cnts # for mode in ['contour', 'contourf']\n\n elif mode=='im':\n fact = 2. #plot size factor\n figsize=(len(pole)*2.*fact, 1.*2.*fact)\n\n if axes!=None:\n raise IOError(\"'im' mode does not support\"\\\n \" imposition of axes\")\n fig = plt.figure(ifig, figsize=figsize)\n fig.clf() #clear the figure\n nrow = len(pole)\n Zs = []\n start = time.time()\n print('dm, dn:', dm,dn)\n for ip in range(len(pole)):\n f, Z = self.cells(\n pole=pole[ip], ifig=None,\n dm=dm, dn=dn,\n cdim=self.cdim, cang=self.cang,\n csym=self.csym,\n nths=nths)\n Zs.append(Z)\n\n print(\"%5.2f seconds elapsed during\"\\\n \" calling self.cells\\n\"%(time.time()-start))\n\n del Z\n ## resolution and pole figure plotting preferences\n nm = (360.0 - 0.)/dm; nn = (180.-90.)/dn\n # theta and phi and stereographic projection of them.\n theta = np.linspace(pi, pi/2., nn+1) #tilting\n phi = np.linspace(0.,2.*pi, nm+1) #rotation\n r = np.sin(theta)/(1-np.cos(theta)) #radius\n phi = phi + pi/2. # rotate the RD to the north\n # pole up (for 'im')\n phi = phi + rot # arbitrary rotation (2011 OCT)\n\n #phi, r = np.meshgrid(phi,r)\n zmax = 0.\n for ip in range(len(pole)):\n if np.max(Zs[ip])>zmax: zmax=np.max(Zs[ip].copy())\n pass\n\n for ip in range(len(pole)):\n Z = Zs[ip] ## intensity of the current pole\n nodes = Z.copy()\n Z = Z.transpose()\n\n ## polar coordinate system\n axp = fig.add_subplot(\n 1, nrow, ip+1, polar=True) #polar\n\n pcm = axp.pcolormesh(\n phi.copy(), r.copy(), Z.copy(),\n #color='red',# alpha=0.75\n )\n\n axp.set_axis_off()\n axp.set_aspect('equal')\n\n cnt = axp.contour(\n phi.copy(), r.copy(), Z.copy(),\n #cmap = plt.cm.gray_r,\n color='red',\n levels=levels)\n\n clev = cnt._levels\n #rx, ry = circle()\n tcolors = cnt.tcolors\n for i in range(len(tcolors)):\n cc = tcolors[i][0][0:3]\n if levels==None or ip==len(pole)-1:\n x0, y0 = 1.3, 0.8 - i * 0.2\n r0, t0 = cart2polar(x0,y0)\n axp.plot(t0, r0, marker='o',\n ls='None', mec='black',\n mfc=cc, #color=cc\n ms=7./len(pole),\n markeredgewidth=0.01/len(pole)\n )\n x2, y2 = 1.40, 0.8 - i *0.2 - 0.05\n r2, t2 = cart2polar(x2, y2)\n axp.text(x=t2, y=r2,\n s='%4.2f'%(clev[i]),\n fontsize=6.*fact/len(pole)\n )\n pass\n # pole figure indices\n x3, y3 = 0.4, -1.18\n r3, t3 = cart2polar(x3, y3)\n if self.csym=='hexga' or self.csym=='trigo':\n axp.text(\n x=t3, y=r3,\n s='(%1i%1i%1i%1i)'%\n (\n p__[ip][0],p__[ip][1],\n p__[ip][2],p__[ip][3]\n ) ,\n fontsize=8.*fact/len(pole)\n )\n pass\n else:\n axp.text(\n x=t3, y=r3,\n s='(%1i%1i%1i)'%\n (\n pole[ip][0],\n pole[ip][1],\n pole[ip][2]\n ),\n fontsize=8.*fact/len(pole)\n )\n pass\n ## RD and TD indication\n x4, y4 = -0.05, 1.05\n r4, t4 = cart2polar(x4, y4)\n axp.text(x=t4, y=r4,\n s='RD',\n fontsize = 6.*fact/len(pole))\n x5, y5 = 1.05, 0.\n r5, t5 = cart2polar(x5, y5)\n axp.text(x=t5, y=r5,\n s='TD',\n fontsize = 
6.*fact/len(pole))\n axp.set_axis_off()\n pass\n pass\n ## save figures\n fig.savefig('pcm.pdf')\n fig.savefig('pcm.eps')\n for i in range(len(fig.axes)):\n extent = fig.axes[i].get_window_extent()\n extent = extent.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(\n 'each_axpcm_%s.pdf'%str(i).zfill(2),\n bbox_inches=extent.expanded(1.1,1.1)\n )\n fig.savefig(\n 'each_axpcm_%s.eps'%str(i).zfill(2),\n bbox_inches=extent.expanded(1.1,1.1)\n )\n pass\n ##\n print(\"A figure's been saved to pcm.pdf and .eps\")\n print('each_axpcm_00.pdf and .eps')\n\n fig.clf()\n #return phi, r, Z\n pass", "def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n\t#Normalize columns to length 1 in 3D space\n\tleng = [0, 0, 0]\n\tcosx = [0, 0, 0]\n\tcosy = [0, 0, 0]\n\tcosz = [0, 0, 0]\n\tfor i in range(3):\n\t\tleng[i] = sqrt(MODx[i]*MODx[i] + MODy[i]*MODy[i] + MODz[i]*MODz[i])\n\t\tif not (leng[i] == 0):\n\t\t\tcosx[i] = MODx[i]/leng[i]\n\t\t\tcosy[i] = MODy[i]/leng[i]\n\t\t\tcosz[i] = MODz[i]/leng[i]\n\n\t#translation matrix\n\tif cosx[1] == 0:\n\t\tif cosy[1] == 0:\n\t\t\tif cosz[1] == 0: #2nd color is unspecified\n\t\t\t\tcosx[1] = cosz[0]\n\t\t\t\tcosy[1] = cosx[0]\n\t\t\t\tcosz[1] = cosy[0]\n\n\tif cosx[2] == 0:\n\t\tif cosy[2] == 0:\n\t\t\tif cosz[2] == 0: #3rd color is unspecified\n\t\t\t\t#3rd column will be cross product of first 2\n\t\t\t\t#fiji implementation allows for computation of 3rd color via Ruifroks method\n\t\t\t\t# but this is unnecessary for extracting just H&E \n\t\t\t\tcosx[2] = cosy[0] * cosz[1] - cosz[0] * cosy[1];\n\t\t\t\tcosy[2] = cosz[0] * cosx[1] - cosx[0] * cosz[1];\n\t\t\t\tcosz[2] = cosx[0] * cosy[1] - cosy[0] * cosx[1];\n\n\t#renormalize 3rd column\n\tleng = sqrt(cosx[2]*cosx[2] + cosy[2]*cosy[2] + cosz[2]*cosz[2])\n\tif leng != 0 and leng != 1:\n\t\tcosx[2] = cosx[2]/leng\n\t\tcosy[2] = cosy[2]/leng\n\t\tcosz[2] = cosz[2]/leng\n\n\tCOS3x3Mat = np.matrix([\n\t\t\t\t[cosx[0], cosy[0], cosz[0]], \n\t\t\t\t[cosx[1], cosy[1], cosz[1]],\n\t\t\t\t[cosx[2], cosy[2], cosz[2]]\n\t\t\t\t])\n\n\t#Note: I am skipping lines 390-459 of the matlab code, since\n\t# the determinant of the COS3x3Mat matrix is > 0 (~0.5). 
I think that\n\t# bit of code is trying to make the matrix invertible, but it already is\n\t# for H&E stain matrix \n\t#print(np.linalg.det(COS3x3Mat))\n\n\t#Invert the matrix\n\t# Note that this is done manually in the matlab code.\n\tQ3x3Mat = np.linalg.inv(COS3x3Mat)\n\tQ3x3MatInverted = COS3x3Mat #Just following the matlab code...\n\n\t#Compute transmittance \n\trowR = img.shape[0]\n\tcolR = img.shape[1]\n\n\t#These are the 1 channel transmittances of each dye \n\tDye1_transmittance = np.zeros([rowR, colR])\n\tDye2_transmittance = np.zeros([rowR, colR])\n\tDye3_transmittance = np.zeros([rowR, colR])\n\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\tRGB1 = img[r, c]\n\t\t\tRGB1[RGB1==0] = 1 #Avoid log0\n\t\t\tACC = -np.log(RGB1 / 255)\n\t\t\ttransmittances = 255 * np.exp(-ACC*Q3x3Mat)\n\t\t\ttransmittances = transmittances[0,:]\n\t\t\ttransmittances[transmittances>255] = 255\n\n\t\t\tDye1_transmittance[r,c] = transmittances[0,0]\n\t\t\tDye2_transmittance[r,c] = transmittances[0,1]\n\t\t\tDye3_transmittance[r,c] = transmittances[0,2]\n\n\t#Construct lookup tables to convert 1 channel dye images to \n\t# \t3 channel RGB representations \n\trLUT = np.zeros([256,3])\n\tgLUT = np.zeros([256,3])\n\tbLUT = np.zeros([256,3])\n\n\tfor i in range(3):\n\t\tfor j in range(256):\n\t\t\tif cosx[i] < 0:\n\t\t\t\trLUT[255-j, i] = 255 + (j * cosx[i])\n\t\t\telse:\n\t\t\t\trLUT[255-j, i] = 255 - (j * cosx[i])\n\n\t\t\tif cosy[i] < 0:\n\t\t\t\tgLUT[255-j, i] = 255 + (j * cosy[i])\n\t\t\telse:\n\t\t\t\tgLUT[255-j, i] = 255 - (j * cosy[i])\n\n\t\t\tif cosz[i] < 0:\n\t\t\t\tbLUT[255-j, i] = 255 + (j * cosz[i])\n\t\t\telse:\n\t\t\t\tbLUT[255-j, i] = 255 - (j * cosz[i])\n\n\t#Apply the lookup table to first dye (Hematoxilin)\n\tDye1_color_im = np.zeros(img.shape)\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\t#print(floor(Dye1_transmittance[r,c]))\n\t\t\tDye1_color_im[r,c,0] = rLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,1] = gLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,2] = bLUT[floor(Dye1_transmittance[r,c]),0]\n\n\tDye1_color_im = Dye1_color_im.astype(np.uint8)\n\n\treturn Dye1_transmittance, Dye1_color_im", "def ipix_swap_axes(self, ipix, colwise=False):\n xy = self.ipix_to_xypix(ipix, colwise)\n return self.xy_pix_to_ipix(xy, not colwise)", "def falco_hex_aperture_LUVOIR_A(wf, nrings, hexrad, hexsep, xc=0.0, yc=0.0,\n **kwargs):\n ngrid = wf.ngrid\n\n ap = np.zeros([ngrid, ngrid], dtype=np.float64)\n\n isDark = True if \"DARK\" in kwargs else False\n angle = kwargs.get(\"ROTATION\", 0)\n\n counter = 0\n for iring in range(nrings+1):\n x = hexsep * iring * cosd(30)\n y = hexsep * (iring * cosd(60) - nrings)\n\n for iseg in range(2*nrings-iring+1):\n xhex = xc + x*cosd(angle) - y*sind(angle)\n yhex = yc + x*sind(angle) + y*cosd(angle)\n\n if iring != 0 or not (iseg == nrings and isDark):\n counter += 1\n if not any(counter == np.array([1, 12, 113, 114, 125, 126])):\n ap += proper.prop_polygon(wf, 6, hexrad, xhex, yhex,\n ROTATION=angle)\n\n if iring != 0:\n xhex = -x*cosd(angle) - y*sind(angle) + xc\n yhex = -x*sind(angle) + y*cosd(angle) + yc\n counter += 1\n if not any(counter == np.array([1, 12, 113, 114, 125, 126])):\n ap = ap + proper.prop_polygon(wf, 6, hexrad, xhex, yhex,\n ROTATION=angle)\n\n y += hexsep\n\n return ap", "def invert(self):\r\n red=abs(255-self.red);\r\n green=abs(255-self.green);\r\n blue=abs(255-self.blue);\r\n return Color(red,green,blue,self.alpha);", "def getPixelColor(self, x, y):\n if((x < 0) or (y < 0) or (x >= 
self.ROWS) or (y >= self.COLS)):\n return 0\n if(len(self.np) >= 0):\n return self.np[x + (y* self.COLS)]\n return 0", "def _get_field_op_colour(idx, vec, term, op_parser: FockDrudge.OP_PARSER):\n\n char = vec.indices[0]\n assert char == CR or char == AN\n return char, idx if char == CR else -idx", "def _get_Ic(self, Rn, Delta):\n return pi*Delta/(2.0*e)/Rn", "def axis2euler(axis_angle):\n R = cv2.Rodrigues(axis_angle)[0]\n euler = rot2euler(R)\n R_hat = euler2rot(euler)\n assert (np.all(R - R_hat < 1e-3))\n\n return euler", "def invert_colors(self):\n\n im = self._image.as_numpy_array()\n self._image.im_representation = 255 - im", "def gradient_image(ax, direction=0.3, cmap_range=(0, 1), extent=(0, 1, 0, 1), **kwargs):\n xlim, ylim = ax.get_xlim(), ax.get_ylim()\n \n phi = direction * np.pi / 2\n v = np.array([np.cos(phi), np.sin(phi)])\n X = np.array([[v @ [1, 0], v @ [1, 1]],\n [v @ [0, 0], v @ [0, 1]]])\n a, b = cmap_range\n X = a + (b - a) / X.max() * X\n im = ax.imshow(X, interpolation='bicubic', extent=extent,\n vmin=0, vmax=1, **kwargs)\n \n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.set_aspect('auto')\n \n return im", "def circ(dec,dip,alpha):#From PmagPy (Tauxe et al., 2016)\r\n rad=np.pi/180.\r\n D_out,I_out=[],[]\r\n dec,dip,alpha=dec*rad ,dip*rad,alpha*rad\r\n dec1=dec+np.pi/2.\r\n isign=1\r\n if dip!=0: isign=(abs(dip)/dip)\r\n dip1=(dip-isign*(np.pi/2.))\r\n t=[[0,0,0],[0,0,0],[0,0,0]]\r\n v=[0,0,0]\r\n t[0][2]=np.cos(dec)*np.cos(dip)\r\n t[1][2]=np.sin(dec)*np.cos(dip)\r\n t[2][2]=np.sin(dip)\r\n t[0][1]=np.cos(dec)*np.cos(dip1)\r\n t[1][1]=np.sin(dec)*np.cos(dip1)\r\n t[2][1]=np.sin(dip1)\r\n t[0][0]=np.cos(dec1)\r\n t[1][0]=np.sin(dec1)\r\n t[2][0]=0\r\n for i in range(101):\r\n psi=float(i)*np.pi/50.\r\n v[0]=np.sin(alpha)*np.cos(psi)\r\n v[1]=np.sin(alpha)*np.sin(psi)\r\n v[2]=np.sqrt(abs(1.-v[0]**2 - v[1]**2))\r\n elli=[0,0,0]\r\n for j in range(3):\r\n for k in range(3):\r\n elli[j]=elli[j] + t[j][k]*v[k]\r\n Dir=cart2dir(elli)\r\n D_out.append(Dir[0])\r\n I_out.append(Dir[1])\r\n return D_out,I_out", "def ferromagnetic_disperion(p, x):\n return 4 * p[0] * (1 - np.cos(p[1] * x))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the misorientation axis from the misorientation matrix.
def misorientation_axis_from_delta(delta):
    n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
    n /= np.sqrt((delta[1, 2] - delta[2, 1]) ** 2 + (delta[2, 0] - delta[0, 2]) ** 2 + (delta[0, 1] - delta[1, 0]) ** 2)
    return n
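As a quick standalone check (plain NumPy, not part of the original row): build a rotation of known axis and angle with the Rodrigues formula and verify that the antisymmetric part of the matrix recovers that axis.

import numpy as np

def rotation_about_axis(axis, angle):
    # Rodrigues formula: rotation matrix for a rotation of `angle` (radians) about `axis`
    axis = np.asarray(axis, dtype=float)
    axis /= np.linalg.norm(axis)
    K = np.array([[0., -axis[2], axis[1]],
                  [axis[2], 0., -axis[0]],
                  [-axis[1], axis[0], 0.]])
    return np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * K.dot(K)

delta = rotation_about_axis([1., 1., 0.], np.radians(30.))
n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
n /= np.linalg.norm(n)
print(n)  # ~ +/-[0.707, 0.707, 0.]: the [110] rotation axis is recovered (up to sign)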
[ "def _get_unit_axis(self, axis):\n r = self._get_column_d()\n if axis == 1:\n d = np.asarray([0, r], dtype=np.double)\n elif axis == 2:\n d = np.asarray([r*0.5*np.sqrt(3), r*0.5], dtype=np.double)\n elif axis == 3:\n d = np.asarray([r*0.5*np.sqrt(3), -r*0.5], dtype=np.double)\n elif axis == 4:\n d = np.asarray([0, -r], dtype=np.double)\n elif axis == 5:\n d = np.asarray([-r*0.5*np.sqrt(3), -r*0.5], dtype=np.double)\n elif axis == 6:\n d = np.asarray([-r*0.5*np.sqrt(3), r*0.5], dtype=np.double)\n else:\n raise ValueError('Axis value {} is not in [1,6]'.format(axis))\n return d", "def _get_axis_num(self, xarr, axis):\n py_axnum = xarr.get_axis_num(axis)\n if numpy.isfortran(xarr.values):\n return py_axnum\n\n if self._ends_with_channel_axis(xarr):\n if axis == len(xarr.dims) - 1:\n return axis\n else:\n return len(xarr.dims) - py_axnum - 2\n else:\n return len(xarr.dims) - py_axnum - 1", "def u_axis(self) -> np.ndarray:\n return self._u_vector / self.u_length", "def get_offx_axis(self):\n if(self.__rnhx is None):\n raise Exception(\"Cannot return x subsurface offset axis without running extended imaging\")\n if(self.__sym): ohx = -self.__nhx*self.__dx\n else: ohx = 0.0\n\n return self.__rnhx, ohx, self.__dx", "def get_rotation_axis( quat ):\n # extract W component\n sinThetaOver2Sq = 1.0 - (quat[ 3 ] ** 2)\n \n if sinThetaOver2Sq <= 0.0:\n # assert here for the time being\n assert False\n print(\"rotation axis was identity\")\n \n # identity quaternion or numerical imprecision.\n # return a valid vector\n # we'll treat -Z as the default\n out[:] = [ 0.0, 0.0, -1.0 ]\n return out\n \n oneOverSinThetaOver2 = 1.0 / math.sqrt( sinThetaOver2Sq )\n \n # we use the x,y,z values\n return numpy.array(\n [\n quat[ 0 ] * oneOverSinThetaOver2,\n quat[ 1 ] * oneOverSinThetaOver2,\n quat[ 2 ] * oneOverSinThetaOver2\n ]\n )", "def invertAxis(matrix, axis=0):\n axis = getAxis(axis)\n x, y, z = matrix[:3]\n for v in (x, y, z):\n v[axis.index] *= -1\n return pm.dt.Matrix(x, y, z)", "def GetAxis(self) -> \"itkVectorD3\":\n return _itkVersorPython.itkVersorD_GetAxis(self)", "def _ijdim_to_pydim(self, axis):\n if str(axis) in ['X', 'Y', 'Z', 'C', 'T']:\n return str(axis).lower()\n return str(axis)", "def _get_majorAxis(self) -> \"adsk::core::Ptr< adsk::core::Vector2D >\" :\n return _core.EllipticalArc2D__get_majorAxis(self)", "def get_axis(header):\n mywcs = wcs.WCS(header)\n specwcs = mywcs.sub([wcs.WCSSUB_SPECTRAL])\n return specwcs.wcs_pix2world(np.arange(header['NAXIS{0}'.format(mywcs.wcs.spec+1)]), 0)", "def axis_and_angle_of_rotation(self):\n if self.n_dims == 2:\n return self._axis_and_angle_of_rotation_2d()\n elif self.n_dims == 3:\n return self._axis_and_angle_of_rotation_3d()", "def get_ellipsoid_semiminor_axis(ellipsoid):\n return ellipsoid[0] * np.sqrt(1.0 - ellipsoid[1]**2)", "def compute_misorientation(euler_angle1, euler_angle2):\n\n # Assemble orientation matrices\n M1 = orientation_matrix(euler_angle1)\n M2 = orientation_matrix(euler_angle2)\n\n # Calculate misorientation\n M = np.dot(M1, np.linalg.inv(M2))\n\n # Get angle\n cosTheta = (M[0,0]+M[1,1]+M[2,2]-1.)/2\n eps = 1e-6\n if 1-eps < cosTheta < 1+eps:\n cosTheta = 1\n\n return np.rad2deg(acos(cosTheta))", "def _get_majorAxis(self) -> \"adsk::core::Ptr< adsk::core::Vector2D >\" :\n return _core.Ellipse2D__get_majorAxis(self)", "def _dimShape(self):\n naxis = self.header['NAXIS']\n axes = naxis*[0]\n for j in range(naxis):\n axes[j] = self.header['NAXIS'+`j+1`]\n axes.reverse()\n return tuple(axes)", "def channel_axis(self, batch):\n axis = 
self._model.channel_axis()\n if not batch:\n axis = axis - 1\n return axis", "def get_principal_axis(atoms: ndarray, V: ndarray) -> ndarray:\n inertia = get_inertia_tensor(atoms, V)\n\n eigval, eigvec = np.linalg.eig(inertia)\n\n principal_axis: ndarray = eigvec[np.argmax(eigval)]\n\n return principal_axis", "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def get_angle_and_axis(self):\n # special case: no rotation\n if self == Rotation():\n angle = 0.\n axis = Vector((1., 0., 0.))\n return angle, axis\n\n # construct unit vector along rotation axis\n R = self.get_matrix()\n x = R[2, 1] - R[1, 2]\n y = R[0, 2] - R[2, 0]\n z = R[1, 0] - R[0, 1]\n axis = Vector((x, y, z)).unit_vector()\n\n # helper Vector\n w = Vector((z, x, y))\n\n # vector perpendicular to u\n v = axis.cross(w)\n\n # ========== magnitude =========================== #\n cos = 0.5 * (np.trace(R) - 1)\n acos = np.arccos(cos)\n\n # use sine to resolve ambiguity of acos\n # The following line of code is taken from\n # http://vhm.mathematik.uni-stuttgart.de/Vorlesungen/\n # Lineare_Algebra/Folien_Drehachse_und_Drehwinkel.pdf\n # (join the above two lines to get the proper URL)\n sin = v.dot(self.dot(axis))\n if sin < 0:\n angle = - acos\n else:\n angle = acos\n # ================================================ #\n\n return angle, axis", "def getOrientation(matrix=None,errorValue=(0,0,0)):\n if matrix==None:\n matrix=getRotationMatrix()\n if matrix==None:\n return errorValue \n yaw=atan2(matrix[0][1], matrix[1][1])\n pitch=asin(-matrix[2][1])\n roll=atan2(-matrix[2][0], matrix[2][2])\n return yaw,pitch,roll" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the misorientation axis with another crystal orientation. This vector is by definition common to both crystalline orientations.
def misorientation_axis(self, orientation):
    delta = np.dot(self.orientation_matrix(), orientation.orientation_matrix().T)
    return Orientation.misorientation_axis_from_delta(delta)
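To illustrate why this axis is "common to both" orientations, here is a standalone sketch (plain NumPy, illustrative only) with two arbitrary orientation matrices: the axis extracted from delta is invariant under delta, so rotating it back to the sample frame with either orientation matrix gives the same vector.

import numpy as np

def rot_x(deg):
    a = np.radians(deg)
    return np.array([[1., 0., 0.],
                     [0., np.cos(a), np.sin(a)],
                     [0., -np.sin(a), np.cos(a)]])

def rot_y(deg):
    a = np.radians(deg)
    return np.array([[np.cos(a), 0., -np.sin(a)],
                     [0., 1., 0.],
                     [np.sin(a), 0., np.cos(a)]])

g1, g2 = rot_x(20.), rot_y(30.)   # two arbitrary orientations
delta = np.dot(g1, g2.T)          # same product as in misorientation_axis()
axis = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
axis /= np.linalg.norm(axis)
# the misorientation axis is invariant under delta, so expressing it in the sample
# frame with either of the two orientation matrices gives the same vector:
print(np.allclose(g1.T.dot(axis), g2.T.dot(axis)))  # True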
[ "def compute_misorientation(euler_angle1, euler_angle2):\n\n # Assemble orientation matrices\n M1 = orientation_matrix(euler_angle1)\n M2 = orientation_matrix(euler_angle2)\n\n # Calculate misorientation\n M = np.dot(M1, np.linalg.inv(M2))\n\n # Get angle\n cosTheta = (M[0,0]+M[1,1]+M[2,2]-1.)/2\n eps = 1e-6\n if 1-eps < cosTheta < 1+eps:\n cosTheta = 1\n\n return np.rad2deg(acos(cosTheta))", "def get_rotation_axis( quat ):\n # extract W component\n sinThetaOver2Sq = 1.0 - (quat[ 3 ] ** 2)\n \n if sinThetaOver2Sq <= 0.0:\n # assert here for the time being\n assert False\n print(\"rotation axis was identity\")\n \n # identity quaternion or numerical imprecision.\n # return a valid vector\n # we'll treat -Z as the default\n out[:] = [ 0.0, 0.0, -1.0 ]\n return out\n \n oneOverSinThetaOver2 = 1.0 / math.sqrt( sinThetaOver2Sq )\n \n # we use the x,y,z values\n return numpy.array(\n [\n quat[ 0 ] * oneOverSinThetaOver2,\n quat[ 1 ] * oneOverSinThetaOver2,\n quat[ 2 ] * oneOverSinThetaOver2\n ]\n )", "def compute_misorientation(euler1, euler2, symlist=None):\r\n quat1 = euler2quat(euler1)\r\n quat2 = euler2quat(euler2)\r\n\r\n if symlist is None:\r\n symlist = symeq('cubic')\r\n misori=disori(quat1,quat2,symlist)\r\n eps = 1e-6\r\n if 1-eps < misori < 1+eps:\r\n misori = 1\r\n\r\n return N.rad2deg(math.acos(misori))", "def orientation(a, b, c) -> int:\n slope_diff = (b.y - a.y) * (c.x - b.x) / (b.x - a.x) * (c.y - b.y)\n if slope_diff > 0:\n return 1\n elif slope_diff < 0:\n return -1\n else:\n return 0", "def u_axis(self) -> np.ndarray:\n return self._u_vector / self.u_length", "def compute_orientation_error(self, orientations, true_orientations):\n ori = orientations.copy()\n true_ori = true_orientations.copy()\n ori[ori < 0] *= -1\n true_ori[true_ori < 0] *= -1\n\n return ori - true_ori", "def get_angle_and_axis(self):\n # special case: no rotation\n if self == Rotation():\n angle = 0.\n axis = Vector((1., 0., 0.))\n return angle, axis\n\n # construct unit vector along rotation axis\n R = self.get_matrix()\n x = R[2, 1] - R[1, 2]\n y = R[0, 2] - R[2, 0]\n z = R[1, 0] - R[0, 1]\n axis = Vector((x, y, z)).unit_vector()\n\n # helper Vector\n w = Vector((z, x, y))\n\n # vector perpendicular to u\n v = axis.cross(w)\n\n # ========== magnitude =========================== #\n cos = 0.5 * (np.trace(R) - 1)\n acos = np.arccos(cos)\n\n # use sine to resolve ambiguity of acos\n # The following line of code is taken from\n # http://vhm.mathematik.uni-stuttgart.de/Vorlesungen/\n # Lineare_Algebra/Folien_Drehachse_und_Drehwinkel.pdf\n # (join the above two lines to get the proper URL)\n sin = v.dot(self.dot(axis))\n if sin < 0:\n angle = - acos\n else:\n angle = acos\n # ================================================ #\n\n return angle, axis", "def _get_unit_axis(self, axis):\n r = self._get_column_d()\n if axis == 1:\n d = np.asarray([0, r], dtype=np.double)\n elif axis == 2:\n d = np.asarray([r*0.5*np.sqrt(3), r*0.5], dtype=np.double)\n elif axis == 3:\n d = np.asarray([r*0.5*np.sqrt(3), -r*0.5], dtype=np.double)\n elif axis == 4:\n d = np.asarray([0, -r], dtype=np.double)\n elif axis == 5:\n d = np.asarray([-r*0.5*np.sqrt(3), -r*0.5], dtype=np.double)\n elif axis == 6:\n d = np.asarray([-r*0.5*np.sqrt(3), r*0.5], dtype=np.double)\n else:\n raise ValueError('Axis value {} is not in [1,6]'.format(axis))\n return d", "def vector_orientation (x, y):\n\tif x <= 0.3826 and x >= -0.3826 and y <= 1 and y >= 0.9238:\n\t\treturn \"North\"\n\telif x < 0.8660 and x > 0.3826 and y < 0.9238 and y > 
0.5000:\n\t\treturn \"Northeast\"\n\telif x <= 1 and x >= 0.8660 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"East\"\n\telif x < 0.9335 and x > 0.3090 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southeast\"\n\telif x <= 0.3090 and x >= -0.3090 and y <= -0.9510 and y >= -1:\n\t\treturn \"South\"\n\telif x < -0.3090 and x > -0.9335 and y < -0.3583 and y > -0.9510:\n\t\treturn \"Southwest\"\n\telif x <= -0.8660 and x >= -1 and y <= 0.5000 and y >= -0.3583:\n\t\treturn \"West\"\n\telif x < -0.3826 and x > -0.8660 and y < 0.9238 and y > 0.5000:\n\t\treturn \"Northwest\"\n\telse:\n\t\treturn \"No orientation\"", "def rotation_axis(quat):\n # extract W component\n sinThetaOver2Sq = 1.0 - (quat[3] ** 2)\n\n # check for zero before we sqrt\n if sinThetaOver2Sq <= 0.0:\n # identity quaternion or numerical imprecision.\n # return a valid vector\n # we'll treat -Z as the default\n return np.array([0.0, 0.0, -1.0], dtype=quat.dtype)\n\n oneOverSinThetaOver2 = 1.0 / np.sqrt(sinThetaOver2Sq)\n\n # we use the x,y,z values\n return np.array(\n [\n quat[0] * oneOverSinThetaOver2,\n quat[1] * oneOverSinThetaOver2,\n quat[2] * oneOverSinThetaOver2\n ],\n dtype=quat.dtype\n )", "def axis_and_angle_of_rotation(self):\n if self.n_dims == 2:\n return self._axis_and_angle_of_rotation_2d()\n elif self.n_dims == 3:\n return self._axis_and_angle_of_rotation_3d()", "def get_offx_axis(self):\n if(self.__rnhx is None):\n raise Exception(\"Cannot return x subsurface offset axis without running extended imaging\")\n if(self.__sym): ohx = -self.__nhx*self.__dx\n else: ohx = 0.0\n\n return self.__rnhx, ohx, self.__dx", "def orientation(p, q, r):\n # use the slope to get orientation\n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])\n\n if val == 0: # colinear\n return 0\n\n return 1 if val > 0 else 2 # clock or counterclokwise", "def calc_orientation_diff(orientation_1: float, orientation_2: float) -> float:\n return math.pi - abs(abs(orientation_1 - orientation_2) - math.pi)", "def axis_calc(self, axis):\n # TODO: Rewrite this method to allow non-90deg planes to work\n # Figure out which axes the plane exists in\n axes = [1, 1, 1]\n axes[0] = (axis.v0.x - axis.v1.x - axis.v2.x) / 3.0\n axes[1] = (axis.v0.y - axis.v1.y - axis.v2.y) / 3.0\n axes[2] = (axis.v0.z - axis.v1.z - axis.v2.z) / 3.0\n # if axis.v0.x == axis.v1.x == axis.v2.x:\n # axes[0] = 0\n # if axis.v0.y == axis.v1.y == axis.v2.y:\n # axes[1] = 0\n # if axis.v0.z == axis.v1.z == axis.v2.z:\n # axes[2] = 0\n\n # Figure out uaxis xyz\n u = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n u[i] = axes[i]\n axes[i] = 0\n break\n\n # Figure out vaxis xyz\n v = [0, 0, 0]\n for i in range(3):\n if axes[i] != 0.0:\n v[i] = -axes[i]\n break\n\n uaxis = Axis(u[0], u[1], u[2])\n vaxis = Axis(v[0], v[1], v[2])\n return (uaxis, vaxis)", "def _axis_and_angle_of_rotation_3d(self):\n eval_, evec = np.linalg.eig(self.rotation_matrix)\n real_eval_mask = np.isreal(eval_)\n real_eval = np.real(eval_[real_eval_mask])\n evec_with_real_eval = np.real_if_close(evec[:, real_eval_mask])\n error = 1e-7\n below_margin = np.abs(real_eval) < (1 + error)\n above_margin = (1 - error) < np.abs(real_eval)\n re_unit_eval_mask = np.logical_and(below_margin, above_margin)\n evec_with_real_unitary_eval = evec_with_real_eval[:, re_unit_eval_mask]\n # all the eigenvectors with real unitary eigenvalues are now all\n # equally 'valid' if multiple remain that probably means that this\n # rotation is actually a no op (i.e. 
rotate by 360 degrees about any\n # axis is an invariant transform) but need to check this. For now,\n # just take the first\n if evec_with_real_unitary_eval.shape[1] != 1:\n # TODO confirm that multiple eigenvalues of 1 means the rotation\n # does nothing\n return None, None\n axis = evec_with_real_unitary_eval[:, 0]\n axis /= np.sqrt((axis ** 2).sum()) # normalize to unit vector\n # to find the angle of rotation, build a new unit vector perpendicular\n # to the axis, and see how it rotates\n axis_temp_vector = axis - np.random.rand(axis.size)\n perpendicular_vector = np.cross(axis, axis_temp_vector)\n perpendicular_vector /= np.sqrt((perpendicular_vector ** 2).sum())\n transformed_vector = np.dot(self.rotation_matrix, perpendicular_vector)\n angle_of_rotation = np.arccos(np.dot(transformed_vector, perpendicular_vector))\n chirality_of_rotation = np.dot(\n axis, np.cross(perpendicular_vector, transformed_vector)\n )\n if chirality_of_rotation < 0:\n angle_of_rotation *= -1.0\n return axis, angle_of_rotation", "def get_orientation(self):\n if -4.9 < accelerometer.acceleration[0] < 4.9:\n self.orientation = 0\n else:\n self.orientation = 1", "def _inverse_rotation_matrix(self):\n return simplify(self._parent_rotation_matrix**-1)", "def get_orientation_map(crystal_map):\n eulers = crystal_map.isig[1:4]\n eulers.map(_euler2axangle_signal, inplace=True, show_progressbar=False)\n orientation_map = eulers.as_signal2D((0,1))\n #Set calibration to same as signal\n x = orientation_map.axes_manager.signal_axes[0]\n y = orientation_map.axes_manager.signal_axes[1]\n x.name = 'x'\n x.scale = crystal_map.axes_manager.navigation_axes[0].scale\n x.units = 'nm'\n y.name = 'y'\n y.scale = crystal_map.axes_manager.navigation_axes[0].scale\n y.units = 'nm'\n return orientation_map", "def cbf_gonio_to_effective_axis_fixed_old(cbf_gonio):\n\n # First construct the real rotation axis, as the difference in rotating\n # the identity matrix at the end of the scan and the beginning.\n\n x = cbf_gonio.rotate_vector(0.0, 1, 0, 0)\n y = cbf_gonio.rotate_vector(0.0, 0, 1, 0)\n z = cbf_gonio.rotate_vector(0.0, 0, 0, 1)\n\n R = matrix.rec(x + y + z, (3, 3)).transpose()\n\n x1 = cbf_gonio.rotate_vector(1.0, 1, 0, 0)\n y1 = cbf_gonio.rotate_vector(1.0, 0, 1, 0)\n z1 = cbf_gonio.rotate_vector(1.0, 0, 0, 1)\n\n R1 = matrix.rec(x1 + y1 + z1, (3, 3)).transpose()\n\n RA = R1 * R.inverse()\n\n rot = r3_rotation_axis_and_angle_from_matrix(RA)\n\n # Then, given this, determine the component of the scan which is fixed -\n # which will need to be with respect to the unrotated axis. N.B. this\n # will not be unique, but should be correct modulo a free rotation about\n # the shifted axis.\n\n start = cbf_gonio.get_rotation_range()[0]\n\n # want positive rotations => if negative invert axis\n axis = matrix.col(rot.axis)\n angle = rot.angle()\n if angle < 0:\n axis = -1 * axis\n # common sense would suggest in here that if the angle is -ve should\n # be made +ve - works OK for omega scans but not phi scans, probably\n # incomplete goniometer definition problem...\n # start = -start\n\n S = axis.axis_and_angle_as_r3_rotation_matrix(start, deg=True)\n\n return axis, S.inverse() * R" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the disorientation with another crystal orientation. Considering all the possible crystal symmetries, the disorientation is defined as the combination of the minimum misorientation angle and the misorientation axis lying in the fundamental zone, which can be used to bring the two lattices into coincidence.
def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
    the_angle = np.pi
    symmetries = crystal_structure.symmetry_operators()
    (gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix())  # nicknames
    for (g1, g2) in [(gA, gB), (gB, gA)]:
        for j in range(symmetries.shape[0]):
            sym_j = symmetries[j]
            oj = np.dot(sym_j, g1)  # the crystal symmetry operator is left applied
            for i in range(symmetries.shape[0]):
                sym_i = symmetries[i]
                oi = np.dot(sym_i, g2)
                delta = np.dot(oi, oj.T)
                mis_angle = Orientation.misorientation_angle_from_delta(delta)
                if mis_angle < the_angle:
                    # now compute the misorientation axis, should check if it lies in the fundamental zone
                    mis_axis = Orientation.misorientation_axis_from_delta(delta)
                    # here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
                    # print(mis_axis, mis_angle*180/np.pi, np.dot(oj.T, mis_axis))
                    the_angle = mis_angle
                    the_axis = mis_axis
                    the_axis_xyz = np.dot(oi.T, the_axis)
    return the_angle, the_axis, the_axis_xyz
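To make the role of the symmetry operators concrete, here is a standalone toy example (plain NumPy, not the library code): with only two "symmetry" operators, the 80 degree rotation below already reduces to a 10 degree disorientation. The real cubic group used above has 24 operators.

import numpy as np

def rot_z(deg):
    a = np.radians(deg)
    return np.array([[np.cos(a), np.sin(a), 0.],
                     [-np.sin(a), np.cos(a), 0.],
                     [0., 0., 1.]])

def mis_angle(delta):
    # misorientation angle from the trace of the delta matrix
    return np.arccos(np.clip(0.5 * (np.trace(delta) - 1.), -1., 1.))

g1, g2 = rot_z(0.), rot_z(80.)          # two orientations 80 degrees apart about Z
syms = [np.eye(3), rot_z(90.)]          # toy symmetry set: identity + 90 degree rotation about Z
angles = [mis_angle(np.dot(np.dot(si, g2), np.dot(sj, g1).T))
          for si in syms for sj in syms]
print(np.degrees(min(angles)))          # ~ 10.0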
[ "def compute_misorientation(euler1, euler2, symlist=None):\r\n quat1 = euler2quat(euler1)\r\n quat2 = euler2quat(euler2)\r\n\r\n if symlist is None:\r\n symlist = symeq('cubic')\r\n misori=disori(quat1,quat2,symlist)\r\n eps = 1e-6\r\n if 1-eps < misori < 1+eps:\r\n misori = 1\r\n\r\n return N.rad2deg(math.acos(misori))", "def compute_misorientation(euler_angle1, euler_angle2):\n\n # Assemble orientation matrices\n M1 = orientation_matrix(euler_angle1)\n M2 = orientation_matrix(euler_angle2)\n\n # Calculate misorientation\n M = np.dot(M1, np.linalg.inv(M2))\n\n # Get angle\n cosTheta = (M[0,0]+M[1,1]+M[2,2]-1.)/2\n eps = 1e-6\n if 1-eps < cosTheta < 1+eps:\n cosTheta = 1\n\n return np.rad2deg(acos(cosTheta))", "def orientation(p, q, r):\n # use the slope to get orientation\n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])\n\n if val == 0: # colinear\n return 0\n\n return 1 if val > 0 else 2 # clock or counterclokwise", "def calc_orientation_diff(orientation_1: float, orientation_2: float) -> float:\n return math.pi - abs(abs(orientation_1 - orientation_2) - math.pi)", "def orientation(a, b, c) -> int:\n slope_diff = (b.y - a.y) * (c.x - b.x) / (b.x - a.x) * (c.y - b.y)\n if slope_diff > 0:\n return 1\n elif slope_diff < 0:\n return -1\n else:\n return 0", "def polarizability_diagonal(self, unit=\"au\"):\n\n tensor = self.polarizability_tensor()\n diag = []\n for i, line in enumerate(tensor):\n for j, el in enumerate(line):\n if i==j:\n diag.append(el)\n if unit == \"au\" or unit == \"bohr\":\n return diag\n elif unit == \"angstrom\":\n return map(lambda x: x / 1.8897162**3, diag)", "def get_orientation_map(crystal_map):\n eulers = crystal_map.isig[1:4]\n eulers.map(_euler2axangle_signal, inplace=True, show_progressbar=False)\n orientation_map = eulers.as_signal2D((0,1))\n #Set calibration to same as signal\n x = orientation_map.axes_manager.signal_axes[0]\n y = orientation_map.axes_manager.signal_axes[1]\n x.name = 'x'\n x.scale = crystal_map.axes_manager.navigation_axes[0].scale\n x.units = 'nm'\n y.name = 'y'\n y.scale = crystal_map.axes_manager.navigation_axes[0].scale\n y.units = 'nm'\n return orientation_map", "def get_orientation(self):\n if -4.9 < accelerometer.acceleration[0] < 4.9:\n self.orientation = 0\n else:\n self.orientation = 1", "def compute_orientation_error(self, orientations, true_orientations):\n ori = orientations.copy()\n true_ori = true_orientations.copy()\n ori[ori < 0] *= -1\n true_ori[true_ori < 0] *= -1\n\n return ori - true_ori", "def calculate_angle_from_discontinuity(orig_eofs: eof.EOFDataForAllDOYs):\n\n list_of_doys = tools.doy_list()\n doy1 = orig_eofs.eofdata_for_doy(1)\n\n ndoys = len(list_of_doys)\n \n # set DOY1 initialization\n rots = np.array([doy1.eof1vector, doy1.eof2vector])\n\n # project onto previous day\n for d in list_of_doys:\n if d+1 > ndoys: # for last day in cycle, return to January 1\n doyn = orig_eofs.eofdata_for_doy(1)\n else:\n doyn = orig_eofs.eofdata_for_doy(d+1)\n\n B = np.array([doyn.eof1vector, doyn.eof2vector]).T \n A = np.array([rots[0,:], rots[1,:]]).T\n \n rots = np.matmul(np.matmul(B, B.T),A).T\n \n # calculate discontinuity between Jan 1 and Jan 1 at end of rotation cycle\n discont = tools.angle_btwn_vectors(doy1.eof1vector, rots[0,:])\n \n # determine direction of rotation\n cross_angle = np.dot(doy1.eof1vector, rots[1,:])/(np.linalg.norm(doy1.eof1vector)*np.linalg.norm(rots[1,:]))\n if cross_angle <= 0:\n return -discont/ndoys\n else: \n return discont/ndoys", "def orientation(self):\r\n 
tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "def symmetry_rotation(self, bond_to_rotate, normal_direction, angles):\n\t\tpass", "def is_orientation_legal(self, newrot, newmir):\n if self.has_vdd and newrot != 0:\n return False\n if self.has_gnd and newrot != 2:\n return False\n return True", "def sync_phase(self):\n return _np.math.pi - _np.math.asin(self.en_lost_rad/self.gap_voltage)", "def get_orientation_code(\n azimuth=None, direction=None, orientation=\"horizontal\"\n):\n if azimuth is not None:\n # angles are only from 0 to 360\n azimuth = azimuth % 360\n\n value = abs(np.cos(np.deg2rad(azimuth)))\n\n if orientation == \"horizontal\":\n if value >= angle(15):\n return \"N\"\n elif value <= angle(105):\n return \"E\"\n elif (value < angle(15)) and (value >= angle(45)):\n return \"1\"\n elif (value < angle(45)) and (value >= angle(105)):\n return \"2\"\n\n elif orientation == \"vertical\":\n if value >= angle(15):\n return \"Z\"\n else:\n return \"3\"\n\n elif direction is not None:\n try:\n return forced_orientation[direction.lower()]\n except KeyError:\n raise ValueError(\n f\"Could not match {direction} with allowed direction (x, y, z)\"\n )", "def calculateOrientation(self):\n\n cumulativeAngleChange = 0\n for node in self.nodes:\n # Calculate the angle change due to node\n angleIn, angleOut = self.nodeAngles[node.ID]\n # Angle change is out - in if positively oriented,\n # and in - out if negatively oriented\n angleChange = (node.o*(angleOut - angleIn)) % (2 * np.pi)\n if node.o == 1:\n cumulativeAngleChange += angleChange\n elif node.o == -1:\n cumulativeAngleChange -= angleChange\n\n self.o = 1 if cumulativeAngleChange > 0 else -1", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta", "def _inverse_rotation_matrix(self):\n return simplify(self._parent_rotation_matrix**-1)", "def rotation_ellipticity(self):\n if self.sigma_x > self.sigma_y:\n temp_rotation = self.__rotation % math.pi\n else:\n temp_rotation = (self.__rotation+(math.pi/2)) % math.pi\n return(temp_rotation)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the two omega angles which satisfy the Bragg condition. For a given crystal orientation sitting on a vertical rotation axis, there are exactly two omega positions for which a given (hkl) reflection fulfils the Bragg condition.
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
    (h, k, l) = hkl.miller_indices()
    theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
    lambda_nm = 1.2398 / lambda_keV
    gt = self.orientation_matrix().T  # gt = g^{-1} in Poulsen 2004
    Gc = hkl.scattering_vector()
    A = np.dot(Gc, gt[0])
    B = - np.dot(Gc, gt[1])
    # A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
    # B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
    C = -2 * np.sin(theta) ** 2 / lambda_nm  # the minus sign comes from the main equation
    omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
    if verbose:
        print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (omega_1, omega_2))
    return omega_1, omega_2
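The helper Orientation.solve_trig_equation is not shown in this row. A standard way to solve A*cos(w) + B*sin(w) = C — sketched here as a standalone function, not necessarily the library's exact implementation — is to rewrite the left-hand side as sqrt(A^2 + B^2) * cos(w - phi):

import numpy as np

def solve_a_cos_plus_b_sin(A, B, C):
    # solve A*cos(w) + B*sin(w) = C; returns the two solutions in degrees, in [0, 360)
    R = np.sqrt(A ** 2 + B ** 2)
    if abs(C) > R:
        raise ValueError('no solution: |C| > sqrt(A^2 + B^2)')
    phi = np.arctan2(B, A)        # A*cos(w) + B*sin(w) = R*cos(w - phi)
    d = np.arccos(C / R)
    return np.degrees(phi + d) % 360, np.degrees(phi - d) % 360

print(solve_a_cos_plus_b_sin(1.0, 0.0, 0.5))  # (60.0, 300.0)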
[ "def compute_omega( g, tilt ): \n kz = g[2] # component along the rotation axis\n modg2 = (g*g).sum(axis=0)\n num = -modg2 - 2*tilt*kz\n den = 2*np.sqrt(1 - tilt*tilt)\n kx = num / den\n arg = modg2 - kx*kx - kz*kz\n mask = arg >= 0\n ky = np.sqrt( arg * mask ) # positive\n omegaplus = anglevecs2D( g[0], g[1], kx, ky )\n omegaminus= anglevecs2D( g[0], g[1], kx, -ky )\n return omegaplus, omegaminus", "def calc_ang_acc(self, u, omega, I, L, b, k):\n # Calculate torque given control input and physical constants\n tau = self.calc_torque(u, L, b, k)\n\n # Calculate body frame angular acceleration using Euler's equation\n omegaddot = np.dot(np.linalg.inv(\n I), (tau - np.cross(omega, np.dot(I, omega))))\n\n return omegaddot", "def roangles3D(dens, Bx, By, Bz):\n # Calculates the relative orientation angles between the density structures and the magnetic field.\n # INPUTS\n # dens - regular cube with the values of density \n # Bx -\n # By -\n # Bz -\n #\n # OUTPUTS\n #\n #\n \n grad=np.gradient(dens, edge_order=2)\n \n # JCIM - are you sure this is the order of the output? gx = [1], gy = [0] and gz = [2]?\n #gx=grad[1]; gy=grad[0]; gz=grad[2];\n gx=grad[0]; gy=grad[1]; gz=grad[2];\n \n normgrad=np.sqrt(gx*gx+gy*gy+gz*gz)\n normb =np.sqrt(Bx*Bx+By*By+Bz*Bz)\n \n zerograd=(normgrad==0.).nonzero()\t\n zerob =(normb ==0.).nonzero()\n \n normcross=np.sqrt((gy*Bz-gz*By)**2+(gx*Bz-gz*Bx)**2+(gx*By-gy*Bx)**2)\n normdot =gx*Bx+gy*By+gz*Bz\t\n \n # Here I calculate the angle using atan2 to avoid the numerical problems of acos or asin\n phigrad=np.arctan2(normcross,normdot) \n \n # The cosine of the angle between the iso-density and B is the sine of the angle between\n # the density gradient and B.\t\n cosphi=np.sin(phigrad)\n \n # JCIM: what is this value 32768?\n cosphi[(normgrad == 0.).nonzero()]=-32768\n cosphi[(normb == 0.).nonzero()]=-32768\n \n return cosphi", "def ang_sep(l1,b1,l2,b2):\n sin_theta = np.sqrt((np.cos(b2 * _d2r) * np.sin((l1 - l2) * _d2r)) ** 2 +\n (np.cos(b1 * _d2r) * np.sin(b2 * _d2r) - \n np.sin(b1 * _d2r) * np.cos(b2 * _d2r) * np.cos((l1 - l2) * _d2r)) ** 2)\n cos_theta = (np.cos(b1 * _d2r) * np.cos(b2 * _d2r) *\n np.cos((l1 - l2) * _d2r) +\n np.sin(b1 * _d2r) * np.sin(b2 * _d2r))\n tan_theta = sin_theta/cos_theta\n return np.arctan2(sin_theta,cos_theta) / _d2r", "def calcBA(atmcoordi, atmcoordj, atmcoordk):\n vecji = atmcoordi - atmcoordj\n vecjk = atmcoordk - atmcoordj\n normji = np.sqrt( np.dot(vecji, vecji) )\n normjk = np.sqrt( np.dot(vecjk, vecjk) )\n dotijk = np.dot(vecji, vecjk)\n angleijk = 180 - np.arccos( dotijk / (normji * normjk) ) * rad2grad \n return angleijk", "def findAngleB():\n B=math.asin(b/c)\n return B", "def rotB(self, axis, angle, resetCijkl=True):\n M = bondmat(axis=axis, angle=angle)\n self.Cvoigt = np.dot(M, self.Cvoigt)\n self.Cvoigt = np.dot(self.Cvoigt, M.T)\n if resetCijkl: self.Voigt2Cijkl()\n return", "def get_angle(self):\n gyro_z = self.read_gyroscope().z\n # print(gyro_z)\n angle_xy = self.calc_accel_angle()\n # print(math.degrees(angle_xy))\n dt = time.time() - self.timestamp\n #y_n = (1 - self.a) * angle_xy + self.a * self.angle\n self.angle = self.a * (self.angle + gyro_z * dt) + (1 - self.a) * angle_xy\n #self.angle = angle_xy\n self.timestamp = time.time()\n return self.angle, dt", "def bearing_rads(self):\n return (np.pi / 180.0 ) * (90.0 - self.bearing)", "def Rbody2nav_to_angle(R, output_units='rad', rotation_sequence='321'):\n yaw = np.arctan2(R[1,0], R[0,0])\n #pitch = -np.arctan(R[2,0] / np.sqrt(1.-R[2,0]**2)) # Farrel eqn 2.45\n 
pitch = -np.arcsin(R[2,0]) # this is simpler\n roll = np.arctan2(R[2,1], R[2,2] )\n \n # Apply necessary unit transformations.\n if output_units == 'rad':\n pass\n elif output_units == 'deg':\n yaw, pitch, roll = np.degrees([yaw, pitch, roll])\n \n return yaw, pitch, roll", "def bifurcation_angle_vec(self,node,where='local'):\n child_node1,child_node2 = self._get_child_nodes(node,where=where)\n scaled_1 = child_node1.content['p3d'].xyz - node.content['p3d'].xyz\n scaled_2 = child_node2.content['p3d'].xyz - node.content['p3d'].xyz\n amp = lambda a: np.sqrt(np.sum((a)**2))\n return np.arccos(np.dot(scaled_1,scaled_2)/(amp(scaled_1)*amp(scaled_2))) / (2*np.pi/360)", "def bures_angle(A, B):\n if A.isket or A.isbra:\n A = A.proj()\n if B.isket or B.isbra:\n B = B.proj()\n if A.dims != B.dims:\n raise TypeError('A and B do not have same dimensions.')\n return np.arccos(fidelity(A, B))", "def test_generate_rotation_rotor_and_angle(self):\n from clifford.tools.g3 import generate_rotation_rotor, random_unit_vector, angle_between_vectors\n\n euc_vector_m = random_unit_vector()\n euc_vector_n = random_unit_vector()\n theta = angle_between_vectors(euc_vector_m, euc_vector_n)\n print(theta)\n\n rot_rotor = generate_rotation_rotor(theta, euc_vector_m, euc_vector_n)\n v1 = euc_vector_m\n v2 = rot_rotor*euc_vector_m*~rot_rotor\n theta_return = angle_between_vectors(v1, v2)\n print(theta_return)\n\n testing.assert_almost_equal(theta_return, theta)\n testing.assert_almost_equal(euc_vector_n.value, v2.value)", "def beta(self) -> float:\n return self.angles[1]", "def two_theta_max(self):\n if type(self.bounds[4]) == float:\n nub = self.bounds[4]\n else:\n nub = np.linspace(self.bounds[4][0], self.bounds[4][1], 1000)\n\n if type(self.bounds[5]) == float:\n delb = self.bounds[5]\n else:\n delb = np.linspace(self.bounds[5][0], self.bounds[5][1], 1000)\n\n delb, nub = np.meshgrid(delb, nub)\n R = np.cos(np.radians(delb)) * np.cos(np.radians(nub))\n Z = np.arccos(R)\n\n return np.degrees(np.max(Z)), np.degrees(np.min(Z))", "def calc_acc(vx,vy,beta):\n\n # Calculate magnitude of velocity:\n vmag = math.sqrt((vx**2)+(vy**2))\n\n # Calculate ax:\n ax = -beta*vmag*vx\n\n # Calculate ay:\n ay = -beta*vmag*vy-9.81\n\n return(ax,ay)", "def release_orientational_restraints(k_t, k_r, beta):\n\n def harmonic_restraint(r):\n return k_t*r**2\n\n Z_numeric = integrate_radial_Z(\n harmonic_restraint,\n beta,\n r_max=np.inf # i like to live dangerously\n )\n Z_exact = integrate_radial_Z_exact(k_t, beta)\n\n np.testing.assert_almost_equal(Z_exact, Z_numeric)\n dG_translation = standard_state_correction(Z_numeric, beta)\n u_fn = functools.partial(rmsd.angle_u, k=k_r)\n Z_rotation = integrate_rotation_Z(u_fn, beta)\n # A_ij = (-1/beta)*ln(Z_j/Z_i)\n dG_rotation = (-1/beta)*np.log(1/Z_rotation)\n return dG_translation, dG_rotation", "def get_angle_and_axis(self):\n # special case: no rotation\n if self == Rotation():\n angle = 0.\n axis = Vector((1., 0., 0.))\n return angle, axis\n\n # construct unit vector along rotation axis\n R = self.get_matrix()\n x = R[2, 1] - R[1, 2]\n y = R[0, 2] - R[2, 0]\n z = R[1, 0] - R[0, 1]\n axis = Vector((x, y, z)).unit_vector()\n\n # helper Vector\n w = Vector((z, x, y))\n\n # vector perpendicular to u\n v = axis.cross(w)\n\n # ========== magnitude =========================== #\n cos = 0.5 * (np.trace(R) - 1)\n acos = np.arccos(cos)\n\n # use sine to resolve ambiguity of acos\n # The following line of code is taken from\n # http://vhm.mathematik.uni-stuttgart.de/Vorlesungen/\n # 
Lineare_Algebra/Folien_Drehachse_und_Drehwinkel.pdf\n # (join the above two lines to get the proper URL)\n sin = v.dot(self.dot(axis))\n if sin < 0:\n angle = - acos\n else:\n angle = acos\n # ================================================ #\n\n return angle, axis", "def angle_diff(phi1, phi2):\n return ((phi1-phi2 + np.pi) % (2*np.pi)) - np.pi", "def view_angle(self):\n view_i = -self.Ri[2,:].T\n view_j = -self.Rj[2,:].T\n return np.arccos(np.dot(view_i.T, view_j))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the instrument transformation matrix for the given rotation offsets. This function computes a 3x3 rotation matrix (passive convention) that transforms the sample coordinate system using the rotation offsets around the X, Y and Z axes (in degrees).
def compute_instrument_transformation_matrix(rx_offset, ry_offset, rz_offset):
    angle_zr = np.radians(rz_offset)
    angle_yr = np.radians(ry_offset)
    angle_xr = np.radians(rx_offset)
    Rz = np.array([[np.cos(angle_zr), -np.sin(angle_zr), 0],
                   [np.sin(angle_zr), np.cos(angle_zr), 0],
                   [0, 0, 1]])
    Ry = np.array([[np.cos(angle_yr), 0, np.sin(angle_yr)],
                   [0, 1, 0],
                   [-np.sin(angle_yr), 0, np.cos(angle_yr)]])
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(angle_xr), -np.sin(angle_xr)],
                   [0, np.sin(angle_xr), np.cos(angle_xr)]])
    T = Rz.dot(np.dot(Ry, Rx))
    return T
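A quick sanity check on the composed matrix (this sketch assumes the function above and NumPy are in scope): whatever the offsets, T must be a proper rotation, i.e. orthogonal with determinant +1.

import numpy as np

T = compute_instrument_transformation_matrix(rx_offset=1.0, ry_offset=-0.5, rz_offset=2.0)  # offsets in degrees
print(np.allclose(T.dot(T.T), np.eye(3)))  # True: T is orthogonal
print(np.isclose(np.linalg.det(T), 1.0))   # True: proper rotation (no reflection)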
[ "def rotation_matrix(self) -> Tensor:\n return self.extrinsics[..., :3, :3]", "def getRotationMatrix(pitch, yaw):\n\trotationPitch = getRotationPitch(pitch)\n\trotationYaw = getRotationYaw(yaw)\n\treturn rotationYaw * rotationPitch", "def getRotationAndTranslationMatrix(rotation, translation):\n return np.array([cos(-rotation), -sin(-rotation), translation[0],\n sin(-rotation), cos(-rotation), translation[1],\n 0, 0, 1]\n ).reshape((3, 3))", "def rotation_matrix(delta):\n return np.array([[np.cos(delta), -np.sin(delta)],[np.sin(delta), np.cos(delta)]])", "def getRotationMatrix( self):", "def transformFromRotationPosition3D(*args):\n return _almathswig.transformFromRotationPosition3D(*args)", "def rotation_matrix(self):\n return np.array([self.axis_u, self.axis_v, self.axis_w])", "def rotation_matrix(u):\n c = 0.0\n s = 1.0\n # u has to be a versor in the l2-norm\n u = u / np.linalg.norm(u)\n x = u[0]\n y = u[1]\n z = u[2]\n C = 1. - c\n R = np.array([\n [(x * x * C + c), (x * y * C - z * s), (x * z * C + y * s)],\n [(y * x * C + z * s), (y * y * C + c), (y * z * C - x * s)],\n [(z * x * C - y * s), (z * y * C + x * s), (z * z * C + c)]])\n return R", "def _rotation_matrix(psi,theta,phi, R):\n cospsi = cos(psi)\n sinpsi = sin(psi)\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n\n sinphi_sinpsi = sinphi * sinpsi\n sinphi_cospsi = sinphi * cospsi \n\n cosphi_sinpsi = cosphi * sinpsi\n cosphi_cospsi = cosphi * cospsi\n \n R[0,0] = costheta * cosphi_cospsi - sinphi_sinpsi\n R[0,1] = - costheta * cosphi_sinpsi - sinphi_cospsi\n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi_cospsi + cosphi_sinpsi\n R[1,1] = cosphi_cospsi - costheta * sinphi_sinpsi\n R[1,2] = sintheta * sinphi\n R[2,0] = - cospsi * sintheta\n R[2,1] = sintheta*sinpsi\n R[2,2] = costheta", "def rotator_to_matrix(rotator: Rotator):\n return rotation_to_matrix([rotator.pitch, rotator.yaw, rotator.roll])", "def TMatrix(scaling, rotation, translation):\n XCos = numpy.cos(numpy.radians(rotation[0]))\n YCos = numpy.cos(numpy.radians(rotation[1]))\n ZCos = numpy.cos(numpy.radians(rotation[2]))\n\n XSin = numpy.sin(numpy.radians(rotation[0]))\n YSin = numpy.sin(numpy.radians(rotation[1]))\n ZSin = numpy.sin(numpy.radians(rotation[2]))\n Translate = numpy.array(\n [[scaling, 0, 0, translation[0]], [0, scaling, 0, translation[1]], [0, 0, scaling, translation[2]],\n [0, 0, 0, 1]])\n RotateX = numpy.array([[1, 0, 0, 0], [0, XCos, -XSin, 0], [0, XSin, XCos, 0], [0, 0, 0, 1]])\n RotateY = numpy.array([[YCos, 0, YSin, 0], [0, 1, 0, 0], [-YSin, 0, YCos, 0], [0, 0, 0, 1]])\n RotateZ = numpy.array([[ZCos, -ZSin, 0, 0], [ZSin, ZCos, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n return numpy.dot(RotateZ, numpy.dot(RotateY, numpy.dot(RotateX, Translate)))", "def transformFromRotation3D(pRotation):\n return _almathswig.transformFromRotation3D(pRotation)", "def rotationMatrix(self):\n\n # R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n # self.exteriorOrientationParameters[5])\n\n return self.__rotationMatrix", "def get_rotationMatrix(self):\n rot_mat = quat2mat(self.quat)\n try:\n [U, s, V] = np.linalg.svd(rot_mat)\n return np.dot(U, V)\n except:\n return np.eye(3)", "def transformation_matrix(self, s1, s2, s3, t1, t2 ,t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array([\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - 
t1[2]]\n ])\n\n P = np.array([\n [s2[0] - s1[0], s2[1] - s1[1]],\n [s3[0] - s1[0], s3[1] - s1[1]]\n ])\n\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2,np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n l.error(\"An error occured during the transformation with error: \" + str(e))\n return -1, -1\n\n return T, V0", "def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta", "def rotation3DFromTransform(pT):\n return _almathswig.rotation3DFromTransform(pT)", "def RotationMatrix_Image1(self):\r\n return Compute3DRotationMatrix(self.__relativeOrientationImage1[3], self.__relativeOrientationImage1[4],\r\n self.__relativeOrientationImage1[5])", "def build_matrix(self):\n # Note that by nature, a camera perspective inverts everything\n # So we negate everything and also do it in reverse\n\n # Overrides PositionMatrix, reverse everything, ignore scale \n m = Matrix44.identity()\n m = Matrix44.from_translation(-1 * Vector3(self.position)) * m\n m = Matrix44.from_z_rotation(-math.radians(self.roll)) * m\n m = Matrix44.from_y_rotation(-math.radians(self.yaw)) * m\n m = Matrix44.from_x_rotation(-math.radians(self.pitch)) * m\n if self.tp:\n # Third person enabled\n m = Matrix44.from_translation([0,0,-self.tp_distance]) * m\n \n self.m = m\n self.mvp = numpy.array(self.p * self.m).astype(\"f4\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the tilts for topotomography alignment.
def topotomo_tilts(self, hkl, T=None, verbose=False):
    if T is None:
        T = np.eye(3)  # identity by default
    gt = self.orientation_matrix().transpose()
    Gc = hkl.scattering_vector()
    Gs = gt.dot(Gc)  # in the cartesian sample CS
    # apply instrument specific settings
    Gs = np.dot(T.T, Gs)
    # find topotomo tilts
    ut = np.arctan(Gs[1] / Gs[2])
    lt = np.arctan(-Gs[0] / (Gs[1] * np.sin(ut) + Gs[2] * np.cos(ut)))
    if verbose:
        print('up tilt (samrx) should be %.3f' % (ut * 180 / np.pi))
        print('low tilt (samry) should be %.3f' % (lt * 180 / np.pi))
    return ut, lt
[ "def toe_positions(self):\n torso_frame = self.data.xmat['torso'].reshape(3, 3)\n torso_pos = self.data.xpos['torso']\n torso_to_toe = self.data.xpos[_TOES] - torso_pos\n return torso_to_toe.dot(torso_frame)", "def OToL_unmapped_tips(self):\n debug(\"OTOL unmapped\")\n if self.config.unmapped == \"remove\":\n for key in self.data.otu_dict:\n if \"^ot:ottId\" not in self.data.otu_dict[key]:\n # second condition for OToL unmapped taxa, not present in own_data\n if u\"^ot:treebaseOTUId\" in self.data.otu_dict[key]:\n self.data.remove_taxa_aln_tre(key)\n else:\n i = 1\n for key in self.data.otu_dict:\n i = i + 1\n if \"^ot:ottId\" not in self.data.otu_dict[key]:\n self.data.otu_dict[key][\"^ot:ottId\"] = self.data.ott_mrca\n if self.data.ott_mrca in self.ids.ott_to_name:\n self.data.otu_dict[key]['^ot:ottTaxonName'] = self.ids.ott_to_name[self.data.ott_mrca]\n else:\n debug(\"think about a way...\")\n tx = APIWrapper().taxomachine\n nms = tx.taxon(self.data.ott_mrca)\n taxon_name = nms[u'unique_name']\n self.data.otu_dict[key]['^ot:ottTaxonName'] = \"unknown_{}\".format(taxon_name)", "def __align_mtms_to_psms(self):\n T_psm1 = self.__psm1_kin__.forward(self.__psm1_last_jnt__)\n T_psm2 = self.__psm2_kin__.forward(self.__psm2_last_jnt__)\n \n T_mtml = self.__mtml_kin__.forward(self.__last_mtml_jnt__)\n T_mtmr = self.__mtmr_kin__.forward(self.__last_mtmr_jnt__)\n T_mtml_000 = self.__mtml_kin__.forward([0,0,0,0,0,0,0])\n \n T_mtml[0:3,0:3] = (((self.__T_ecm__ * (T_mtml_000 ** -1)) ** -1) * T_psm2)[0:3,0:3]\n T_mtmr[0:3, 0:3] = T_psm1[0:3, 0:3]\n \n jnt_mtml = self.__mtml_kin__.inverse(T_mtml)\n jnt_mtmr = self.__mtmr_kin__.inverse(T_mtmr)\n print('aligning')\n if self.__mode__ == self.MODE.hardware:\n self.__hw_mtml__.move_joint_list( jnt_mtml.tolist(), range(0, len(jnt_mtml)), interpolate=True)\n self.__hw_mtmr__.move_joint_list( jnt_mtmr.tolist(), range(0, len(jnt_mtmr)), interpolate=True)", "def get_eman2_tilts(info_file):\n with open(info_file, \"r\") as f:\n info = json.load(f)\n tlt_params = np.array(info[\"tlt_params\"])\n tlt_angles = tlt_params[:, 3]\n tilt_axis = np.mean(tlt_params[:, 2])\n\n return tilt_axis, tlt_angles", "def upstream_of_leptons(self):\n p4l1 = get_TLorentzVector(self.get_leptons()[0])\n p4l2 = get_TLorentzVector(self.get_leptons()[1])\n p4met = get_TLorentzVector(self.get_met())\n return -p4met-p4l1-p4l2", "def relative_angle_planets(self, t, body1, body2):\n pos_body1 = body1.get_barycentric(t)[0:2]\n pos_body2 = body2.get_barycentric(t)[0:2]\n angle_body1 = np.mod(np.arctan2(pos_body1[1], pos_body1[0]), 2*np.pi)\n angle_body2 = np.mod(np.arctan2(pos_body2[1], pos_body2[0]), 2*np.pi)\n return angle_body1 - angle_body2", "def pt(self, t):\n return foot(vec.zero, self) + t*self.dir", "def arrow_specs(self):\n a = self.viewee.array\n n, m = a.shape\n i, j = self.viewee.loc\n center = j + 0.5, n - i - 0.5\n angle = -np.pi / 2 * self.viewee.state\n return center, angle", "def tangent_propogator_heatmap_in_piball_space(links, w_ins = default_w_in, w_outs = default_w_out,\n tau_n=dna_params['tau_n'], tau_d=dna_params['tau_d'], unwraps=None, color=\"linker\", mfig=None, **kwargs):\n # Create a sphere\n pi = np.pi\n r = pi\n cos = np.cos\n sin = np.sin\n theta, phi = np.mgrid[0:pi:101j, 0:2*pi:101j]\n x = r * sin(theta) * cos(phi)\n y = r * sin(theta) * sin(phi)\n z = r * cos(theta)\n\n if mfig is None:\n mfig = mlab.figure()\n\n #draw a translucent pi ball\n mlab.mesh(x,y,z,color=(0.67, 0.77, 0.93), opacity=0.5)\n\n #first generate axis angle representations of 
orientations from given linker lengths\n num_chains = links.size\n nodes = np.zeros((num_chains, 3)) #points in pi ball space\n w_ins, w_outs = convert.resolve_wrapping_params(unwraps, w_ins, w_outs, num_chains)\n\n for i in range(num_chains):\n #compute orientation of second nucleosome in dinucleosome chain (assumes\n #first entry orientation is the identity)\n Onext = OmegaNextEntry(links[i], tau_n=tau_n, tau_d=tau_d,\n w_in=w_ins[i], w_out=w_outs[i], helix_params=helix_params_best)\n axis, angle = ncr.axis_angle_from_matrix(Onext)\n #scale unit vector by angle of rotation\n nodes[i] = axis*angle\n\n #plot points in pi ball space\n pts = mlab.points3d(nodes[:, 0], nodes[:, 1], nodes[:, 2], scale_factor = 0.25, **kwargs)\n pts.glyph.scale_mode = 'scale_by_vector'\n #color by linker length\n if color == \"linker\":\n colors = 1.0 * (links - min(links))/max((links - min(links)))\n #color by unwrapping amount\n else:\n colors = 1.0 * (unwraps - min(unwraps))/(max(unwraps) - min(unwraps))\n\n pts.mlab_source.dataset.point_data.scalars = colors\n mlab.axes()\n return mfig, nodes", "def convert_tlt_eman2(info_file, map_file, output):\n\n tilt_axis, tlt_angles = get_eman2_tilts(info_file)\n lines = [\n \"TILT SERIES %s\\n\" % map_file,\n \"\\n\",\n \" AXIS\\n\",\n \"\\n\",\n \" TILT AZIMUTH %f\\n\" % tilt_axis,\n \"\\n\",\n \"\\n\",\n \" ORIENTATION\\n\",\n \" PHI 0.000\\n\",\n ]\n\n for i, angle in enumerate(tlt_angles):\n line = \" IMAGE %03d\" % (i + 1)\n line += \" ORIGIN [ 0.000 0.000 ]\"\n line += \" TILT ANGLE %.3f\" % angle\n line += \" ROTATION 0.000\\n\"\n lines.append(line)\n\n lines.extend([\"\\n\", \"\\n\", \"END\"])\n\n with open(output, \"w\") as f:\n f.writelines(lines)", "def optimal_angle_and_tilt(sensors_metadata_clean, latitude, worst_sh, worst_Az, transmissivity,\n Max_Isol, module_length):\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n optimal_angle_flat = calc_optimal_angle(180, latitude, transmissivity) # assume surface azimuth = 180 (N,E), south facing\n sensors_metadata_clean['tilt']= np.vectorize(acos)(sensors_metadata_clean['Zdir']) #surface tilt angle in rad\n sensors_metadata_clean['tilt'] = np.vectorize(degrees)(sensors_metadata_clean['tilt']) #surface tilt angle in degrees\n sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'],\n degrees(optimal_angle_flat)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n\n optimal_spacing_flat = calc_optimal_spacing(worst_sh, worst_Az, optimal_angle_flat, module_length)\n sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat)\n sensors_metadata_clean['surface_azimuth'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n sensors_metadata_clean['B']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n surface_area_flat = module_length * (\n sensors_metadata_clean.array_s / 2 + module_length * [cos(optimal_angle_flat)])\n\n # calculate the pv module area within the area of each sensor point\n sensors_metadata_clean['area_module'] = np.where(sensors_metadata_clean['tilt'] >= 5,\n sensors_metadata_clean.AREA_m2,\n module_length ** 2 * (\n sensors_metadata_clean.AREA_m2 / surface_area_flat))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = 
np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth, sensors_metadata_clean.B,\n sensors_metadata_clean.total_rad_Whm2, Max_Isol)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n return sensors_metadata_clean", "def build_center_uncenter_transforms(image_shape):\n\n # need to swap rows and cols here apparently! confusing!\n center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5\n tform_uncenter = skimage.transform.SimilarityTransform(translation=-center_shift)\n tform_center = skimage.transform.SimilarityTransform(translation=center_shift)\n return tform_center, tform_uncenter", "def meantip(tailfit):\n return tail2tipangles(tailfit).mean()", "def to_tiling(self) -> Tiling:\n pos = [len(self) - 1, self.odd_len()]\n tiling: Tiling = {}\n\n for pbit in self.parity_vector:\n tpos = tuple(pos)\n # print(tpos)\n if pbit == 0:\n if tpos not in tiling:\n tiling[tpos] = [None] * 4\n tiling[tpos][0] = 0\n pos[0] -= 1\n else:\n tpos_east = tpos[0] + 1, tpos[1]\n tpos_south = tpos[0], tpos[1] - 1\n if tpos_east not in tiling:\n tiling[tpos_east] = [None] * 4\n if tpos_south not in tiling:\n tiling[tpos_south] = [None] * 4\n # print(\"east\",tpos_east)\n tiling[tpos_east][3] = 1\n tiling[tpos_south][0] = 0\n pos[0] -= 1\n pos[1] -= 1\n\n for pos in tiling:\n tiling[pos] = SquareGlues(*tiling[pos])\n\n return Tiling(tiling, Collatz_tileset)", "def align(self):\n\t\trospy.loginfo(\"alignment\")\n\t\twhile abs(self.trolley.theta) > 0.05 or self.trolley.y > 0.05:\t\n\t\t\tx_err = 0.2 * self.trolley.x\n\t\t\trospy.loginfo(\"Loop\")\n\t\t\trospy.loginfo(self.trolley.theta)\t\t\t\n\t\t\tif self.trolley.theta > 0.2:\n\t\t\t\tvl = 0.15 \n\t\t\t\tvr = 0.30\n\n\t\t\telif self.trolley.theta > 0.1:\n\t\t\t\tvl = 0.15\n\t\t\t\tvr = 0.22\n\n\t\t\telif self.trolley.theta > 0.01:\n\t\t\t\tvl = 0.15\n\t\t\t\tvr = 0.17\n\n\t\t\telif self.trolley.theta < -0.2:\n\t\t\t\tvr = 0.15\n\t\t\t\tvl = 0.24\n\n\t\t\telif self.trolley.theta < -0.1:\n\t\t\t\tvr = 0.15\n\t\t\t\tvl = 0.19\n\n\t\t\telif self.trolley.theta < -0.01:\n\t\t\t\tvr = 0.15\n\t\t\t\tvl = 0.17\n\n\t\t\telse:\n\t\t\t\trospy.loginfo(\"angle aligned\")\n\t\t\t\tif x_err > 0.05:\n\t\t\t\t\tvl = 0.16\n\t\t\t\t\tvr = 0.12\n\n\t\t\t\telif x_err < -0.05:\n\t\t\t\t\tvl = 0.12\n\t\t\t\t\tvr = 0.16\n\n\t\t\t\telse:\n\t\t\t\t\trospy.loginfo(\"aligned\")\n\t\t\t\t\tvl = 0.2\n\t\t\t\t\tvr = 0.2\n\n\t\t\tself.vel_publish(vl,vr,2)\n\t\t\tif self.trolley.y < 0.1:\n\t\t\t\tbreak", "def tangent(self, pos):", "def GetTangentPoint(self,x,y,outx,outy):\n firstPoint=Point(x,y)\n fromPoint=Point(outx,outy)\n twoPointDistance=self._center.Dist(fromPoint)\n if(twoPointDistance<self._radius):\n return None,None\n originPoint=point.Point(0.0,0.0) \n tanMod=math.sqrt(pow(twoPointDistance,2)-pow(self._radius,2))\n tgAngle=math.asin(self._radius/twoPointDistance)\n #Compute the x versor\n xPoint=Point(1.0,0.0)\n xVector=Vector(originPoint,xPoint)\n twoPointVector=Vector(fromPoint,self._center)\n rightAngle=twoPointVector.Ang(xVector) \n cx,cy=self._center.getCoords() \n if(outy>cy): #stupid situation \n rightAngle=-rightAngle\n posAngle=rightAngle+tgAngle\n negAngle=rightAngle-tgAngle\n #Compute the Positive Tangent\n xCord=math.cos(posAngle)\n yCord=math.sin(posAngle)\n dirPoint=Point(xCord,yCord)#Versor that point at the tangentPoint\n ver=Vector(originPoint,dirPoint)\n ver.Mult(tanMod)\n tangVectorPoint=ver.Point()\n 
posPoint=Point(tangVectorPoint+(outx,outy))\n #Compute the Negative Tangent\n xCord=math.cos(negAngle)\n yCord=math.sin(negAngle)\n dirPoint=Point(xCord,yCord)#Versor that point at the tangentPoint\n ver=Vector(originPoint,dirPoint)\n ver.Mult(tanMod)\n tangVectorPoint=ver.Point()\n negPoint=Point(tangVectorPoint+(outx,outy))\n if(firstPoint.Dist(posPoint)<firstPoint.Dist(negPoint)):\n return posPoint.getCoords() \n else:\n return negPoint.getCoords()", "def compute_omega( g, tilt ): \n kz = g[2] # component along the rotation axis\n modg2 = (g*g).sum(axis=0)\n num = -modg2 - 2*tilt*kz\n den = 2*np.sqrt(1 - tilt*tilt)\n kx = num / den\n arg = modg2 - kx*kx - kz*kz\n mask = arg >= 0\n ky = np.sqrt( arg * mask ) # positive\n omegaplus = anglevecs2D( g[0], g[1], kx, ky )\n omegaminus= anglevecs2D( g[0], g[1], kx, -ky )\n return omegaplus, omegaminus", "def target_position(self):\n torso_frame = self.data.xmat['torso'].reshape(3, 3)\n torso_pos = self.data.xpos['torso']\n torso_to_target = self.data.site_xpos['target'] - torso_pos\n return torso_to_target.dot(torso_frame)", "def top_translation(isos_012):\n d = isos_012[0].shape[1]\n E2 = backend.eye(d**2, dtype=isos_012[0].dtype)\n # Ordering: mpo_left, mpo_right, phys_bottom, phys_top\n translation_tensor = backend.reshape(E2, (d,d,d,d))\n return ascend_uniform_MPO_to_top(translation_tensor, isos_012)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }